From 8f45b67b20f891c60c362b892d9c8b86fbb4c21e Mon Sep 17 00:00:00 2001 From: Luke K Date: Mon, 24 Feb 2020 14:04:11 +0000 Subject: [PATCH] Vendor Knative Serving Replace `git clone` calls in tests with vendored copy. Update all `github.com/knative` paths to `knative.dev` Explicitly add Knative Serving to dependencies (source OpenShift Knative) Update extant dependency to v0.12.1 including code update to v1 from v1beta1 --- hack/lib/vars.bash | 2 +- test/Gopkg.lock | 505 +- test/Gopkg.toml | 39 +- test/clients.go | 2 +- test/lib.bash | 11 +- test/service.go | 32 +- .../serving => cloud.google.com/go}/LICENSE | 0 .../go/compute/metadata/metadata.go | 524 + .../container/apiv1/cluster_manager_client.go | 676 + .../go/container/apiv1/doc.go | 49 + .../go/internal/version/version.go | 71 + .../monitoring/apiv3/alert_policy_client.go | 318 + .../go/monitoring/apiv3/doc.go | 108 + .../go/monitoring/apiv3/group_client.go | 432 + .../go/monitoring/apiv3/metric_client.go | 544 + .../apiv3/notification_channel_client.go | 545 + .../go/monitoring/apiv3/path_funcs.go | 107 + .../apiv3/service_monitoring_client.go | 505 + .../monitoring/apiv3/uptime_check_client.go | 420 + .../cloud.google.com/go/trace/apiv2/doc.go | 105 + .../go/trace/apiv2/path_funcs.go | 43 + .../go/trace/apiv2/trace_client.go | 160 + .../exporter/ocagent}/LICENSE | 0 .../exporter/ocagent/common.go | 38 + .../exporter/ocagent/connection.go | 113 + .../exporter/ocagent/nodeinfo.go | 46 + .../exporter/ocagent/ocagent.go | 572 + .../exporter/ocagent/options.go | 161 + .../exporter/ocagent/transform_spans.go | 248 + .../ocagent/transform_stats_to_metrics.go | 274 + .../exporter/ocagent/version.go | 17 + .../exporter/prometheus/LICENSE | 201 + .../exporter/prometheus/prometheus.go | 277 + .../exporter/prometheus/sanitize.go | 50 + .../exporter/stackdriver/AUTHORS | 1 + .../exporter/stackdriver}/LICENSE | 0 .../exporter/stackdriver/label.go | 33 + .../exporter/stackdriver/metrics.go | 519 + 
.../exporter/stackdriver/metrics_batcher.go | 201 + .../exporter/stackdriver/metrics_proto.go | 569 + .../aws_identity_doc_utils.go | 57 + .../monitoredresource/gcp_metadata_config.go | 117 + .../monitoredresource/monitored_resources.go | 232 + .../exporter/stackdriver/resource.go | 143 + .../exporter/stackdriver/sanitize.go | 50 + .../exporter/stackdriver/stackdriver.go | 480 + .../exporter/stackdriver/stats.go | 628 + .../exporter/stackdriver/trace.go | 178 + .../exporter/stackdriver/trace_proto.go | 291 + .../github.com/aws/aws-sdk-go/LICENSE.txt | 202 + .../github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 164 + .../aws/aws-sdk-go/aws/awserr/types.go | 221 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 + .../aws-sdk-go/aws/awsutil/string_value.go | 88 + .../aws/aws-sdk-go/aws/client/client.go | 97 + .../aws-sdk-go/aws/client/default_retryer.go | 177 + .../aws/aws-sdk-go/aws/client/logger.go | 194 + .../aws/client/metadata/client_info.go | 14 + .../aws-sdk-go/aws/client/no_op_retryer.go | 28 + .../github.com/aws/aws-sdk-go/aws/config.go | 586 + .../aws/aws-sdk-go/aws/context_1_5.go | 37 + .../aws/aws-sdk-go/aws/context_1_9.go | 11 + .../aws-sdk-go/aws/context_background_1_5.go | 22 + .../aws-sdk-go/aws/context_background_1_7.go | 20 + .../aws/aws-sdk-go/aws/context_sleep.go | 24 + .../aws/aws-sdk-go/aws/convert_types.go | 918 ++ .../aws-sdk-go/aws/corehandlers/handlers.go | 230 + .../aws/corehandlers/param_validator.go | 17 + .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws/credentials/chain_provider.go | 100 + .../credentials/context_background_go1.5.go | 22 + .../credentials/context_background_go1.7.go | 20 + .../aws/credentials/context_go1.5.go | 39 + .../aws/credentials/context_go1.9.go | 13 + .../aws-sdk-go/aws/credentials/credentials.go | 310 + 
.../ec2rolecreds/ec2_role_provider.go | 180 + .../aws/credentials/endpointcreds/provider.go | 203 + .../aws/credentials/env_provider.go | 74 + .../aws/credentials/processcreds/provider.go | 426 + .../shared_credentials_provider.go | 150 + .../aws/credentials/static_provider.go | 55 + .../stscreds/assume_role_provider.go | 321 + .../stscreds/web_identity_provider.go | 100 + .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 + .../aws/aws-sdk-go/aws/csm/enable.go | 89 + .../aws/aws-sdk-go/aws/csm/metric.go | 109 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 + .../aws-sdk-go/aws/csm/metric_exception.go | 26 + .../aws/aws-sdk-go/aws/csm/reporter.go | 264 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 + .../aws-sdk-go/aws/defaults/shared_config.go | 27 + .../github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 199 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 228 + .../aws/ec2metadata/token_provider.go | 92 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 216 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 6435 ++++++++ .../aws/endpoints/dep_service_ids.go | 141 + .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 564 + .../aws/endpoints/legacy_regions.go | 24 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 341 + .../aws/endpoints/v3model_codegen.go | 351 + .../github.com/aws/aws-sdk-go/aws/errors.go | 13 + .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 118 + .../aws/request/connection_reset_error.go | 18 + .../aws/aws-sdk-go/aws/request/handlers.go | 343 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 65 + .../aws/aws-sdk-go/aws/request/request.go | 698 + .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 + .../aws/aws-sdk-go/aws/request/request_1_8.go | 36 + .../aws-sdk-go/aws/request/request_context.go | 14 + .../aws/request/request_context_1_6.go | 14 + 
.../aws/request/request_pagination.go | 266 + .../aws/aws-sdk-go/aws/request/retryer.go | 309 + .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 286 + .../aws/aws-sdk-go/aws/request/waiter.go | 295 + .../aws/session/cabundle_transport.go | 26 + .../aws/session/cabundle_transport_1_5.go | 22 + .../aws/session/cabundle_transport_1_6.go | 23 + .../aws/aws-sdk-go/aws/session/credentials.go | 259 + .../aws/aws-sdk-go/aws/session/doc.go | 245 + .../aws/aws-sdk-go/aws/session/env_config.go | 345 + .../aws/aws-sdk-go/aws/session/session.go | 734 + .../aws-sdk-go/aws/session/shared_config.go | 547 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 81 + .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/signer/v4/request_context_go1.5.go | 13 + .../aws/signer/v4/request_context_go1.7.go | 13 + .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 + .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 846 + .../github.com/aws/aws-sdk-go/aws/types.go | 241 + .../github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../internal/context/background_go1.5.go | 40 + .../aws/aws-sdk-go/internal/ini/ast.go | 120 + .../aws-sdk-go/internal/ini/comma_token.go | 11 + .../aws-sdk-go/internal/ini/comment_token.go | 35 + .../aws/aws-sdk-go/internal/ini/doc.go | 29 + .../aws-sdk-go/internal/ini/empty_token.go | 4 + .../aws/aws-sdk-go/internal/ini/expression.go | 24 + .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 + .../aws/aws-sdk-go/internal/ini/ini.go | 51 + .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 + .../aws/aws-sdk-go/internal/ini/ini_parser.go | 356 + .../aws-sdk-go/internal/ini/literal_tokens.go | 324 + .../aws-sdk-go/internal/ini/newline_token.go | 30 + .../aws-sdk-go/internal/ini/number_helper.go | 152 + .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 + .../aws-sdk-go/internal/ini/parse_error.go | 43 
+ .../aws-sdk-go/internal/ini/parse_stack.go | 60 + .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 + .../aws/aws-sdk-go/internal/ini/skipper.go | 45 + .../aws/aws-sdk-go/internal/ini/statement.go | 35 + .../aws/aws-sdk-go/internal/ini/value_util.go | 284 + .../aws/aws-sdk-go/internal/ini/visitor.go | 166 + .../aws/aws-sdk-go/internal/ini/walker.go | 25 + .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 + .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 + .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../aws/aws-sdk-go/internal/sdkmath/floor.go | 15 + .../internal/sdkmath/floor_go1.9.go | 56 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkrand/read.go | 11 + .../aws-sdk-go/internal/sdkrand/read_1_5.go | 24 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../internal/shareddefaults/ecs_container.go | 12 + .../internal/shareddefaults/shared_config.go | 40 + .../aws-sdk-go/internal/strings/strings.go | 11 + .../internal/sync/singleflight/LICENSE | 27 + .../sync/singleflight/singleflight.go | 120 + .../aws/aws-sdk-go/private/protocol/host.go | 68 + .../private/protocol/host_prefix.go | 54 + .../private/protocol/idempotency.go | 75 + .../private/protocol/json/jsonutil/build.go | 296 + .../protocol/json/jsonutil/unmarshal.go | 282 + .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../aws-sdk-go/private/protocol/payload.go | 81 + .../aws-sdk-go/private/protocol/protocol.go | 49 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 246 + .../private/protocol/query/unmarshal.go | 39 + .../private/protocol/query/unmarshal_error.go | 69 + .../aws-sdk-go/private/protocol/rest/build.go | 310 + .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 257 + .../aws-sdk-go/private/protocol/timestamp.go | 84 + .../aws-sdk-go/private/protocol/unmarshal.go | 27 + .../private/protocol/unmarshal_error.go | 65 + 
.../private/protocol/xml/xmlutil/build.go | 306 + .../private/protocol/xml/xmlutil/sort.go | 32 + .../private/protocol/xml/xmlutil/unmarshal.go | 291 + .../protocol/xml/xmlutil/xml_to_struct.go | 159 + .../aws/aws-sdk-go/service/sts/api.go | 3115 ++++ .../aws-sdk-go/service/sts/customizations.go | 11 + .../aws/aws-sdk-go/service/sts/doc.go | 108 + .../aws/aws-sdk-go/service/sts/errors.go | 82 + .../aws/aws-sdk-go/service/sts/service.go | 98 + .../service/sts/stsiface/interface.go | 96 + test/vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/stream.go | 316 + .../opencensus-proto/AUTHORS | 1 + .../opencensus-proto/LICENSE | 202 + .../gen-go/agent/common/v1/common.pb.go | 361 + .../agent/metrics/v1/metrics_service.pb.go | 275 + .../agent/metrics/v1/metrics_service.pb.gw.go | 150 + .../gen-go/agent/trace/v1/trace_service.pb.go | 457 + .../agent/trace/v1/trace_service.pb.gw.go | 150 + .../gen-go/metrics/v1/metrics.pb.go | 1127 ++ .../gen-go/resource/v1/resource.pb.go | 100 + .../gen-go/trace/v1/trace.pb.go | 1553 ++ .../gen-go/trace/v1/trace_config.pb.go | 359 + test/vendor/github.com/ghodss/yaml/LICENSE | 50 + test/vendor/github.com/ghodss/yaml/fields.go | 501 + test/vendor/github.com/ghodss/yaml/yaml.go | 277 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 1421 ++ .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 ++++ .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + .../github.com/gogo/protobuf/types/any.go | 140 + .../github.com/gogo/protobuf/types/any.pb.go | 723 + .../github.com/gogo/protobuf/types/api.pb.go | 2169 +++ .../github.com/gogo/protobuf/types/doc.go | 35 + .../gogo/protobuf/types/duration.go | 100 + .../gogo/protobuf/types/duration.pb.go | 546 + .../gogo/protobuf/types/duration_gogo.go | 100 + 
.../gogo/protobuf/types/empty.pb.go | 491 + .../gogo/protobuf/types/field_mask.pb.go | 767 + .../gogo/protobuf/types/protosize.go | 34 + .../gogo/protobuf/types/source_context.pb.go | 553 + .../gogo/protobuf/types/struct.pb.go | 2300 +++ .../gogo/protobuf/types/timestamp.go | 130 + .../gogo/protobuf/types/timestamp.pb.go | 566 + .../gogo/protobuf/types/timestamp_gogo.go | 94 + .../github.com/gogo/protobuf/types/type.pb.go | 3396 ++++ .../gogo/protobuf/types/wrappers.pb.go | 2756 ++++ .../gogo/protobuf/types/wrappers_gogo.go | 300 + .../golang/protobuf/descriptor/descriptor.go | 93 + .../golang/protobuf/jsonpb/jsonpb.go | 1284 ++ .../protoc-gen-go/descriptor/descriptor.pb.go | 2887 ++++ .../golang/protobuf/ptypes/empty/empty.pb.go | 83 + .../protobuf/ptypes/struct/struct.pb.go | 336 + .../protobuf/ptypes/wrappers/wrappers.pb.go | 461 + .../go-containerregistry/pkg/name/ref.go | 3 +- .../pkg/name/repository.go | 1 + .../github.com/google/uuid/CONTRIBUTORS | 9 + test/vendor/github.com/google/uuid/LICENSE | 27 + test/vendor/github.com/google/uuid/dce.go | 80 + test/vendor/github.com/google/uuid/doc.go | 12 + test/vendor/github.com/google/uuid/hash.go | 53 + test/vendor/github.com/google/uuid/marshal.go | 37 + test/vendor/github.com/google/uuid/node.go | 90 + test/vendor/github.com/google/uuid/node_js.go | 12 + .../vendor/github.com/google/uuid/node_net.go | 33 + test/vendor/github.com/google/uuid/sql.go | 59 + test/vendor/github.com/google/uuid/time.go | 123 + test/vendor/github.com/google/uuid/util.go | 43 + test/vendor/github.com/google/uuid/uuid.go | 245 + .../vendor/github.com/google/uuid/version1.go | 44 + .../vendor/github.com/google/uuid/version4.go | 38 + .../github.com/googleapis/gax-go/LICENSE | 27 + .../googleapis/gax-go/v2/call_option.go | 161 + .../github.com/googleapis/gax-go/v2/gax.go | 39 + .../github.com/googleapis/gax-go/v2/header.go | 53 + .../github.com/googleapis/gax-go/v2/invoke.go | 99 + .../grpc-ecosystem/grpc-gateway/LICENSE.txt | 27 + 
.../grpc-gateway/internal/errors.pb.go | 189 + .../grpc-gateway/runtime/context.go | 236 + .../grpc-gateway/runtime/convert.go | 318 + .../grpc-gateway/runtime/doc.go | 5 + .../grpc-gateway/runtime/errors.go | 130 + .../grpc-gateway/runtime/fieldmask.go | 82 + .../grpc-gateway/runtime/handler.go | 209 + .../runtime/marshal_httpbodyproto.go | 43 + .../grpc-gateway/runtime/marshal_json.go | 45 + .../grpc-gateway/runtime/marshal_jsonpb.go | 262 + .../grpc-gateway/runtime/marshal_proto.go | 62 + .../grpc-gateway/runtime/marshaler.go | 48 + .../runtime/marshaler_registry.go | 91 + .../grpc-gateway/runtime/mux.go | 303 + .../grpc-gateway/runtime/pattern.go | 262 + .../grpc-gateway/runtime/proto2_convert.go | 80 + .../grpc-gateway/runtime/proto_errors.go | 106 + .../grpc-gateway/runtime/query.go | 391 + .../third_party/googleapis/LICENSE | 201 + .../grpc-gateway/utilities/doc.go | 2 + .../grpc-gateway/utilities/pattern.go | 22 + .../grpc-gateway/utilities/readerfactory.go | 20 + .../grpc-gateway/utilities/trie.go | 177 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/api.go | 49 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 842 + .../jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + .../knative/pkg/apis/condition_set.go | 335 - .../knative/pkg/apis/condition_types.go | 109 - .../github.com/knative/pkg/apis/contexts.go | 182 - .../github.com/knative/pkg/apis/deprecated.go | 180 - .../knative/pkg/apis/field_error.go | 379 - .../github.com/knative/pkg/apis/interfaces.go | 68 - .../knative/pkg/apis/kind2resource.go | 47 - .../knative/pkg/apis/metadata_validation.go | 62 - .../vendor/github.com/knative/pkg/apis/url.go | 73 - .../knative/pkg/apis/volatile_time.go | 46 - 
.../knative/pkg/apis/zz_generated.deepcopy.go | 130 - .../github.com/knative/pkg/kmeta/names.go | 41 - .../vendor/github.com/knative/pkg/kmp/diff.go | 92 - .../github.com/knative/pkg/kmp/reporters.go | 136 - .../serving/config/300-imagecache.yaml | 1 - .../apis/autoscaling/annotation_validation.go | 63 - .../autoscaling/v1alpha1/pa_validation.go | 80 - .../apis/config/testdata/config-defaults.yaml | 1 - .../v1alpha1/clusteringress_lifecycle.go | 30 - .../v1alpha1/clusteringress_types.go | 75 - .../apis/serving/v1beta1/revision_defaults.go | 91 - .../testdata/config-autoscaler.yaml | 1 - .../networking/v1alpha1/clusteringress.go | 163 - .../testdata/config-deployment.yaml | 1 - .../serving/pkg/gc/testdata/config-gc.yaml | 1 - .../pkg/logging/testdata/config-logging.yaml | 1 - .../testdata/config-observability.yaml | 1 - .../pkg/network/testdata/config-network.yaml | 1 - .../config/testdata/config-certmanager.yaml | 1 - .../config/testdata/config-gc.yaml | 1 - .../ingress/config/testdata/config-istio.yaml | 1 - .../config/testdata/config-network.yaml | 1 - .../config/testdata/config-autoscaler.yaml | 1 - .../config/testdata/config-deployment.yaml | 1 - .../config/testdata/config-logging.yaml | 1 - .../config/testdata/config-network.yaml | 1 - .../config/testdata/config-observability.yaml | 1 - .../route/config/testdata/config-domain.yaml | 1 - .../route/config/testdata/config-gc.yaml | 1 - .../route/config/testdata/config-network.yaml | 1 - .../test/config/100-istio-default-domain.yaml | 1 - .../serving/third_party/istio-1.0-latest | 1 - .../serving/third_party/istio-1.1-latest | 1 - .../golang_protobuf_extensions}/LICENSE | 0 .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/build_info.go | 29 + .../prometheus/build_info_pre_1.12.go | 22 + 
.../client_golang/prometheus/collector.go | 120 + .../client_golang/prometheus/counter.go | 277 + .../client_golang/prometheus/desc.go | 184 + .../client_golang/prometheus/doc.go | 200 + .../prometheus/expvar_collector.go | 119 + .../client_golang/prometheus/fnv.go | 42 + .../client_golang/prometheus/gauge.go | 286 + .../client_golang/prometheus/go_collector.go | 396 + .../client_golang/prometheus/histogram.go | 586 + .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang/prometheus/metric.go | 174 + .../client_golang/prometheus/observer.go | 52 + .../prometheus/process_collector.go | 151 + .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 112 + .../prometheus/promhttp/delegator.go | 357 + .../client_golang/prometheus/promhttp/http.go | 349 + .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 447 + .../client_golang/prometheus/registry.go | 945 ++ .../client_golang/prometheus/summary.go | 736 + .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 162 + .../client_golang/prometheus/vec.go | 472 + .../client_golang/prometheus/wrap.go | 200 + .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 723 + .../github.com/prometheus/common/LICENSE | 201 + .../github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 + .../prometheus/common/expfmt/encode.go | 162 + .../prometheus/common/expfmt/expfmt.go | 41 + .../prometheus/common/expfmt/fuzz.go | 36 + .../common/expfmt/openmetrics_create.go | 527 + .../prometheus/common/expfmt/text_create.go | 465 + .../prometheus/common/expfmt/text_parse.go | 764 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go 
| 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 102 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 270 + .../prometheus/common/model/value.go | 416 + .../github.com/prometheus/procfs/LICENSE | 201 + .../github.com/prometheus/procfs/NOTICE | 7 + .../github.com/prometheus/procfs/arp.go | 85 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cpuinfo.go | 167 + .../github.com/prometheus/procfs/crypto.go | 153 + .../github.com/prometheus/procfs/doc.go | 45 + .../vendor/github.com/prometheus/procfs/fs.go | 43 + .../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 88 + .../procfs/internal/util/readfile.go | 38 + .../procfs/internal/util/sysreadfile.go | 48 + .../internal/util/sysreadfile_compat.go | 26 + .../procfs/internal/util/valueparser.go | 91 + .../github.com/prometheus/procfs/ipvs.go | 241 + .../github.com/prometheus/procfs/loadavg.go | 62 + .../github.com/prometheus/procfs/mdstat.go | 194 + .../github.com/prometheus/procfs/meminfo.go | 277 + .../github.com/prometheus/procfs/mountinfo.go | 180 + .../prometheus/procfs/mountstats.go | 621 + .../prometheus/procfs/net_conntrackstat.go | 153 + .../github.com/prometheus/procfs/net_dev.go | 205 + .../prometheus/procfs/net_sockstat.go | 163 + .../prometheus/procfs/net_softnet.go | 98 + .../github.com/prometheus/procfs/net_udp.go | 229 + .../github.com/prometheus/procfs/net_unix.go | 257 + .../github.com/prometheus/procfs/proc.go | 298 + .../prometheus/procfs/proc_environ.go | 37 + .../prometheus/procfs/proc_fdinfo.go | 125 + .../github.com/prometheus/procfs/proc_io.go | 59 + .../prometheus/procfs/proc_limits.go | 157 + .../github.com/prometheus/procfs/proc_ns.go | 68 + 
.../github.com/prometheus/procfs/proc_psi.go | 100 + .../github.com/prometheus/procfs/proc_stat.go | 192 + .../prometheus/procfs/proc_status.go | 166 + .../github.com/prometheus/procfs/schedstat.go | 118 + .../github.com/prometheus/procfs/stat.go | 244 + .../github.com/prometheus/procfs/swaps.go | 89 + .../vendor/github.com/prometheus/procfs/vm.go | 210 + .../github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + .../metric/metricexport/doc.go | 19 + .../metric/metricexport/export.go | 26 + .../metric/metricexport/reader.go | 187 + .../go.opencensus.io/plugin/ocgrpc/client.go | 56 + .../plugin/ocgrpc/client_metrics.go | 107 + .../plugin/ocgrpc/client_stats_handler.go | 49 + .../go.opencensus.io/plugin/ocgrpc/doc.go | 19 + .../go.opencensus.io/plugin/ocgrpc/server.go | 80 + .../plugin/ocgrpc/server_metrics.go | 97 + .../plugin/ocgrpc/server_stats_handler.go | 63 + .../plugin/ocgrpc/stats_common.go | 227 + .../plugin/ocgrpc/trace_common.go | 107 + .../resource/resourcekeys/const.go | 68 + test/vendor/golang.org/x/net/http2/h2c/h2c.go | 495 + .../x/net/internal/timeseries/timeseries.go | 525 + test/vendor/golang.org/x/net/trace/events.go | 532 + .../golang.org/x/net/trace/histogram.go | 365 + test/vendor/golang.org/x/net/trace/trace.go | 1130 ++ .../golang.org/x/oauth2/google/appengine.go | 38 + .../x/oauth2/google/appengine_gen1.go | 77 + .../x/oauth2/google/appengine_gen2_flex.go | 27 + .../golang.org/x/oauth2/google/default.go | 154 + test/vendor/golang.org/x/oauth2/google/doc.go | 40 + .../golang.org/x/oauth2/google/google.go | 209 + test/vendor/golang.org/x/oauth2/google/jwt.go | 74 + test/vendor/golang.org/x/oauth2/google/sdk.go | 201 + test/vendor/golang.org/x/oauth2/jws/jws.go | 182 + test/vendor/golang.org/x/oauth2/jwt/jwt.go | 185 + test/vendor/golang.org/x/sync/AUTHORS | 3 + test/vendor/golang.org/x/sync/CONTRIBUTORS | 3 + test/vendor/golang.org/x/sync/LICENSE | 27 + test/vendor/golang.org/x/sync/PATENTS | 22 + 
.../golang.org/x/sync/errgroup/errgroup.go | 66 + .../golang.org/x/sync/semaphore/semaphore.go | 127 + test/vendor/google.golang.org/api/AUTHORS | 11 + .../vendor/google.golang.org/api/CONTRIBUTORS | 56 + test/vendor/google.golang.org/api/LICENSE | 27 + .../api/googleapi/transport/apikey.go | 44 + .../api/internal/conn_pool.go | 30 + .../google.golang.org/api/internal/creds.go | 105 + .../google.golang.org/api/internal/pool.go | 53 + .../api/internal/settings.go | 95 + .../internal/third_party/uritemplates/LICENSE | 27 + .../api/iterator/iterator.go | 227 + .../api/option/credentials_go19.go | 23 + .../api/option/credentials_notgo19.go | 22 + .../google.golang.org/api/option/option.go | 237 + .../api/support/bundler/bundler.go | 402 + .../google.golang.org/api/transport/dial.go | 36 + .../google.golang.org/api/transport/doc.go | 11 + .../google.golang.org/api/transport/go19.go | 25 + .../api/transport/grpc/dial.go | 287 + .../api/transport/grpc/dial_appengine.go | 31 + .../api/transport/grpc/dial_socketopt.go | 49 + .../api/transport/grpc/pool.go | 92 + .../api/transport/http/dial.go | 156 + .../api/transport/http/dial_appengine.go | 20 + .../http/internal/propagation/http.go | 86 + .../api/transport/not_go19.go | 25 + .../google.golang.org/appengine/appengine.go | 135 + .../appengine/appengine_vm.go | 20 + .../google.golang.org/appengine/errors.go | 46 + .../google.golang.org/appengine/identity.go | 142 + .../app_identity/app_identity_service.pb.go | 611 + .../internal/modules/modules_service.pb.go | 786 + .../internal/socket/socket_service.pb.go | 2822 ++++ .../google.golang.org/appengine/namespace.go | 25 + .../google.golang.org/appengine/socket/doc.go | 10 + .../appengine/socket/socket_classic.go | 290 + .../appengine/socket/socket_vm.go | 64 + .../google.golang.org/appengine/timeout.go | 20 + .../vendor/google.golang.org/genproto/LICENSE | 202 + .../api/annotations/annotations.pb.go | 55 + .../googleapis/api/annotations/client.pb.go | 79 + 
.../api/annotations/field_behavior.pb.go | 122 + .../googleapis/api/annotations/http.pb.go | 633 + .../googleapis/api/annotations/resource.pb.go | 441 + .../api/distribution/distribution.pb.go | 638 + .../googleapis/api/httpbody/httpbody.pb.go | 146 + .../genproto/googleapis/api/label/label.pb.go | 140 + .../googleapis/api/launch_stage.pb.go | 113 + .../googleapis/api/metric/metric.pb.go | 535 + .../api/monitoredres/monitored_resource.pb.go | 306 + .../container/v1/cluster_service.pb.go | 9166 +++++++++++ .../devtools/cloudtrace/v2/trace.pb.go | 1274 ++ .../devtools/cloudtrace/v2/tracing.pb.go | 249 + .../googleapis/monitoring/v3/alert.pb.go | 862 + .../monitoring/v3/alert_service.pb.go | 707 + .../googleapis/monitoring/v3/common.pb.go | 854 + .../monitoring/v3/dropped_labels.pb.go | 106 + .../googleapis/monitoring/v3/group.pb.go | 165 + .../monitoring/v3/group_service.pb.go | 931 ++ .../googleapis/monitoring/v3/metric.pb.go | 236 + .../monitoring/v3/metric_service.pb.go | 1406 ++ .../monitoring/v3/mutation_record.pb.go | 101 + .../monitoring/v3/notification.pb.go | 395 + .../monitoring/v3/notification_service.pb.go | 1377 ++ .../googleapis/monitoring/v3/service.pb.go | 1553 ++ .../monitoring/v3/service_service.pb.go | 1232 ++ .../monitoring/v3/span_context.pb.go | 100 + .../googleapis/monitoring/v3/uptime.pb.go | 1019 ++ .../monitoring/v3/uptime_service.pb.go | 830 + .../googleapis/rpc/status/status.pb.go | 115 + .../type/calendarperiod/calendar_period.pb.go | 106 + .../protobuf/field_mask/field_mask.pb.go | 282 + test/vendor/google.golang.org/grpc/AUTHORS | 1 + test/vendor/google.golang.org/grpc/LICENSE | 202 + .../grpc/attributes/attributes.go | 70 + test/vendor/google.golang.org/grpc/backoff.go | 58 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../vendor/google.golang.org/grpc/balancer.go | 391 + .../grpc/balancer/balancer.go | 454 + .../grpc/balancer/base/balancer.go | 278 + .../grpc/balancer/base/base.go | 93 + 
.../grpclb/grpc_lb_v1/load_balancer.pb.go | 772 + .../grpc/balancer/grpclb/grpclb.go | 488 + .../grpc/balancer/grpclb/grpclb_config.go | 66 + .../grpc/balancer/grpclb/grpclb_picker.go | 202 + .../balancer/grpclb/grpclb_remote_balancer.go | 407 + .../grpc/balancer/grpclb/grpclb_util.go | 208 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_conn_wrappers.go | 271 + .../grpc/balancer_v1_wrapper.go | 334 + .../grpc_binarylog_v1/binarylog.pb.go | 900 ++ test/vendor/google.golang.org/grpc/call.go | 74 + .../google.golang.org/grpc/clientconn.go | 1568 ++ test/vendor/google.golang.org/grpc/codec.go | 50 + .../grpc/codes/code_string.go | 62 + .../google.golang.org/grpc/codes/codes.go | 198 + .../grpc/connectivity/connectivity.go | 73 + .../grpc/credentials/alts/alts.go | 330 + .../alts/internal/authinfo/authinfo.go | 89 + .../grpc/credentials/alts/internal/common.go | 69 + .../alts/internal/conn/aeadrekey.go | 131 + .../alts/internal/conn/aes128gcm.go | 105 + .../alts/internal/conn/aes128gcmrekey.go | 116 + .../credentials/alts/internal/conn/common.go | 70 + .../credentials/alts/internal/conn/counter.go | 62 + .../credentials/alts/internal/conn/record.go | 271 + .../credentials/alts/internal/conn/utils.go | 63 + .../alts/internal/handshaker/handshaker.go | 375 + .../internal/handshaker/service/service.go | 54 + .../internal/proto/grpc_gcp/altscontext.pb.go | 152 + .../internal/proto/grpc_gcp/handshaker.pb.go | 1105 ++ .../grpc_gcp/transport_security_common.pb.go | 184 + .../grpc/credentials/alts/utils.go | 163 + .../grpc/credentials/credentials.go | 251 + .../grpc/credentials/go12.go | 30 + .../grpc/credentials/google/google.go | 125 + .../grpc/credentials/internal/syscallconn.go | 61 + .../internal/syscallconn_appengine.go | 30 + .../grpc/credentials/oauth/oauth.go | 185 + .../google.golang.org/grpc/credentials/tls.go | 225 + .../google.golang.org/grpc/dialoptions.go | 594 + test/vendor/google.golang.org/grpc/doc.go | 24 + .../grpc/encoding/encoding.go 
| 122 + .../grpc/encoding/proto/proto.go | 110 + .../google.golang.org/grpc/grpclog/grpclog.go | 126 + .../google.golang.org/grpc/grpclog/logger.go | 85 + .../grpc/grpclog/loggerv2.go | 195 + .../google.golang.org/grpc/interceptor.go | 77 + .../grpc/internal/backoff/backoff.go | 73 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 167 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 210 + .../grpc/internal/binarylog/method_logger.go | 423 + .../grpc/internal/binarylog/sink.go | 162 + .../grpc/internal/binarylog/util.go | 41 + .../grpc/internal/buffer/unbounded.go | 85 + .../grpc/internal/channelz/funcs.go | 727 + .../grpc/internal/channelz/types.go | 702 + .../grpc/internal/channelz/types_linux.go | 53 + .../grpc/internal/channelz/types_nonlinux.go | 44 + .../grpc/internal/channelz/util_linux.go | 39 + .../grpc/internal/channelz/util_nonlinux.go | 26 + .../grpc/internal/envconfig/envconfig.go | 38 + .../grpc/internal/grpcrand/grpcrand.go | 56 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/internal.go | 72 + .../internal/resolver/dns/dns_resolver.go | 441 + .../grpc/internal/resolver/dns/go113.go | 33 + .../resolver/passthrough/passthrough.go | 57 + .../grpc/internal/syscall/syscall_linux.go | 114 + .../grpc/internal/syscall/syscall_nonlinux.go | 73 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 926 ++ .../grpc/internal/transport/defaults.go | 49 + .../grpc/internal/transport/flowcontrol.go | 217 + .../grpc/internal/transport/handler_server.go | 435 + .../grpc/internal/transport/http2_client.go | 1454 ++ .../grpc/internal/transport/http2_server.go | 1253 ++ .../grpc/internal/transport/http_util.go | 677 + .../grpc/internal/transport/log.go | 44 + .../grpc/internal/transport/transport.go | 808 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 209 + .../grpc/naming/dns_resolver.go | 293 + 
.../google.golang.org/grpc/naming/naming.go | 68 + .../google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 229 + .../google.golang.org/grpc/pickfirst.go | 159 + .../google.golang.org/grpc/preloader.go | 64 + test/vendor/google.golang.org/grpc/proxy.go | 152 + .../grpc/resolver/resolver.go | 253 + .../grpc/resolver_conn_wrapper.go | 263 + .../vendor/google.golang.org/grpc/rpc_util.go | 887 ++ test/vendor/google.golang.org/grpc/server.go | 1548 ++ .../google.golang.org/grpc/service_config.go | 434 + .../grpc/serviceconfig/serviceconfig.go | 41 + .../google.golang.org/grpc/stats/handlers.go | 63 + .../google.golang.org/grpc/stats/stats.go | 311 + .../google.golang.org/grpc/status/status.go | 228 + test/vendor/google.golang.org/grpc/stream.go | 1529 ++ test/vendor/google.golang.org/grpc/tap/tap.go | 51 + test/vendor/google.golang.org/grpc/trace.go | 123 + test/vendor/google.golang.org/grpc/version.go | 22 + test/vendor/istio.io/api/LICENSE | 202 + .../api/common/config/license-lint.yml | 141 + .../api/licenses/cloud.google.com/go/LICENSE | 202 + .../github.com/BurntSushi/toml/COPYING | 21 + .../toml/cmd/toml-test-decoder/COPYING | 21 + .../toml/cmd/toml-test-encoder/COPYING | 21 + .../BurntSushi/toml/cmd/tomlv/COPYING | 21 + .../github.com/client9/misspell/LICENSE | 22 + .../licenses/github.com/gogo/protobuf/LICENSE | 35 + .../licenses/github.com/golang/glog/LICENSE | 191 + .../licenses/github.com/golang/mock/LICENSE | 202 + .../github.com/golang/protobuf/LICENSE | 28 + .../licenses/github.com/google/go-cmp/LICENSE | 27 + .../github.com/kisielk/errcheck/LICENSE | 22 + .../github.com/kisielk/gotool/LICENSE | 20 + .../api/licenses/golang.org/x/crypto/LICENSE | 27 + .../api/licenses/golang.org/x/lint/LICENSE | 27 + .../api/licenses/golang.org/x/net/LICENSE | 27 + .../api/licenses/golang.org/x/oauth2/LICENSE | 27 + .../api/licenses/golang.org/x/sync/LICENSE | 27 + .../api/licenses/golang.org/x/sys/LICENSE | 27 + 
.../api/licenses/golang.org/x/text/LICENSE | 27 + .../api/licenses/golang.org/x/tools/LICENSE | 27 + .../golang.org/x/tools/cmd/getgo/LICENSE | 27 + .../google.golang.org/appengine/LICENSE | 202 + .../google.golang.org/genproto/LICENSE | 202 + .../licenses/google.golang.org/grpc/LICENSE | 202 + .../api/licenses/honnef.co/go/tools/LICENSE | 20 + .../honnef.co/go/tools/gcsizes/LICENSE | 27 + .../licenses/honnef.co/go/tools/lint/LICENSE | 28 + .../licenses/honnef.co/go/tools/ssa/LICENSE | 28 + .../licenses/istio.io/gogo-genproto/LICENSE | 202 + .../v1alpha3/destination_rule.pb.go | 5436 +++++++ .../v1alpha3/destination_rule_deepcopy.gen.go | 95 + .../v1alpha3/destination_rule_json.gen.go | 239 + .../networking/v1alpha3/envoy_filter.pb.go | 6132 +++++++ .../v1alpha3/envoy_filter_deepcopy.gen.go | 200 + .../v1alpha3/envoy_filter_json.gen.go | 377 + .../api/networking/v1alpha3/gateway.pb.go | 2391 +++ .../v1alpha3/gateway_deepcopy.gen.go | 194 + .../networking/v1alpha3/gateway_json.gen.go | 239 + .../networking/v1alpha3/service_entry.pb.go | 1885 +++ .../v1alpha3/service_entry_deepcopy.gen.go | 335 + .../v1alpha3/service_entry_json.gen.go | 358 + .../api/networking/v1alpha3/sidecar.pb.go | 2095 +++ .../v1alpha3/sidecar_deepcopy.gen.go | 225 + .../networking/v1alpha3/sidecar_json.gen.go | 281 + .../networking/v1alpha3/virtual_service.pb.go | 10921 +++++++++++++ .../v1alpha3/virtual_service_deepcopy.gen.go | 115 + .../v1alpha3/virtual_service_json.gen.go | 358 + test/vendor/istio.io/client-go/LICENSE | 202 + .../client-go/common/config/license-lint.yml | 141 + .../licenses/cloud.google.com/go/LICENSE | 202 + .../github.com/BurntSushi/toml/COPYING | 21 + .../toml/cmd/toml-test-decoder/COPYING | 21 + .../toml/cmd/toml-test-encoder/COPYING | 21 + .../BurntSushi/toml/cmd/tomlv/COPYING | 21 + .../github.com/NYTimes/gziphandler/LICENSE.md | 13 + .../github.com/PuerkitoBio/purell/LICENSE | 12 + .../github.com/PuerkitoBio/urlesc/LICENSE | 27 + 
.../github.com/client9/misspell/LICENSE | 22 + .../github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/docopt/docopt-go/LICENSE | 21 + .../github.com/emicklei/go-restful/LICENSE | 22 + .../github.com/evanphx/json-patch/LICENSE | 25 + .../licenses/github.com/ghodss/yaml/LICENSE | 50 + .../licenses/github.com/go-logr/logr}/LICENSE | 0 .../github.com/go-openapi/jsonpointer/LICENSE | 202 + .../go-openapi/jsonreference/LICENSE | 202 + .../github.com/go-openapi/spec/LICENSE | 202 + .../github.com/go-openapi/swag/LICENSE | 202 + .../licenses/github.com/gogo/protobuf/LICENSE | 35 + .../licenses/github.com/golang/glog/LICENSE | 191 + .../github.com/golang/groupcache/LICENSE | 191 + .../licenses/github.com/golang/mock/LICENSE | 202 + .../github.com/golang/protobuf/LICENSE | 28 + .../licenses/github.com/google/btree/LICENSE | 202 + .../licenses/github.com/google/go-cmp/LICENSE | 27 + .../licenses/github.com/google/gofuzz/LICENSE | 202 + .../github.com/googleapis/gnostic/LICENSE | 203 + .../gregjones/httpcache/LICENSE.txt | 7 + .../github.com/hashicorp/golang-lru/LICENSE | 362 + .../github.com/json-iterator/go/LICENSE | 21 + .../github.com/kisielk/errcheck/LICENSE | 22 + .../github.com/kisielk/gotool/LICENSE | 20 + .../licenses/github.com/kr/pretty/License | 21 + .../licenses/github.com/kr/pty/License | 23 + .../licenses/github.com/kr/text/License | 19 + .../github.com/mailru/easyjson/LICENSE | 7 + .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/reflect2/LICENSE | 201 + .../licenses/github.com/onsi/ginkgo/LICENSE | 20 + .../stenographer/support/go-colorable/LICENSE | 21 + .../stenographer/support/go-isatty/LICENSE | 9 + .../licenses/github.com/onsi/gomega/LICENSE | 20 + .../github.com/peterbourgon/diskv/LICENSE | 19 + .../licenses/github.com/pkg/errors/LICENSE | 23 + .../github.com/pmezard/go-difflib/LICENSE | 27 + .../licenses/github.com/spf13/pflag/LICENSE | 28 + .../licenses/github.com/stretchr/objx/LICENSE | 22 + 
.../github.com/stretchr/testify/LICENSE | 21 + .../licenses/golang.org/x/crypto/LICENSE | 27 + .../licenses/golang.org/x/exp/LICENSE | 27 + .../licenses/golang.org/x/lint/LICENSE | 27 + .../licenses/golang.org/x/net/LICENSE | 27 + .../licenses/golang.org/x/oauth2/LICENSE | 27 + .../licenses/golang.org/x/sync/LICENSE | 27 + .../licenses/golang.org/x/sys/LICENSE | 27 + .../licenses/golang.org/x/text/LICENSE | 27 + .../licenses/golang.org/x/time/LICENSE | 27 + .../licenses/golang.org/x/tools/LICENSE | 27 + .../golang.org/x/tools/cmd/getgo/LICENSE | 27 + .../google.golang.org/appengine/LICENSE | 202 + .../google.golang.org/genproto/LICENSE | 202 + .../licenses/google.golang.org/grpc/LICENSE | 202 + .../licenses/gopkg.in/check.v1/LICENSE | 25 + .../licenses/gopkg.in/inf.v0/LICENSE | 28 + .../licenses/gopkg.in/yaml.v2/LICENSE | 201 + .../licenses/honnef.co/go/tools/LICENSE | 20 + .../honnef.co/go/tools/gcsizes/LICENSE | 27 + .../licenses/honnef.co/go/tools/lint/LICENSE | 28 + .../licenses/honnef.co/go/tools/ssa/LICENSE | 28 + .../client-go/licenses/istio.io/api/LICENSE | 202 + .../licenses/istio.io/gogo-genproto/LICENSE | 202 + .../client-go/licenses/k8s.io/api/LICENSE | 202 + .../licenses/k8s.io/apimachinery/LICENSE | 202 + .../licenses/k8s.io/client-go/LICENSE | 202 + .../client-go/licenses/k8s.io/gengo/LICENSE | 202 + .../client-go/licenses/k8s.io/klog/LICENSE | 191 + .../licenses/k8s.io/kube-openapi/LICENSE | 202 + .../sigs.k8s.io/structured-merge-diff/LICENSE | 201 + .../licenses/sigs.k8s.io/yaml/LICENSE | 50 + .../pkg/apis/networking/v1alpha3/doc.go | 21 + .../apis/networking/v1alpha3/register.gen.go | 59 + .../pkg/apis/networking/v1alpha3/types.gen.go | 214 + .../v1alpha3/zz_generated.deepcopy.gen.go | 383 + test/vendor/istio.io/gogo-genproto/LICENSE | 202 + .../common/config/license-lint.yml | 141 + .../googleapis/google/api/annotations.pb.go | 56 + .../google/api/field_behavior.pb.go | 128 + .../googleapis/google/api/http.pb.go | 2197 +++ 
.../licenses/cloud.google.com/go/LICENSE | 202 + .../github.com/BurntSushi/toml/COPYING | 21 + .../toml/cmd/toml-test-decoder/COPYING | 21 + .../toml/cmd/toml-test-encoder/COPYING | 21 + .../BurntSushi/toml/cmd/tomlv/COPYING | 21 + .../github.com/client9/misspell/LICENSE | 22 + .../licenses/github.com/gogo/protobuf/LICENSE | 35 + .../licenses/github.com/golang/glog/LICENSE | 191 + .../licenses/github.com/golang/mock/LICENSE | 202 + .../github.com/golang/protobuf/LICENSE | 28 + .../licenses/github.com/google/go-cmp/LICENSE | 27 + .../github.com/kisielk/errcheck/LICENSE | 22 + .../github.com/kisielk/gotool/LICENSE | 20 + .../licenses/golang.org/x/crypto/LICENSE | 27 + .../licenses/golang.org/x/lint/LICENSE | 27 + .../licenses/golang.org/x/net/LICENSE | 27 + .../licenses/golang.org/x/oauth2/LICENSE | 27 + .../licenses/golang.org/x/sync/LICENSE | 27 + .../licenses/golang.org/x/sys/LICENSE | 27 + .../licenses/golang.org/x/text/LICENSE | 27 + .../licenses/golang.org/x/tools/LICENSE | 27 + .../golang.org/x/tools/cmd/getgo/LICENSE | 27 + .../google.golang.org/appengine/LICENSE | 202 + .../google.golang.org/genproto/LICENSE | 202 + .../licenses/google.golang.org/grpc/LICENSE | 202 + .../licenses/honnef.co/go/tools/LICENSE | 20 + .../honnef.co/go/tools/gcsizes/LICENSE | 27 + .../licenses/honnef.co/go/tools/lint/LICENSE | 28 + .../licenses/honnef.co/go/tools/ssa/LICENSE | 28 + .../plugin/pkg/client/auth/gcp/gcp.go | 383 + .../forked/golang/template/exec.go | 94 + .../forked/golang/template/funcs.go | 599 + .../k8s.io/client-go/util/jsonpath/doc.go | 20 + .../client-go/util/jsonpath/jsonpath.go | 525 + .../k8s.io/client-go/util/jsonpath/node.go | 256 + .../k8s.io/client-go/util/jsonpath/parser.go | 526 + .../util/workqueue/default_rate_limiters.go | 211 + .../util/workqueue/delaying_queue.go | 264 + .../k8s.io/client-go/util/workqueue/doc.go | 26 + .../client-go/util/workqueue/metrics.go | 334 + .../client-go/util/workqueue/parallelizer.go | 63 + 
.../k8s.io/client-go/util/workqueue/queue.go | 212 + .../util/workqueue/rate_limiting_queue.go | 69 + .../knative.dev/pkg/apis/condition_set.go | 15 +- .../knative.dev/pkg/apis/condition_types.go | 1 - .../pkg/apis/duck/cached.go | 0 .../vendor/knative.dev/pkg/apis/duck/const.go | 33 + .../pkg/apis/duck/doc.go | 0 .../pkg/apis/duck/enqueue.go | 0 .../knative.dev/pkg/apis/duck/interface.go | 69 + .../pkg/apis/duck/patch.go | 13 + .../pkg/apis/duck/proxy.go | 0 .../pkg/apis/duck/register.go | 0 .../pkg/apis/duck/typed.go | 10 +- .../knative.dev/pkg/apis/duck/unstructured.go | 62 + .../pkg/apis/duck/v1}/addressable_types.go | 32 +- .../pkg/apis/duck/v1/destination.go | 93 + .../knative.dev/pkg/apis/duck/v1/doc.go | 23 + .../pkg/apis/duck/v1/podspec_types.go | 93 + .../knative.dev/pkg/apis/duck/v1/register.go | 59 + .../pkg/apis/duck/v1/source_types.go | 156 + .../pkg/apis/duck/v1/status_types.go | 141 + .../pkg/apis/duck/v1/zz_generated.deepcopy.go | 483 + .../apis/duck/v1alpha1/addressable_types.go | 53 +- .../pkg/apis/duck/v1alpha1/binding_types.go | 92 + .../pkg/apis/duck/v1alpha1/condition_set.go | 8 +- .../apis/duck/v1alpha1/conditions_types.go | 9 +- .../pkg/apis/duck/v1alpha1/doc.go | 0 .../duck/v1alpha1/legacy_targetable_types.go | 9 +- .../pkg/apis/duck/v1alpha1/register.go | 2 +- .../duck/v1alpha1/retired_targetable_types.go | 9 +- .../duck/v1alpha1/zz_generated.deepcopy.go | 87 +- .../apis/duck/v1beta1/addressable_types.go | 128 + .../pkg/apis/duck/v1beta1/destination.go | 161 + .../pkg/apis/duck/v1beta1/doc.go | 0 .../pkg/apis/duck/v1beta1/register.go | 2 +- .../pkg/apis/duck/v1beta1/source_types.go | 156 + .../pkg/apis/duck/v1beta1/status_types.go | 7 +- .../duck/v1beta1/zz_generated.deepcopy.go | 163 +- .../pkg/apis/duck/verify.go | 5 +- .../vendor/knative.dev/pkg/apis/interfaces.go | 9 - test/vendor/knative.dev/pkg/apis/url.go | 62 + .../knative.dev/pkg/apis/volatile_time.go | 1 - .../pkg/apis/zz_generated.deepcopy.go | 2 +- .../pkg/configmap/doc.go | 0 
.../pkg/configmap/filter.go | 32 +- .../pkg/configmap/informed_watcher.go | 77 +- .../pkg/configmap/load.go | 0 .../pkg/configmap/manual_watcher.go | 16 +- .../pkg/configmap/static_watcher.go | 13 +- .../pkg/configmap/store.go | 18 +- .../pkg/configmap/watcher.go | 8 +- .../knative.dev/pkg/controller/controller.go | 551 + .../knative.dev/pkg/controller/helper.go | 52 + .../pkg/controller/stats_reporter.go | 250 + .../pkg/kmeta/accessor.go | 55 +- .../knative => knative.dev}/pkg/kmeta/doc.go | 0 .../pkg/kmeta/labels.go | 0 test/vendor/knative.dev/pkg/kmeta/names.go | 64 + .../pkg/kmeta/owner_references.go | 0 .../pkg/kmeta/ownerrefable_accessor.go | 25 + test/vendor/knative.dev/pkg/logging/config.go | 70 +- .../pkg/logging/zz_generated.deepcopy.go | 2 +- test/vendor/knative.dev/pkg/metrics/client.go | 62 + test/vendor/knative.dev/pkg/metrics/config.go | 322 + .../pkg/metrics/config_observability.go | 103 + test/vendor/knative.dev/pkg/metrics/doc.go | 16 + .../knative.dev/pkg/metrics/exporter.go | 208 + .../knative.dev/pkg/metrics/gcp_metadata.go | 53 + .../knative.dev/pkg/metrics/memstats.go | 539 + .../vendor/knative.dev/pkg/metrics/metrics.go | 173 + .../pkg/metrics/metricskey/constants.go | 41 + .../metrics/metricskey/constants_eventing.go | 104 + .../metrics/metricskey/constants_serving.go | 72 + .../pkg/metrics/monitored_resources.go | 43 + .../metrics/monitored_resources_eventing.go | 159 + .../metrics/monitored_resources_serving.go | 73 + .../pkg/metrics/opencensus_exporter.go | 38 + .../pkg/metrics/prometheus_exporter.go | 74 + test/vendor/knative.dev/pkg/metrics/record.go | 54 + .../knative.dev/pkg/metrics/reflector.go | 176 + .../pkg/metrics/stackdriver_exporter.go | 263 + test/vendor/knative.dev/pkg/metrics/utils.go | 26 + .../knative.dev/pkg/metrics/workqueue.go | 179 + .../pkg/metrics/zz_generated.deepcopy.go | 37 + test/vendor/knative.dev/pkg/network/doc.go | 19 + test/vendor/knative.dev/pkg/network/domain.go | 75 + 
.../knative.dev/pkg/network/error_handler.go | 43 + test/vendor/knative.dev/pkg/network/h2c.go | 54 + .../vendor/knative.dev/pkg/network/network.go | 45 + .../knative.dev/pkg/network/transports.go | 120 + .../knative.dev/pkg/profiling/server.go | 116 + .../knative => knative.dev}/pkg/ptr/doc.go | 0 .../knative => knative.dev}/pkg/ptr/ptr.go | 14 + test/vendor/knative.dev/pkg/signals/signal.go | 83 + .../pkg/signals/signal_posix.go} | 13 +- .../knative.dev/pkg/signals/signal_windows.go | 23 + test/vendor/knative.dev/pkg/system/clock.go | 32 + test/vendor/knative.dev/pkg/system/env.go | 59 + test/vendor/knative.dev/pkg/test/e2e_flags.go | 87 +- .../knative.dev/pkg/test/helpers/dryrun.go | 32 + .../pkg/test/helpers/error.go} | 29 +- .../knative.dev/pkg/test/helpers/name.go | 99 + .../knative.dev/pkg/test/kube_checks.go | 46 + .../knative.dev/pkg/test/logging/logging.go | 10 - test/vendor/knative.dev/pkg/test/request.go | 19 +- .../pkg/test/spoof/error_checks.go | 11 +- .../knative.dev/pkg/test/spoof/spoof.go | 67 +- .../vendor/knative.dev/pkg/test/tinterface.go | 35 + .../knative.dev/pkg/test/zipkin/util.go | 38 +- test/vendor/knative.dev/pkg/tracker/doc.go | 23 + .../vendor/knative.dev/pkg/tracker/enqueue.go | 263 + .../knative.dev/pkg/tracker/interface.go | 170 + .../pkg/tracker/zz_generated.deepcopy.go | 46 + .../vendor/knative.dev/serving/.gitattributes | 15 + .../.github/ISSUE_TEMPLATE/ask-question.md | 28 + .../.github/ISSUE_TEMPLATE/bug-report.md | 48 + .../.github/ISSUE_TEMPLATE/feature-request.md | 28 + .../serving/.github/issue-template.md | 37 + .../serving/.github/pull-request-template.md | 23 + test/vendor/knative.dev/serving/.gitignore | 11 + test/vendor/knative.dev/serving/.ko.yaml | 4 + .../knative => knative.dev}/serving/AUTHORS | 1 + .../knative.dev/serving/CONTRIBUTING.md | 5 + .../vendor/knative.dev/serving/DEVELOPMENT.md | 305 + test/vendor/knative.dev/serving/Gopkg.lock | 2121 +++ test/vendor/knative.dev/serving/Gopkg.toml | 171 + 
test/vendor/knative.dev/serving/LICENSE | 202 + test/vendor/knative.dev/serving/Makefile | 42 + test/vendor/knative.dev/serving/OWNERS | 7 + .../vendor/knative.dev/serving/OWNERS_ALIASES | 162 + test/vendor/knative.dev/serving/README.md | 29 + test/vendor/knative.dev/serving/ci | 1 + .../knative.dev/serving/cmd/activator/OWNERS | 10 + .../serving/cmd/activator/kodata/HEAD | 0 .../serving/cmd/activator/kodata/LICENSE | 0 .../cmd/activator/kodata/VENDOR-LICENSE | 0 .../serving/cmd/activator/kodata/refs | 1 + .../knative.dev/serving/cmd/activator/main.go | 332 + .../serving/cmd/activator/request_log.go | 62 + .../serving/cmd/activator/request_log_test.go | 197 + .../serving/cmd/autoscaler-hpa/OWNERS | 10 + .../serving/cmd/autoscaler-hpa}/kodata/HEAD | 0 .../cmd/autoscaler-hpa}/kodata/LICENSE | 0 .../cmd/autoscaler-hpa}/kodata/VENDOR-LICENSE | 0 .../serving/cmd/autoscaler-hpa/kodata/refs | 1 + .../serving/cmd/autoscaler-hpa/main.go | 29 + .../knative.dev/serving/cmd/autoscaler/OWNERS | 10 + .../serving/cmd/autoscaler}/kodata/HEAD | 0 .../serving/cmd/autoscaler}/kodata/LICENSE | 0 .../cmd/autoscaler}/kodata/VENDOR-LICENSE | 0 .../serving/cmd/autoscaler/kodata/refs | 1 + .../serving/cmd/autoscaler/main.go | 227 + .../serving/cmd/autoscaler/main_test.go | 149 + .../knative.dev/serving/cmd/controller/OWNERS | 10 + .../serving/cmd/controller}/kodata/HEAD | 0 .../serving/cmd/controller}/kodata/LICENSE | 0 .../cmd/controller}/kodata/VENDOR-LICENSE | 0 .../serving/cmd/controller/kodata/refs | 1 + .../serving/cmd/controller/main.go | 43 + .../serving/cmd/default-domain/main.go | 216 + .../knative.dev/serving/cmd/networking/OWNERS | 10 + .../cmd/networking/certmanager/kodata/HEAD | 0 .../cmd/networking/certmanager/kodata/LICENSE | 0 .../certmanager/kodata/VENDOR-LICENSE | 0 .../cmd/networking/certmanager/kodata/refs | 1 + .../cmd/networking/certmanager/main.go | 29 + .../knative.dev/serving/cmd/networking/doc.go | 17 + .../serving/cmd/networking/istio/kodata/HEAD | 0 
.../cmd/networking/istio/kodata/LICENSE | 0 .../networking/istio/kodata/VENDOR-LICENSE | 0 .../serving/cmd/networking/istio/kodata/refs | 1 + .../serving/cmd/networking/istio/main.go | 28 + .../serving/cmd/networking/nscert/kodata/HEAD | 1 + .../cmd/networking/nscert/kodata/LICENSE | 1 + .../networking/nscert/kodata/VENDOR-LICENSE | 1 + .../serving/cmd/networking/nscert/main.go | 28 + .../knative.dev/serving/cmd/queue/OWNERS | 10 + .../serving/cmd/queue}/kodata/HEAD | 0 .../serving/cmd/queue}/kodata/LICENSE | 0 .../serving/cmd/queue}/kodata/VENDOR-LICENSE | 0 .../knative.dev/serving/cmd/queue/kodata/refs | 1 + .../knative.dev/serving/cmd/queue/main.go | 628 + .../serving/cmd/queue/main_test.go | 452 + .../knative.dev/serving/cmd/webhook/OWNERS | 10 + .../serving/cmd/webhook/kodata/HEAD | 1 + .../serving/cmd/webhook/kodata/LICENSE | 1 + .../serving/cmd/webhook/kodata/VENDOR-LICENSE | 1 + .../serving/cmd/webhook/kodata/refs | 1 + .../knative.dev/serving/cmd/webhook/main.go | 166 + .../knative.dev/serving/code-of-conduct.md | 75 + .../knative.dev/serving/community/README.md | 5 + .../serving/config/100-namespace.yaml | 1 + ...200-addressable-resolvers-clusterrole.yaml | 1 + .../config/200-clusterrole-certmanager.yaml | 1 + .../serving/config/200-clusterrole-istio.yaml | 1 + .../config/200-clusterrole-metrics.yaml | 1 + .../config/200-clusterrole-namespaced.yaml | 1 + .../serving/config/200-clusterrole.yaml | 1 + .../200-podspecable-bindings-clusterrole.yaml | 1 + .../serving/config/200-serviceaccount.yaml | 1 + ...201-clusterrolebinding-metrics-server.yaml | 1 + .../201-clusterrolebinding-metrics.yaml | 1 + .../config/201-clusterrolebinding.yaml | 1 + .../201-rolebinding-metrics-server.yaml | 1 + .../serving/config/202-gateway.yaml | 1 + .../serving/config/203-local-gateway.yaml | 1 + .../serving/config/300-certificate.yaml | 1 + .../serving/config/300-configuration.yaml | 1 + .../serving/config/300-imagecache.yaml | 1 + .../serving/config/300-ingress.yaml | 1 + 
.../serving/config/300-metric.yaml | 1 + .../knative.dev/serving/config/300-pa.yaml | 1 + .../serving/config/300-revision.yaml | 1 + .../knative.dev/serving/config/300-route.yaml | 1 + .../serving/config/300-service.yaml | 1 + .../knative.dev/serving/config/300-sks.yaml | 1 + .../500-webhook-configmap-validation.yaml | 1 + .../config/500-webhook-defaulting.yaml | 1 + .../500-webhook-resource-validation.yaml | 1 + .../serving/config/500-webhook-secret.yaml | 1 + .../knative.dev/serving/config/999-cache.yaml | 1 + test/vendor/knative.dev/serving/config/OWNERS | 9 + .../knative.dev/serving/config/README.md | 31 + .../serving/config/activator-hpa.yaml | 1 + .../knative.dev/serving/config/activator.yaml | 1 + .../serving/config/autoscaler-hpa.yaml | 1 + .../serving/config/autoscaler.yaml | 1 + .../config/cert-manager/200-clusterrole.yaml | 30 + .../serving/config/cert-manager/config.yaml | 46 + .../config/cert-manager/controller.yaml | 93 + .../serving/config/config-autoscaler.yaml | 1 + .../serving/config/config-certmanager.yaml | 1 + .../serving/config/config-defaults.yaml | 1 + .../serving/config/config-deployment.yaml | 1 + .../serving/config/config-domain.yaml | 1 + .../knative.dev/serving/config/config-gc.yaml | 1 + .../serving/config/config-istio.yaml | 1 + .../serving/config/config-logging.yaml | 1 + .../serving/config/config-network.yaml | 1 + .../serving/config/config-observability.yaml | 1 + .../serving/config/config-tracing.yaml | 1 + .../serving/config/controller.yaml | 1 + .../serving/config/core/100-namespace.yaml | 22 + .../serving/config/core/999-cache.yaml | 25 + .../config/core/configmaps/autoscaler.yaml | 137 + .../config/core/configmaps/defaults.yaml | 83 + .../config/core/configmaps/deployment.yaml | 45 + .../config/core/configmaps/domain.yaml | 60 + .../serving/config/core/configmaps/gc.yaml | 55 + .../config/core/configmaps/logging.yaml | 71 + .../config/core/configmaps/network.yaml | 137 + .../config/core/configmaps/observability.yaml | 112 + 
.../config/core/configmaps/tracing.yaml | 57 + .../core/deployments/activator-hpa.yaml | 34 + .../config/core/deployments/activator.yaml | 133 + .../config/core/deployments/autoscaler.yaml | 113 + .../config/core/deployments/controller.yaml | 91 + .../config/core/deployments/webhook.yaml | 96 + ...200-addressable-resolvers-clusterrole.yaml | 36 + .../core/rbac/200-clusterrole-namespaced.yaml | 49 + .../config/core/rbac/200-clusterrole.yaml | 58 + .../200-podspecable-bindings-clusterrole.yaml | 34 + .../config/core/rbac/200-serviceaccount.yaml | 21 + .../core/rbac/201-clusterrolebinding.yaml | 28 + .../config/core/resources/certificate.yaml | 43 + .../config/core/resources/configuration.yaml | 61 + .../config/core/resources/ingress.yaml | 46 + .../serving/config/core/resources/metric.yaml | 41 + .../config/core/resources/podautoscaler.yaml | 53 + .../config/core/resources/revision.yaml | 62 + .../serving/config/core/resources/route.yaml | 57 + .../core/resources/serverlessservice.yaml | 55 + .../config/core/resources/service.yaml | 65 + .../core/webhooks/configmap-validation.yaml | 34 + .../config/core/webhooks/defaulting.yaml | 30 + .../core/webhooks/resource-validation.yaml | 30 + .../serving/config/core/webhooks/secret.yaml | 22 + .../config/custom-metrics-apiservice.yaml | 1 + .../200-clusterrole-metrics.yaml | 25 + ...201-clusterrolebinding-metrics-server.yaml | 29 + .../201-clusterrolebinding-metrics.yaml | 29 + .../201-rolebinding-metrics-server.yaml | 30 + .../config/hpa-autoscaling/controller.yaml | 93 + .../custom-metrics-apiservice.yaml | 30 + .../config/istio-ingress/200-clusterrole.yaml | 27 + .../config/istio-ingress/202-gateway.yaml | 33 + .../istio-ingress/203-local-gateway.yaml | 35 + .../serving/config/istio-ingress/config.yaml | 69 + .../config/istio-ingress/controller.yaml | 78 + .../config/monitoring/100-namespace.yaml | 20 + .../serving/config/monitoring/OWNERS | 10 + .../serving/config/monitoring/README.md | 26 + 
.../elasticsearch/100-fluentd-configmap.yaml | 119 + .../logging/elasticsearch/200-fluentd.yaml | 165 + .../stackdriver/100-fluentd-configmap.yaml | 116 + .../logging/stackdriver/200-fluentd.yaml | 165 + .../prometheus/100-grafana-custom-config.yaml | 27 + .../100-grafana-dash-knative-efficiency.yaml | 518 + .../100-grafana-dash-knative-reconciler.yaml | 624 + .../100-grafana-dash-knative-scaling.yaml | 1183 ++ .../prometheus/100-grafana-dash-knative.yaml | 1196 ++ .../metrics/prometheus/100-grafana.yaml | 260 + .../100-prometheus-scrape-config.yaml | 417 + .../200-kube-controller-metrics.yaml | 32 + .../metrics/prometheus/300-prometheus.yaml | 308 + .../100-stackdriver-serviceentry.yaml | 52 + .../tracing/jaeger/105-zipkin-service.yaml | 29 + .../jaeger/elasticsearch/100-jaeger.yaml | 30 + .../tracing/jaeger/memory/100-jaeger.yaml | 23 + .../tracing/zipkin-in-mem/100-zipkin.yaml | 67 + .../monitoring/tracing/zipkin/100-zipkin.yaml | 75 + .../namespace-wildcard-certs/controller.yaml | 93 + .../config/networking-certmanager.yaml | 1 + .../serving/config/networking-istio.yaml | 1 + .../serving/config/networking-ns-cert.yaml | 1 + .../config/post-install/default-domain.yaml | 72 + .../knative.dev/serving/config/webhook.yaml | 1 + .../vendor/knative.dev/serving/container.yaml | 7 + .../knative.dev/serving/content_sets.yml | 3 + .../serving/docs/client-conventions.md | 4 + .../serving/docs/product/personas.md | 65 + .../serving/docs/resources-overview.md | 116 + .../serving/docs/roadmap/scaling-2019.md | 193 + .../serving/docs/runtime-contract.md | 538 + .../serving/docs/scaling/DEVELOPMENT.md | 207 + .../knative.dev/serving/docs/scaling/OWNERS | 10 + .../serving/docs/scaling/README.md | 30 + .../knative.dev/serving/docs/spec/README.md | 13 + .../knative.dev/serving/docs/spec/errors.md | 9 + .../serving/docs/spec/images/object_model.png | Bin 0 -> 16672 bytes .../serving/docs/spec/motivation.md | 19 + .../serving/docs/spec/normative_examples.md | 4 + 
.../knative.dev/serving/docs/spec/overview.md | 89 + .../knative.dev/serving/docs/spec/spec.md | 4 + test/vendor/knative.dev/serving/hack/OWNERS | 10 + .../vendor/knative.dev/serving/hack/README.md | 13 + .../hack/boilerplate/add-boilerplate.sh | 38 + .../hack/boilerplate/boilerplate.go.txt | 15 + .../hack/boilerplate/boilerplate.sh.txt | 15 + .../serving/hack/generate-yamls.sh | 156 + .../knative.dev/serving/hack/release.sh | 35 + .../serving/hack/update-codegen.sh | 89 + .../knative.dev/serving/hack/update-deps.sh | 34 + .../serving/hack/verify-codegen.sh | 60 + .../knative.dev/serving/install/CONFIG.md | 56 + .../openshift/ci-operator/Dockerfile.in | 6 + .../ci-operator/build-image/Dockerfile | 11 + .../ci-operator/build-image/kubernetes.repo | 7 + .../ci-operator/generate-ci-config.sh | 67 + .../ci-operator/generate-dockerfiles.sh | 14 + .../knative-images/activator/Dockerfile | 6 + .../knative-images/autoscaler-hpa/Dockerfile | 6 + .../knative-images/autoscaler/Dockerfile | 6 + .../knative-images/certmanager/Dockerfile | 6 + .../knative-images/controller/Dockerfile | 6 + .../knative-images/istio/Dockerfile | 6 + .../knative-images/nscert/Dockerfile | 6 + .../knative-images/queue/Dockerfile | 6 + .../knative-images/webhook/Dockerfile | 6 + .../knative-test-images/autoscale/Dockerfile | 6 + .../knative-test-images/failing/Dockerfile | 6 + .../knative-test-images/flaky/Dockerfile | 6 + .../knative-test-images/grpc-ping/Dockerfile | 6 + .../hellovolume/Dockerfile | 6 + .../knative-test-images/helloworld/Dockerfile | 6 + .../knative-test-images/httpproxy/Dockerfile | 6 + .../observed-concurrency/Dockerfile | 6 + .../pizzaplanetv1/Dockerfile | 6 + .../pizzaplanetv2/Dockerfile | 6 + .../knative-test-images/runtime/Dockerfile | 6 + .../singlethreaded/Dockerfile | 6 + .../knative-test-images/timeout/Dockerfile | 6 + .../knative-test-images/wsserver/Dockerfile | 6 + .../serving/openshift/e2e-tests-openshift.sh | 187 + .../serving/openshift/olm/README.md | 49 + 
.../olm/knative-serving.catalogsource.yaml | 498 + .../openshift/patches/003-routeretry.patch | 79 + .../serving/openshift/patches/004-grpc.patch | 27 + .../openshift/patches/005-disablehpa.patch | 20 + .../productization/dist-git/.gitkeep | 1 + .../dist-git/Dockerfile.activator | 19 + .../dist-git/Dockerfile.autoscaler | 19 + .../dist-git/Dockerfile.autoscaler-hpa | 19 + .../dist-git/Dockerfile.controller | 19 + .../Dockerfile.networking-certmanager | 19 + .../dist-git/Dockerfile.networking-istio | 19 + .../dist-git/Dockerfile.networking-nscert | 19 + .../productization/dist-git/Dockerfile.queue | 19 + .../dist-git/Dockerfile.webhook | 19 + .../generate-dockerfiles/Dockerfile.in | 19 + .../generate-dockerfiles/gen_dockerfiles.sh | 20 + .../serving/openshift/release/README.md | 35 + .../release/create-release-branch.sh | 26 + .../openshift/release/generate-release.sh | 16 + .../openshift/release/knative-serving-ci.yaml | 1396 ++ .../release/knative-serving-v0.12.1.yaml | 1396 ++ .../serving/openshift/release/resolve.sh | 46 + .../openshift/release/update-to-head.sh | 38 + .../knative.dev/serving/pkg/activator/OWNERS | 13 + .../serving/pkg/activator/README.md | 11 + .../serving/pkg/activator/activator.go | 26 + .../serving/pkg/activator/config}/doc.go | 5 +- .../serving/pkg/activator/config/store.go | 92 + .../activator/config/zz_generated.deepcopy.go | 46 + .../activator/handler/concurrency_reporter.go | 136 + .../handler/concurrency_reporter_test.go | 273 + .../pkg/activator/handler/context_handler.go | 99 + .../activator/handler/context_handler_test.go | 87 + .../serving/pkg/activator/handler/handler.go | 144 + .../pkg/activator/handler/handler_test.go | 509 + .../pkg/activator/handler/healthz_handler.go | 42 + .../activator/handler/healthz_handler_test.go | 79 + .../pkg/activator/handler/metric_handler.go | 63 + .../activator/handler/metric_handler_test.go | 118 + .../pkg/activator/handler/probe_handler.go | 42 + .../activator/handler/probe_handler_test.go | 112 
+ .../activator/handler/requestevent_handler.go | 63 + .../handler/requestevent_handler_test.go | 61 + .../handler/testdata/config-tracing.yaml | 57 + .../images/activator_activeRevision.png | Bin 0 -> 20137 bytes .../images/activator_reserveRevision.png | Bin 0 -> 24820 bytes .../pkg/activator/images/routeTraffic.png | Bin 0 -> 28562 bytes .../serving/pkg/activator/net/doc.go | 19 + .../serving/pkg/activator/net/helpers.go | 60 + .../serving/pkg/activator/net/helpers_test.go | 147 + .../pkg/activator/net/revision_backends.go | 482 + .../activator/net/revision_backends_test.go | 1104 ++ .../serving/pkg/activator/net/throttler.go | 677 + .../pkg/activator/net/throttler_test.go | 932 ++ .../serving/pkg/activator/stats_reporter.go | 191 + .../pkg/activator/stats_reporter_test.go | 173 + .../pkg/activator/testing/roundtripper.go | 162 + .../serving/pkg/activator/util/header.go} | 32 +- .../serving/pkg/activator/util/header_test.go | 76 + .../knative.dev/serving/pkg/apis/OWNERS | 13 + .../serving/pkg/apis/autoscaling/OWNERS | 10 + .../apis/autoscaling/annotation_validation.go | 146 + .../autoscaling/annotation_validation_test.go | 246 + .../serving/pkg/apis/autoscaling/register.go | 25 + .../pkg/apis/autoscaling/v1alpha1/doc.go | 0 .../autoscaling/v1alpha1/metric_defaults.go | 31 + .../autoscaling/v1alpha1/metric_lifecycle.go | 73 + .../v1alpha1/metric_lifecycle_test.go | 175 + .../apis/autoscaling/v1alpha1/metric_types.go | 79 + .../autoscaling/v1alpha1/metric_validation.go | 39 + .../v1alpha1/metric_validation_test.go | 110 + .../apis/autoscaling/v1alpha1/pa_defaults.go | 4 +- .../autoscaling/v1alpha1/pa_defaults_test.go | 112 + .../apis/autoscaling/v1alpha1/pa_lifecycle.go | 114 +- .../autoscaling/v1alpha1/pa_lifecycle_test.go | 986 ++ .../pkg/apis/autoscaling/v1alpha1/pa_types.go | 51 +- .../autoscaling/v1alpha1/pa_validation.go | 42 + .../v1alpha1/pa_validation_test.go | 248 + .../v1alpha1/podscalable_implements_test.go | 39 + 
.../autoscaling/v1alpha1/podscalable_types.go | 5 +- .../pkg/apis/autoscaling/v1alpha1/register.go | 4 +- .../autoscaling/v1alpha1/register_test.go | 43 + .../v1alpha1/zz_generated.deepcopy.go | 110 +- .../serving/pkg/apis/config/defaults.go | 21 +- .../serving/pkg/apis/config/defaults_test.go | 227 + .../serving/pkg/apis/config/doc.go | 0 .../serving/pkg/apis/config/store.go | 2 +- .../serving/pkg/apis/config/store_test.go | 78 + .../apis/config/testdata/config-defaults.yaml | 1 + .../pkg/apis/config/zz_generated.deepcopy.go | 2 +- .../knative.dev/serving/pkg/apis/doc.go | 22 + .../serving/pkg/apis/networking/OWNERS | 10 + .../pkg/apis/networking/generic_types.go | 4 +- .../pkg/apis/networking/generic_types_test.go | 56 + .../serving/pkg/apis/networking/ports.go | 0 .../serving/pkg/apis/networking/ports_test.go | 67 + .../serving/pkg/apis/networking/register.go | 44 +- .../v1alpha1/certificate_defaults.go | 0 .../v1alpha1/certificate_lifecycle.go | 16 +- .../v1alpha1/certificate_lifecycle_test.go | 138 + .../networking/v1alpha1/certificate_types.go | 8 +- .../v1alpha1/certificate_validation.go | 2 +- .../v1alpha1/certificate_validation_test.go | 95 + .../pkg/apis/networking/v1alpha1/doc.go | 6 +- .../networking/v1alpha1/ingress_defaults.go | 17 +- .../v1alpha1/ingress_defaults_test.go | 248 + .../networking/v1alpha1/ingress_lifecycle.go | 33 +- .../v1alpha1/ingress_lifecycle_test.go | 151 + .../apis/networking/v1alpha1/ingress_types.go | 42 +- .../networking/v1alpha1/ingress_validation.go | 6 +- .../v1alpha1/ingress_validation_test.go | 391 + .../pkg/apis/networking/v1alpha1/register.go | 4 +- .../apis/networking/v1alpha1/register_test.go | 56 + .../v1alpha1/serverlessservice_defaults.go | 0 .../v1alpha1/serverlessservice_lifecycle.go | 44 +- .../serverlessservice_lifecycle_test.go | 107 + .../v1alpha1/serverlessservice_types.go | 18 +- .../v1alpha1/serverlessservice_validation.go | 4 +- .../serverlessservice_validation_test.go | 150 + 
.../v1alpha1/zz_generated.deepcopy.go | 81 +- .../serving/pkg/apis/roadmap-2018.md | 69 + .../serving/pkg/apis/serving/fieldmask.go | 2 +- .../pkg/apis/serving/fieldmask_test.go | 648 + .../serving/pkg/apis/serving/k8s_lifecycle.go | 78 + .../pkg/apis/serving/k8s_lifecycle_test.go | 185 + .../pkg/apis/serving/k8s_validation.go | 107 +- .../pkg/apis/serving/k8s_validation_test.go | 1153 ++ .../pkg/apis/serving/metadata_validation.go | 171 + .../apis/serving/metadata_validation_test.go | 495 + .../serving/pkg/apis/serving/register.go | 10 +- .../serving/v1/configuration_conversion.go | 34 + .../v1/configuration_conversion_test.go | 34 + .../serving/v1}/configuration_defaults.go | 12 +- .../serving/v1/configuration_defaults_test.go | 247 + .../serving/v1}/configuration_lifecycle.go | 4 +- .../v1/configuration_lifecycle_test.go | 111 + .../apis/serving/v1}/configuration_types.go | 10 +- .../serving/v1}/configuration_validation.go | 45 +- .../v1/configuration_validation_test.go | 887 ++ .../serving/pkg/apis/serving/v1}/contexts.go | 6 +- .../pkg/apis/serving/v1/contexts_test.go | 62 + .../serving/pkg/apis/serving/v1/doc.go | 21 + .../serving/pkg/apis/serving/v1/register.go | 61 + .../pkg/apis/serving/v1/register_test.go | 41 + .../apis/serving/v1/revision_conversion.go | 34 + .../serving/v1/revision_conversion_test.go | 34 + .../pkg/apis/serving/v1/revision_defaults.go | 102 + .../apis/serving/v1/revision_defaults_test.go | 342 + .../pkg/apis/serving/v1/revision_lifecycle.go | 55 + .../serving/v1/revision_lifecycle_test.go | 139 + .../pkg/apis/serving/v1}/revision_types.go | 25 +- .../apis/serving/v1}/revision_validation.go | 73 +- .../serving/v1/revision_validation_test.go | 906 ++ .../pkg/apis/serving/v1/route_conversion.go | 34 + .../apis/serving/v1/route_conversion_test.go | 34 + .../pkg/apis/serving/v1}/route_defaults.go | 19 +- .../apis/serving/v1/route_defaults_test.go | 262 + .../pkg/apis/serving/v1}/route_lifecycle.go | 4 +- 
.../apis/serving/v1/route_lifecycle_test.go | 111 + .../pkg/apis/serving/v1}/route_types.go | 25 +- .../pkg/apis/serving/v1}/route_validation.go | 99 +- .../apis/serving/v1/route_validation_test.go | 830 + .../pkg/apis/serving/v1/service_conversion.go | 34 + .../serving/v1/service_conversion_test.go | 34 + .../pkg/apis/serving/v1}/service_defaults.go | 29 +- .../apis/serving/v1/service_defaults_test.go | 351 + .../pkg/apis/serving/v1}/service_lifecycle.go | 4 +- .../apis/serving/v1/service_lifecycle_test.go | 111 + .../pkg/apis/serving/v1}/service_types.go | 10 +- .../apis/serving/v1}/service_validation.go | 28 +- .../serving/v1/service_validation_test.go | 787 + .../apis/serving/v1}/zz_generated.deepcopy.go | 28 +- .../pkg/apis/serving/v1alpha1/README.md | 9 + .../v1alpha1/configuration_conversion.go | 29 +- .../v1alpha1/configuration_conversion_test.go | 237 + .../v1alpha1/configuration_defaults.go | 22 +- .../v1alpha1/configuration_defaults_test.go | 319 + .../v1alpha1/configuration_lifecycle.go | 10 +- .../v1alpha1/configuration_lifecycle_test.go | 338 + .../serving/v1alpha1/configuration_types.go | 10 +- .../v1alpha1/configuration_validation.go | 11 +- .../v1alpha1/configuration_validation_test.go | 860 + .../apis/serving/v1alpha1/conversion_error.go | 2 +- .../serving/v1alpha1/conversion_error_test.go | 27 + .../serving/pkg/apis/serving/v1alpha1/doc.go | 0 .../pkg/apis/serving/v1alpha1/register.go | 2 +- .../apis/serving/v1alpha1/register_test.go | 41 + .../serving/v1alpha1/revision_conversion.go | 25 +- .../v1alpha1/revision_conversion_test.go | 293 + .../serving/v1alpha1/revision_defaults.go | 25 +- .../v1alpha1/revision_defaults_test.go | 312 + .../serving/v1alpha1/revision_lifecycle.go | 168 +- .../v1alpha1/revision_lifecycle_test.go | 843 + .../apis/serving/v1alpha1/revision_types.go | 29 +- .../serving/v1alpha1/revision_validation.go | 89 +- .../v1alpha1/revision_validation_test.go | 887 ++ .../apis/serving/v1alpha1/route_conversion.go | 38 +- 
.../serving/v1alpha1/route_conversion_test.go | 326 + .../apis/serving/v1alpha1/route_defaults.go | 29 +- .../serving/v1alpha1/route_defaults_test.go | 325 + .../apis/serving/v1alpha1/route_lifecycle.go | 12 +- .../serving/v1alpha1/route_lifecycle_test.go | 385 + .../pkg/apis/serving/v1alpha1/route_types.go | 22 +- .../apis/serving/v1alpha1/route_validation.go | 22 +- .../serving/v1alpha1/route_validation_test.go | 669 + .../serving/v1alpha1/service_conversion.go | 57 +- .../v1alpha1/service_conversion_test.go | 706 + .../apis/serving/v1alpha1/service_defaults.go | 39 +- .../serving/v1alpha1/service_defaults_test.go | 890 ++ .../serving/v1alpha1/service_lifecycle.go | 6 +- .../v1alpha1/service_lifecycle_test.go | 614 + .../apis/serving/v1alpha1/service_types.go | 8 +- .../serving/v1alpha1/service_validation.go | 13 +- .../v1alpha1/service_validation_test.go | 1343 ++ .../serving/v1alpha1/zz_generated.deepcopy.go | 14 +- .../v1beta1/configuration_conversion.go | 2 +- .../v1beta1/configuration_conversion_test.go | 34 + .../serving/v1beta1/configuration_defaults.go | 37 + .../v1beta1/configuration_defaults_test.go | 180 + .../v1beta1/configuration_lifecycle.go | 24 + .../v1beta1/configuration_lifecycle_test.go | 55 + .../serving/v1beta1/configuration_types.go | 73 + .../v1beta1/configuration_validation.go | 83 + .../v1beta1/configuration_validation_test.go | 863 + .../serving/pkg/apis/serving/v1beta1/doc.go | 0 .../pkg/apis/serving/v1beta1/register.go | 2 +- .../pkg/apis/serving/v1beta1/register_test.go | 41 + .../serving/v1beta1/revision_conversion.go | 2 +- .../v1beta1/revision_conversion_test.go | 34 + .../apis/serving/v1beta1/revision_defaults.go | 26 + .../serving/v1beta1/revision_defaults_test.go | 298 + .../serving/v1beta1/revision_lifecycle.go | 9 - .../v1beta1/revision_lifecycle_test.go | 185 + .../apis/serving/v1beta1/revision_types.go | 73 + .../serving/v1beta1/revision_validation.go | 68 + .../v1beta1/revision_validation_test.go | 858 + 
.../apis/serving/v1beta1/route_conversion.go | 2 +- .../serving/v1beta1/route_conversion_test.go | 34 + .../apis/serving/v1beta1/route_defaults.go | 36 + .../serving/v1beta1/route_defaults_test.go | 262 + .../apis/serving/v1beta1/route_lifecycle.go | 24 + .../serving/v1beta1/route_lifecycle_test.go | 55 + .../pkg/apis/serving/v1beta1/route_types.go | 77 + .../apis/serving/v1beta1/route_validation.go | 60 + .../serving/v1beta1/route_validation_test.go | 814 + .../serving/v1beta1/service_conversion.go | 2 +- .../v1beta1/service_conversion_test.go | 34 + .../apis/serving/v1beta1/service_defaults.go | 35 + .../serving/v1beta1/service_defaults_test.go | 352 + .../apis/serving/v1beta1/service_lifecycle.go | 24 + .../serving/v1beta1/service_lifecycle_test.go | 55 + .../pkg/apis/serving/v1beta1/service_types.go | 82 + .../serving/v1beta1/service_validation.go | 65 + .../v1beta1/service_validation_test.go | 788 + .../serving/v1beta1/zz_generated.deepcopy.go | 269 + .../knative.dev/serving/pkg/autoscaler/OWNERS | 10 + .../serving/pkg/autoscaler/README.md | 1 + .../pkg/autoscaler/aggregation/aggregation.go | 54 + .../aggregation/aggregation_test.go | 108 + .../pkg/autoscaler/aggregation/bucketing.go | 214 + .../autoscaler/aggregation/bucketing_test.go | 474 + .../serving/pkg/autoscaler/autoscaler.go | 253 + .../serving/pkg/autoscaler/autoscaler_test.go | 505 + .../serving/pkg/autoscaler/collector.go | 361 + .../serving/pkg/autoscaler/collector_test.go | 448 + .../serving/pkg/autoscaler/config.go | 234 + .../serving/pkg/autoscaler/config_test.go | 277 + .../knative.dev/serving/pkg/autoscaler/doc.go | 39 + .../pkg/autoscaler/fake/fake_metric_client.go | 60 + .../pkg/autoscaler/http_scrape_client.go | 122 + .../pkg/autoscaler/http_scrape_client_test.go | 229 + .../pkg/autoscaler/metrics_provider.go | 99 + .../pkg/autoscaler/metrics_provider_test.go | 143 + .../serving/pkg/autoscaler/multiscaler.go | 341 + .../pkg/autoscaler/multiscaler_test.go | 516 + 
.../serving/pkg/autoscaler/sample_size.go | 52 + .../pkg/autoscaler/sample_size_test.go | 55 + .../serving/pkg/autoscaler/stats_reporter.go | 272 + .../pkg/autoscaler/stats_reporter_test.go | 143 + .../serving/pkg/autoscaler/stats_scraper.go | 226 + .../pkg/autoscaler/stats_scraper_test.go | 271 + .../serving/pkg/autoscaler/statserver/doc.go | 23 + .../pkg/autoscaler/statserver/server.go | 205 + .../pkg/autoscaler/statserver/server_test.go | 271 + .../testdata/config-autoscaler.yaml | 1 + .../pkg/autoscaler/zz_generated.deepcopy.go | 56 + .../clientset/versioned/clientset.go | 111 + .../certmanager}/clientset/versioned/doc.go | 2 +- .../versioned/fake/clientset_generated.go | 89 + .../clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 58 + .../clientset/versioned/scheme/doc.go | 2 +- .../clientset/versioned/scheme/register.go | 58 + .../typed/acme/v1alpha2/acme_client.go | 94 + .../typed/acme/v1alpha2/challenge.go | 191 + .../versioned/typed/acme/v1alpha2/doc.go | 20 + .../versioned/typed/acme/v1alpha2/fake/doc.go | 20 + .../acme/v1alpha2/fake/fake_acme_client.go | 44 + .../acme/v1alpha2/fake/fake_challenge.go | 140 + .../typed/acme/v1alpha2/fake/fake_order.go | 140 + .../acme/v1alpha2/generated_expansion.go | 23 + .../versioned/typed/acme/v1alpha2/order.go | 191 + .../typed/certmanager/v1alpha2/certificate.go | 191 + .../v1alpha2/certificaterequest.go | 191 + .../v1alpha2/certmanager_client.go | 104 + .../certmanager/v1alpha2/clusterissuer.go | 180 + .../typed/certmanager/v1alpha2/doc.go | 20 + .../typed/certmanager/v1alpha2/fake/doc.go | 20 + .../v1alpha2/fake/fake_certificate.go | 140 + .../v1alpha2/fake/fake_certificaterequest.go | 140 + .../v1alpha2/fake/fake_certmanager_client.go | 52 + .../v1alpha2/fake/fake_clusterissuer.go | 131 + .../certmanager/v1alpha2/fake/fake_issuer.go | 140 + .../v1alpha2/generated_expansion.go | 27 + .../typed/certmanager/v1alpha2/issuer.go | 191 + .../externalversions/acme/interface.go | 46 + 
.../acme/v1alpha2/challenge.go | 89 + .../acme/v1alpha2/interface.go | 52 + .../externalversions/acme/v1alpha2/order.go | 89 + .../externalversions/certmanager/interface.go | 46 + .../certmanager/v1alpha2/certificate.go | 89 + .../v1alpha2/certificaterequest.go | 89 + .../certmanager/v1alpha2/clusterissuer.go | 88 + .../certmanager/v1alpha2/interface.go | 66 + .../certmanager/v1alpha2/issuer.go | 89 + .../informers/externalversions/factory.go | 186 + .../informers/externalversions/generic.go | 75 + .../internalinterfaces/factory_interfaces.go | 40 + .../certmanager/injection/client/client.go | 49 + .../certmanager/injection/client/fake/fake.go | 54 + .../acme/v1alpha2/challenge/challenge.go | 52 + .../acme/v1alpha2/challenge/fake/fake.go | 40 + .../acme/v1alpha2/order/fake/fake.go | 40 + .../informers/acme/v1alpha2/order/order.go | 52 + .../v1alpha2/certificate/certificate.go | 52 + .../v1alpha2/certificate/fake/fake.go | 40 + .../certificaterequest/certificaterequest.go | 52 + .../v1alpha2/certificaterequest/fake/fake.go | 40 + .../v1alpha2/clusterissuer/clusterissuer.go | 52 + .../v1alpha2/clusterissuer/fake/fake.go | 40 + .../certmanager/v1alpha2/issuer/fake/fake.go | 40 + .../certmanager/v1alpha2/issuer/issuer.go | 52 + .../injection/informers/factory/factory.go | 56 + .../injection/informers/factory/fake/fake.go | 45 + .../listers/acme/v1alpha2/challenge.go | 94 + .../acme/v1alpha2/expansion_generated.go | 35 + .../listers/acme/v1alpha2/order.go | 94 + .../certmanager/v1alpha2/certificate.go | 94 + .../v1alpha2/certificaterequest.go | 94 + .../certmanager/v1alpha2/clusterissuer.go | 65 + .../v1alpha2/expansion_generated.go | 47 + .../listers/certmanager/v1alpha2/issuer.go | 94 + .../client/clientset/versioned/clientset.go | 51 +- .../pkg/client/clientset/versioned/doc.go | 20 + .../versioned/fake/clientset_generated.go | 110 + .../client/clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 64 + 
.../client/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 12 +- .../v1alpha1/autoscaling_client.go | 14 +- .../typed/autoscaling}/v1alpha1/doc.go | 2 +- .../typed/autoscaling/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_autoscaling_client.go | 44 + .../autoscaling/v1alpha1/fake/fake_metric.go | 140 + .../v1alpha1/fake/fake_podautoscaler.go | 140 + .../v1alpha1/generated_expansion.go | 4 +- .../typed/autoscaling/v1alpha1/metric.go | 191 + .../autoscaling/v1alpha1/podautoscaler.go | 23 +- .../typed/networking/v1alpha1/certificate.go | 23 +- .../typed/networking}/v1alpha1/doc.go | 2 +- .../typed/networking/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_certificate.go | 140 + .../networking/v1alpha1/fake/fake_ingress.go | 140 + .../v1alpha1/fake/fake_networking_client.go | 48 + .../v1alpha1/fake/fake_serverlessservice.go | 140 + .../v1alpha1/generated_expansion.go | 4 +- .../typed/networking/v1alpha1/ingress.go | 23 +- .../networking/v1alpha1/networking_client.go | 14 +- .../networking/v1alpha1/serverlessservice.go | 23 +- .../typed/serving/v1/configuration.go | 191 + .../versioned/typed/serving/v1/doc.go | 20 + .../versioned/typed/serving/v1/fake/doc.go | 20 + .../serving/v1/fake/fake_configuration.go | 140 + .../typed/serving/v1/fake/fake_revision.go | 140 + .../typed/serving/v1/fake/fake_route.go | 140 + .../typed/serving/v1/fake/fake_service.go | 140 + .../serving/v1/fake/fake_serving_client.go | 52 + .../typed/serving/v1/generated_expansion.go | 27 + .../versioned/typed/serving/v1/revision.go | 191 + .../versioned/typed/serving/v1/route.go | 191 + .../versioned/typed/serving/v1/service.go | 191 + .../typed/serving/v1/serving_client.go | 104 + .../typed/serving/v1alpha1/configuration.go | 23 +- .../versioned/typed/serving}/v1alpha1/doc.go | 2 +- .../typed/serving/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_configuration.go | 140 + .../serving/v1alpha1/fake/fake_revision.go | 140 + 
.../typed/serving/v1alpha1/fake/fake_route.go | 140 + .../serving/v1alpha1/fake/fake_service.go | 140 + .../v1alpha1/fake/fake_serving_client.go | 52 + .../serving/v1alpha1/generated_expansion.go | 2 +- .../typed/serving/v1alpha1/revision.go | 23 +- .../versioned/typed/serving/v1alpha1/route.go | 23 +- .../typed/serving/v1alpha1/service.go | 23 +- .../typed/serving/v1alpha1/serving_client.go | 9 +- .../typed/serving/v1beta1/configuration.go | 23 +- .../versioned/typed/serving/v1beta1/doc.go | 2 +- .../typed/serving/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_configuration.go | 140 + .../serving/v1beta1/fake/fake_revision.go | 140 + .../typed/serving/v1beta1/fake/fake_route.go | 140 + .../serving/v1beta1/fake/fake_service.go | 140 + .../v1beta1/fake/fake_serving_client.go | 52 + .../serving/v1beta1/generated_expansion.go | 2 +- .../typed/serving/v1beta1/revision.go | 23 +- .../versioned/typed/serving/v1beta1/route.go | 23 +- .../typed/serving/v1beta1/service.go | 23 +- .../typed/serving/v1beta1/serving_client.go | 9 +- .../externalversions/autoscaling/interface.go | 46 + .../autoscaling/v1alpha1/interface.go | 52 + .../autoscaling/v1alpha1/metric.go | 89 + .../autoscaling/v1alpha1/podautoscaler.go | 89 + .../informers/externalversions/factory.go | 192 + .../informers/externalversions/generic.go | 106 + .../internalinterfaces/factory_interfaces.go | 40 + .../externalversions/networking/interface.go | 46 + .../networking/v1alpha1/certificate.go | 89 + .../networking/v1alpha1/ingress.go | 89 + .../networking/v1alpha1/interface.go | 59 + .../networking/v1alpha1/serverlessservice.go | 89 + .../externalversions/serving/interface.go | 62 + .../serving/v1/configuration.go | 89 + .../externalversions/serving/v1/interface.go | 66 + .../externalversions/serving/v1/revision.go | 89 + .../externalversions/serving/v1/route.go | 89 + .../externalversions/serving/v1/service.go | 89 + .../serving/v1alpha1/configuration.go | 89 + .../serving/v1alpha1/interface.go | 66 + 
.../serving/v1alpha1/revision.go | 89 + .../serving/v1alpha1/route.go | 89 + .../serving/v1alpha1/service.go | 89 + .../serving/v1beta1/configuration.go | 89 + .../serving/v1beta1/interface.go | 66 + .../serving/v1beta1/revision.go | 89 + .../externalversions/serving/v1beta1/route.go | 89 + .../serving/v1beta1/service.go | 89 + .../pkg/client/injection/client/client.go | 49 + .../pkg/client/injection/client/fake/fake.go | 54 + .../v1alpha1/podscalable/fake/fake.go | 30 + .../v1alpha1/podscalable/podscalable.go | 60 + .../autoscaling/v1alpha1/metric/fake/fake.go | 40 + .../autoscaling/v1alpha1/metric/metric.go | 52 + .../v1alpha1/podautoscaler/fake/fake.go | 40 + .../v1alpha1/podautoscaler/podautoscaler.go | 52 + .../injection/informers/factory/factory.go | 56 + .../injection/informers/factory/fake/fake.go | 45 + .../v1alpha1/certificate/certificate.go | 52 + .../v1alpha1/certificate/fake/fake.go | 40 + .../networking/v1alpha1/ingress/fake/fake.go | 40 + .../networking/v1alpha1/ingress/ingress.go | 52 + .../v1alpha1/serverlessservice/fake/fake.go | 40 + .../serverlessservice/serverlessservice.go | 52 + .../serving/v1/configuration/configuration.go | 52 + .../serving/v1/configuration/fake/fake.go | 40 + .../serving/v1/revision/fake/fake.go | 40 + .../informers/serving/v1/revision/revision.go | 52 + .../informers/serving/v1/route/fake/fake.go | 40 + .../informers/serving/v1/route/route.go | 52 + .../informers/serving/v1/service/fake/fake.go | 40 + .../informers/serving/v1/service/service.go | 52 + .../v1alpha1/configuration/configuration.go | 52 + .../v1alpha1/configuration/fake/fake.go | 40 + .../serving/v1alpha1/revision/fake/fake.go | 40 + .../serving/v1alpha1/revision/revision.go | 52 + .../serving/v1alpha1/route/fake/fake.go | 40 + .../informers/serving/v1alpha1/route/route.go | 52 + .../serving/v1alpha1/service/fake/fake.go | 40 + .../serving/v1alpha1/service/service.go | 52 + .../v1beta1/configuration/configuration.go | 52 + 
.../v1beta1/configuration/fake/fake.go | 40 + .../serving/v1beta1/revision/fake/fake.go | 40 + .../serving/v1beta1/revision/revision.go | 52 + .../serving/v1beta1/route/fake/fake.go | 40 + .../informers/serving/v1beta1/route/route.go | 52 + .../serving/v1beta1/service/fake/fake.go | 40 + .../serving/v1beta1/service/service.go | 52 + .../istio/clientset/versioned/clientset.go | 97 + .../client/istio/clientset/versioned/doc.go | 20 + .../versioned/fake/clientset_generated.go | 82 + .../istio/clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 56 + .../istio/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 + .../networking/v1alpha3/destinationrule.go | 174 + .../typed/networking/v1alpha3/doc.go | 20 + .../typed/networking/v1alpha3/envoyfilter.go | 174 + .../typed/networking/v1alpha3/fake/doc.go | 20 + .../v1alpha3/fake/fake_destinationrule.go | 128 + .../v1alpha3/fake/fake_envoyfilter.go | 128 + .../networking/v1alpha3/fake/fake_gateway.go | 128 + .../v1alpha3/fake/fake_networking_client.go | 60 + .../v1alpha3/fake/fake_serviceentry.go | 128 + .../networking/v1alpha3/fake/fake_sidecar.go | 128 + .../v1alpha3/fake/fake_virtualservice.go | 128 + .../typed/networking/v1alpha3/gateway.go | 174 + .../v1alpha3/generated_expansion.go | 31 + .../networking/v1alpha3/networking_client.go | 114 + .../typed/networking/v1alpha3/serviceentry.go | 174 + .../typed/networking/v1alpha3/sidecar.go | 174 + .../networking/v1alpha3/virtualservice.go | 174 + .../informers/externalversions/factory.go | 180 + .../informers/externalversions/generic.go | 72 + .../internalinterfaces/factory_interfaces.go | 40 + .../externalversions/networking/interface.go | 46 + .../networking/v1alpha3/destinationrule.go | 89 + .../networking/v1alpha3/envoyfilter.go | 89 + .../networking/v1alpha3/gateway.go | 89 + .../networking/v1alpha3/interface.go | 80 + .../networking/v1alpha3/serviceentry.go | 89 + .../networking/v1alpha3/sidecar.go | 89 
+ .../networking/v1alpha3/virtualservice.go | 89 + .../client/istio/injection/client/client.go | 49 + .../istio/injection/client/fake/fake.go | 54 + .../injection/informers/factory/factory.go | 56 + .../injection/informers/factory/fake/fake.go | 45 + .../destinationrule/destinationrule.go | 52 + .../v1alpha3/destinationrule/fake/fake.go | 40 + .../v1alpha3/envoyfilter/envoyfilter.go | 52 + .../v1alpha3/envoyfilter/fake/fake.go | 40 + .../networking/v1alpha3/gateway/fake/fake.go | 40 + .../networking/v1alpha3/gateway/gateway.go | 52 + .../v1alpha3/serviceentry/fake/fake.go | 40 + .../v1alpha3/serviceentry/serviceentry.go | 52 + .../networking/v1alpha3/sidecar/fake/fake.go | 40 + .../networking/v1alpha3/sidecar/sidecar.go | 52 + .../v1alpha3/virtualservice/fake/fake.go | 40 + .../v1alpha3/virtualservice/virtualservice.go | 52 + .../networking/v1alpha3/destinationrule.go | 94 + .../networking/v1alpha3/envoyfilter.go | 94 + .../v1alpha3/expansion_generated.go | 67 + .../listers/networking/v1alpha3/gateway.go | 94 + .../networking/v1alpha3/serviceentry.go | 94 + .../listers/networking/v1alpha3/sidecar.go | 94 + .../networking/v1alpha3/virtualservice.go | 94 + .../v1alpha1/expansion_generated.go | 35 + .../listers/autoscaling/v1alpha1/metric.go | 94 + .../autoscaling/v1alpha1/podautoscaler.go | 94 + .../networking/v1alpha1/certificate.go | 94 + .../v1alpha1/expansion_generated.go | 43 + .../listers/networking/v1alpha1/ingress.go | 94 + .../networking/v1alpha1/serverlessservice.go | 94 + .../listers/serving/v1/configuration.go | 94 + .../listers/serving/v1/expansion_generated.go | 51 + .../pkg/client/listers/serving/v1/revision.go | 94 + .../pkg/client/listers/serving/v1/route.go | 94 + .../pkg/client/listers/serving/v1/service.go | 94 + .../listers/serving/v1alpha1/configuration.go | 94 + .../serving/v1alpha1/expansion_generated.go | 51 + .../listers/serving/v1alpha1/revision.go | 94 + .../client/listers/serving/v1alpha1/route.go | 94 + 
.../listers/serving/v1alpha1/service.go | 94 + .../listers/serving/v1beta1/configuration.go | 94 + .../serving/v1beta1/expansion_generated.go | 51 + .../listers/serving/v1beta1/revision.go | 94 + .../client/listers/serving/v1beta1/route.go | 94 + .../client/listers/serving/v1beta1/service.go | 94 + .../serving/pkg/deployment/config.go | 67 + .../serving/pkg/deployment/config_test.go | 109 + .../knative.dev/serving/pkg/deployment/doc.go | 19 + .../testdata/config-deployment.yaml | 1 + .../pkg/deployment/zz_generated.deepcopy.go | 48 + test/vendor/knative.dev/serving/pkg/gc/OWNERS | 10 + .../knative.dev/serving/pkg/gc/config.go | 95 + .../knative.dev/serving/pkg/gc/config_test.go | 126 + test/vendor/knative.dev/serving/pkg/gc/doc.go | 20 + .../serving/pkg/gc/testdata/config-gc.yaml | 1 + .../serving/pkg/gc/zz_generated.deepcopy.go | 37 + .../pkg/gc/zz_generated.deepcopy_test.go | 52 + .../knative.dev/serving/pkg/http/OWNERS | 10 + .../knative.dev/serving/pkg/http/header.go | 37 + .../serving/pkg/http/header_test.go | 61 + .../serving/pkg/http/request_log.go | 177 + .../serving/pkg/http/request_log_test.go | 289 + .../serving/pkg/http/response_recorder.go | 93 + .../pkg/http/response_recorder_test.go | 85 + .../knative.dev/serving/pkg/logging/OWNERS | 10 + .../serving/pkg/logging/config_test.go | 245 + .../serving/pkg/logging/sync_file_writer.go | 44 + .../pkg/logging/sync_file_writer_test.go | 43 + .../pkg/logging/testdata/config-logging.yaml | 1 + .../logging/testdata/test-config-logging.yaml | 1 + .../knative.dev/serving/pkg/metrics/OWNERS | 10 + .../knative.dev/serving/pkg/metrics/config.go | 92 + .../serving/pkg/metrics/config_test.go | 117 + .../knative.dev/serving/pkg/metrics/doc.go | 19 + .../knative.dev/serving/pkg/metrics/key.go | 41 + .../testdata/config-observability.yaml | 1 + .../pkg/metrics/zz_generated.deepcopy.go | 37 + .../knative.dev/serving/pkg/network/OWNERS | 10 + .../serving/pkg/network/bufferpool.go | 55 + 
.../serving/pkg/network/bufferpool_test.go | 67 + .../knative.dev/serving/pkg/network/doc.go | 20 + .../serving/pkg/network/ingress/doc.go | 19 + .../serving/pkg/network/ingress/ingress.go | 99 + .../pkg/network/ingress/ingress_test.go | 243 + .../serving/pkg/network/network.go | 406 + .../serving/pkg/network/network_test.go | 838 + .../serving/pkg/network/probe_handler.go | 52 + .../serving/pkg/network/probe_handler_test.go | 103 + .../serving/pkg/network/status/status.go | 424 + .../serving/pkg/network/status/status_test.go | 564 + .../pkg/network/testdata/config-network.yaml | 1 + .../pkg/network/zz_generated.deepcopy.go | 76 + .../knative.dev/serving/pkg/pool/OWNERS | 10 + .../knative.dev/serving/pkg/pool/doc.go | 21 + .../knative.dev/serving/pkg/pool/interface.go | 36 + .../knative.dev/serving/pkg/pool/pool.go | 117 + .../knative.dev/serving/pkg/pool/pool_test.go | 239 + .../knative.dev/serving/pkg/queue/OWNERS | 10 + .../knative.dev/serving/pkg/queue/breaker.go | 267 + .../serving/pkg/queue/breaker_test.go | 452 + .../serving/pkg/queue/constants.go} | 21 +- .../knative.dev/serving/pkg/queue/doc.go | 18 + .../serving/pkg/queue/forwarded_shim.go | 101 + .../serving/pkg/queue/forwarded_shim_test.go | 117 + .../serving/pkg/queue/health/health_state.go | 140 + .../pkg/queue/health/health_state_test.go | 217 + .../serving/pkg/queue/health/probe.go | 105 + .../serving/pkg/queue/health/probe_test.go | 191 + .../pkg/queue/prometheus_stats_reporter.go | 142 + .../queue/prometheus_stats_reporter_test.go | 202 + .../serving/pkg/queue/readiness/probe.go | 135 + .../pkg/queue/readiness/probe_encoding.go | 46 + .../queue/readiness/probe_encoding_test.go | 98 + .../serving/pkg/queue/readiness/probe_test.go | 583 + .../serving/pkg/queue/request_metric.go | 77 + .../serving/pkg/queue/request_metric_test.go | 154 + .../knative.dev/serving/pkg/queue/stats.go | 119 + .../serving/pkg/queue/stats/stats_reporter.go | 180 + .../pkg/queue/stats/stats_reporter_test.go | 193 + 
.../serving/pkg/queue/stats_test.go | 349 + .../knative.dev/serving/pkg/queue/timeout.go | 172 + .../serving/pkg/queue/timeout_test.go | 139 + .../knative.dev/serving/pkg/reconciler/OWNERS | 10 + .../pkg/reconciler/accessor/core/secret.go | 74 + .../reconciler/accessor/core/secret_test.go | 194 + .../serving/pkg/reconciler/accessor/errors.go | 50 + .../pkg/reconciler/accessor/errors_test.go | 95 + .../accessor/istio/virtualservice.go | 86 + .../accessor/istio/virtualservice_test.go | 170 + .../accessor/networking/certificate.go | 80 + .../accessor/networking/certificate_test.go | 169 + .../serving/pkg/reconciler/autoscaling/OWNERS | 10 + .../pkg/reconciler/autoscaling/config/doc.go | 21 + .../reconciler/autoscaling/config/store.go | 84 + .../autoscaling/config/store_test.go | 55 + .../config/testdata/config-autoscaler.yaml | 91 + .../reconciler/autoscaling/hpa/controller.go | 103 + .../pkg/reconciler/autoscaling/hpa/hpa.go | 165 + .../reconciler/autoscaling/hpa/hpa_test.go | 585 + .../autoscaling/hpa/resources/hpa.go | 91 + .../autoscaling/hpa/resources/hpa_test.go | 278 + .../reconciler/autoscaling/kpa/controller.go | 117 + .../pkg/reconciler/autoscaling/kpa/doc.go | 23 + .../pkg/reconciler/autoscaling/kpa/kpa.go | 300 + .../reconciler/autoscaling/kpa/kpa_test.go | 1538 ++ .../autoscaling/kpa/resources/decider.go | 77 + .../autoscaling/kpa/resources/decider_test.go | 276 + .../pkg/reconciler/autoscaling/kpa/scaler.go | 292 + .../reconciler/autoscaling/kpa/scaler_test.go | 670 + .../pkg/reconciler/autoscaling/reconciler.go | 161 + .../reconciler/autoscaling/resources/doc.go | 19 + .../autoscaling/resources/metric.go | 69 + .../autoscaling/resources/metric_test.go | 185 + .../autoscaling/resources/names/doc.go | 19 + .../autoscaling/resources/names/names.go | 23 + .../autoscaling/resources/names/names_test.go | 25 + .../reconciler/autoscaling/resources/sks.go | 47 + .../autoscaling/resources/sks_test.go | 94 + .../autoscaling/resources/target.go | 65 + 
.../autoscaling/resources/target_test.go | 179 + .../pkg/reconciler/certificate/certificate.go | 301 + .../certificate/certificate_test.go | 573 + .../certificate/config/cert_manager.go | 55 + .../certificate/config/cert_manager_test.go | 92 + .../pkg/reconciler/certificate/config/doc.go | 20 + .../reconciler/certificate/config/store.go | 83 + .../certificate/config/store_test.go | 51 + .../config/testdata/config-certmanager.yaml | 1 + .../config/zz_generated.deepcopy.go | 46 + .../pkg/reconciler/certificate/controller.go | 97 + .../resources/cert_manager_certificate.go | 54 + .../cert_manager_certificate_test.go | 142 + .../reconciler/configuration/configuration.go | 364 + .../configuration/configuration_test.go | 493 + .../reconciler/configuration/controller.go | 58 + .../reconciler/configuration/queueing_test.go | 130 + .../reconciler/configuration/resources/doc.go | 19 + .../configuration/resources/revision.go | 90 + .../configuration/resources/revision_test.go | 302 + .../knative.dev/serving/pkg/reconciler/doc.go | 30 + .../serving/pkg/reconciler/filter.go | 100 + .../serving/pkg/reconciler/filter_test.go | 308 + .../serving/pkg/reconciler/gc/config/doc.go | 20 + .../serving/pkg/reconciler/gc/config/store.go | 68 + .../pkg/reconciler/gc/config/store_test.go | 47 + .../gc/config/testdata/config-gc.yaml | 1 + .../gc/config/zz_generated.deepcopy.go | 21 + .../serving/pkg/reconciler/gc/controller.go | 82 + .../serving/pkg/reconciler/gc/gc.go | 151 + .../serving/pkg/reconciler/gc/gc_test.go | 359 + .../serving/pkg/reconciler/ingress/OWNERS | 10 + .../pkg/reconciler/ingress/config/doc.go | 21 + .../pkg/reconciler/ingress/config/istio.go | 149 + .../reconciler/ingress/config/istio_test.go | 235 + .../pkg/reconciler/ingress/config/store.go | 87 + .../reconciler/ingress/config/store_test.go | 69 + .../ingress/config/testdata/config-istio.yaml | 1 + .../config/testdata/config-network.yaml | 1 + .../ingress/config/zz_generated.deepcopy.go | 63 + 
.../pkg/reconciler/ingress/controller.go | 132 + .../serving/pkg/reconciler/ingress/doc.go | 23 + .../serving/pkg/reconciler/ingress/ingress.go | 490 + .../pkg/reconciler/ingress/ingress_test.go | 1435 ++ .../serving/pkg/reconciler/ingress/lister.go | 175 + .../pkg/reconciler/ingress/lister_test.go | 1294 ++ .../pkg/reconciler/ingress/resources/doc.go | 19 + .../reconciler/ingress/resources/gateway.go | 290 + .../ingress/resources/gateway_test.go | 736 + .../reconciler/ingress/resources/names/doc.go | 18 + .../ingress/resources/names/names.go | 35 + .../ingress/resources/names/names_test.go | 82 + .../reconciler/ingress/resources/secret.go | 105 + .../ingress/resources/secret_test.go | 182 + .../ingress/resources/virtual_service.go | 291 + .../ingress/resources/virtual_service_test.go | 754 + .../pkg/reconciler/labeler/controller.go | 58 + .../serving/pkg/reconciler/labeler/doc.go} | 17 +- .../serving/pkg/reconciler/labeler/labeler.go | 67 + .../pkg/reconciler/labeler/labeler_test.go | 288 + .../serving/pkg/reconciler/labeler/labels.go | 226 + .../pkg/reconciler/metric/controller.go | 56 + .../serving/pkg/reconciler/metric/metric.go | 127 + .../pkg/reconciler/metric/metric_test.go | 259 + .../pkg/reconciler/nscert/config/store.go | 90 + .../pkg/reconciler/nscert/controller.go | 82 + .../serving/pkg/reconciler/nscert/nscert.go | 207 + .../pkg/reconciler/nscert/nscert_test.go | 477 + .../nscert/resources/names/names.go | 27 + .../nscert/resources/names/names_test.go | 31 + .../nscert/resources/wildcard_certificate.go | 43 + .../resources/wildcard_certificate_test.go | 61 + .../serving/pkg/reconciler/reconciler.go | 164 + .../serving/pkg/reconciler/reconciler_test.go | 57 + .../serving/pkg/reconciler/retry.go | 32 + .../serving/pkg/reconciler/retry_test.go | 80 + .../pkg/reconciler/revision/config/doc.go | 21 + .../revision/config/observability.go | 17 + .../pkg/reconciler/revision/config/store.go | 93 + .../reconciler/revision/config/store_test.go | 133 + 
.../config/testdata/config-autoscaler.yaml | 1 + .../config/testdata/config-defaults.yaml | 1 + .../config/testdata/config-deployment.yaml | 1 + .../config/testdata/config-logging.yaml | 1 + .../config/testdata/config-network.yaml | 1 + .../config/testdata/config-observability.yaml | 1 + .../config/testdata/config-tracing.yaml | 52 + .../revision/config/zz_generated.deepcopy.go | 21 + .../pkg/reconciler/revision/controller.go | 124 + .../serving/pkg/reconciler/revision/cruds.go | 120 + .../pkg/reconciler/revision/queueing_test.go | 270 + .../revision/reconcile_resources.go | 184 + .../pkg/reconciler/revision/resolve.go | 136 + .../pkg/reconciler/revision/resolve_test.go | 514 + .../revision/resources/constants.go | 44 + .../reconciler/revision/resources/deploy.go | 253 + .../revision/resources/deploy_test.go | 972 ++ .../pkg/reconciler/revision/resources/doc.go | 19 + .../reconciler/revision/resources/env_var.go | 42 + .../revision/resources/imagecache.go | 56 + .../revision/resources/imagecache_test.go | 141 + .../pkg/reconciler/revision/resources/meta.go | 54 + .../revision/resources/meta_test.go | 104 + .../revision/resources/names/doc.go | 18 + .../revision/resources/names/names.go | 34 + .../revision/resources/names/names_test.go | 108 + .../pkg/reconciler/revision/resources/pa.go | 75 + .../reconciler/revision/resources/pa_test.go | 327 + .../reconciler/revision/resources/queue.go | 371 + .../revision/resources/queue_test.go | 1163 ++ .../revision/resources/resourceboundary.go | 43 + .../resources/resourceboundary_test.go | 57 + .../pkg/reconciler/revision/revision.go | 255 + .../pkg/reconciler/revision/revision_test.go | 788 + .../pkg/reconciler/revision/table_test.go | 769 + .../serving/pkg/reconciler/route/OWNERS | 10 + .../serving/pkg/reconciler/route/README.md | 76 + .../pkg/reconciler/route/config/doc.go | 20 + .../pkg/reconciler/route/config/domain.go | 122 + .../reconciler/route/config/domain_test.go | 195 + .../pkg/reconciler/route/config/store.go | 
91 + .../pkg/reconciler/route/config/store_test.go | 94 + .../route/config/testdata/config-domain.yaml | 1 + .../route/config/testdata/config-gc.yaml | 1 + .../route/config/testdata/config-network.yaml | 1 + .../route/config/zz_generated.deepcopy.go | 75 + .../pkg/reconciler/route/controller.go | 131 + .../serving/pkg/reconciler/route/doc.go | 23 + .../route/doc/images/active_revisions.svg | 1 + .../route/doc/images/inactive_revision.svg | 1 + .../route/doc/images/inactive_revisions.svg | 1 + .../pkg/reconciler/route/domains/doc.go | 18 + .../pkg/reconciler/route/domains/domains.go | 133 + .../reconciler/route/domains/domains_test.go | 282 + .../pkg/reconciler/route/queueing_test.go | 131 + .../reconciler/route/reconcile_resources.go | 300 + .../route/reconcile_resources_test.go | 311 + .../reconciler/route/resources/certificate.go | 82 + .../route/resources/certificate_test.go | 128 + .../pkg/reconciler/route/resources/doc.go | 19 + .../pkg/reconciler/route/resources/filters.go | 50 + .../route/resources/filters_test.go | 117 + .../pkg/reconciler/route/resources/ingress.go | 242 + .../route/resources/ingress_test.go | 870 + .../reconciler/route/resources/labels/doc.go | 18 + .../route/resources/labels/labels.go | 52 + .../route/resources/labels/labels_test.go | 152 + .../reconciler/route/resources/names}/doc.go | 5 +- .../reconciler/route/resources/names/names.go | 44 + .../route/resources/names/names_test.go | 75 + .../pkg/reconciler/route/resources/service.go | 201 + .../route/resources/service_test.go | 481 + .../serving/pkg/reconciler/route/route.go | 612 + .../pkg/reconciler/route/route_test.go | 1070 ++ .../pkg/reconciler/route/table_test.go | 2760 ++++ .../pkg/reconciler/route/traffic/doc.go | 24 + .../pkg/reconciler/route/traffic/errors.go | 149 + .../reconciler/route/traffic/errors_test.go | 178 + .../pkg/reconciler/route/traffic/traffic.go | 349 + .../reconciler/route/traffic/traffic_test.go | 1137 ++ .../serverlessservice/controller.go | 92 + 
.../serverlessservice/global_resync_test.go | 157 + .../serverlessservice/resources/services.go | 162 + .../resources/services_test.go | 698 + .../serverlessservice/serverlessservice.go | 369 + .../serverlessservice_test.go | 830 + .../pkg/reconciler/service/controller.go | 72 + .../service/resources/configuration.go | 49 + .../service/resources/configuration_test.go | 142 + .../pkg/reconciler/service/resources/doc.go | 19 + .../reconciler/service/resources/names/doc.go | 18 + .../service/resources/names/names.go | 27 + .../service/resources/names/names_test.go | 64 + .../pkg/reconciler/service/resources/route.go | 59 + .../service/resources/route_test.go | 405 + .../service/resources/shared_test.go | 126 + .../serving/pkg/reconciler/service/service.go | 388 + .../pkg/reconciler/service/service_test.go | 1463 ++ .../serving/pkg/reconciler/stats_reporter.go | 131 + .../pkg/reconciler/stats_reporter_test.go | 91 + .../reconciler/testing/v1alpha1/factory.go | 206 + .../reconciler/testing/v1alpha1/listers.go | 215 + .../knative.dev/serving/pkg/resources/OWNERS | 10 + .../knative.dev/serving/pkg/resources/doc.go | 18 + .../serving/pkg/resources/endpoints.go | 71 + .../serving/pkg/resources/endpoints_test.go | 127 + .../knative.dev/serving/pkg/resources/meta.go | 54 + .../serving/pkg/resources/meta_test.go | 138 + .../serving/pkg/resources/resources.go | 18 + .../serving/pkg/resources/scale.go | 57 + .../serving/pkg/resources/scale_test.go | 154 + .../knative.dev/serving/pkg/testing/OWNERS | 16 + .../serving/pkg/testing/functional.go | 379 + .../serving/pkg/testing/v1/configuration.go | 41 + .../serving/pkg/testing/v1/route.go | 24 + .../serving/pkg/testing/v1/service.go | 203 + .../pkg/testing/v1alpha1/configuration.go | 122 + .../pkg/testing/v1alpha1/functional.go | 40 + .../serving/pkg/testing/v1alpha1/revision.go | 178 + .../serving/pkg/testing/v1alpha1/route.go | 252 + .../serving/pkg/testing/v1alpha1/service.go | 519 + .../pkg/testing/v1beta1/configuration.go | 41 
+ .../serving/pkg/testing/v1beta1/route.go | 24 + .../serving/pkg/testing/v1beta1/service.go | 204 + .../knative.dev/serving/sample/README.md | 4 + test/vendor/knative.dev/serving/test/OWNERS | 12 + .../vendor/knative.dev/serving/test/README.md | 239 + .../knative.dev/serving/test/adding_tests.md | 287 + .../knative.dev/serving/test/apicoverage.sh | 59 + .../serving/test/apicoverage/image/README.md | 7 + .../image/apicoverage-webhook.yaml | 71 + .../test/apicoverage/image/common/common.go | 38 + .../image/kodata/ignoredfields.yaml | 120 + .../serving/test/apicoverage/image/main.go | 25 + .../apicoverage/image/rules/coverage_rules.go | 54 + .../apicoverage/image/rules/display_rules.go | 45 + .../apicoverage/image/service-account.yaml | 32 + .../image/webhook/webhook_server.go | 63 + .../serving/test/apicoverage/tools/main.go | 108 + .../knative.dev/serving/test/cleanup.go | 48 + .../knative.dev/serving/test/clients.go | 250 + .../serving/test/config/100-namespace.yaml | 23 + .../serving/test/config/300-configmap.yaml | 21 + .../serving/test/config/300-secret.yaml | 22 + .../serving/test/config/config-logging.yaml | 103 + .../test/config/mtls/destinationrule.yaml | 35 + .../serving/test/config/mtls/policy.yaml | 33 + .../knative.dev/serving/test/conformance.go | 65 + .../serving/test/conformance/README.md | 34 + .../conformance/api/v1/blue_green_test.go | 164 + .../conformance/api/v1/configuration_test.go | 168 + .../conformance/api/v1/errorcondition_test.go | 231 + .../conformance/api/v1/generatename_test.go | 204 + .../test/conformance/api/v1/main_test.go | 33 + .../test/conformance/api/v1/migration_test.go | 117 + .../test/conformance/api/v1/resources_test.go | 117 + .../api/v1/revision_timeout_test.go | 241 + .../test/conformance/api/v1/route_test.go | 157 + .../api/v1/service_account_test.go | 57 + .../test/conformance/api/v1/service_test.go | 586 + .../api/v1/single_threaded_test.go | 107 + .../serving/test/conformance/api/v1/util.go | 339 + 
.../test/conformance/api/v1/volumes_test.go | 419 + .../api/v1alpha1/blue_green_test.go | 169 + .../api/v1alpha1/configuration_test.go | 168 + .../api/v1alpha1/errorcondition_test.go | 229 + .../api/v1alpha1/generatename_test.go | 206 + .../conformance/api/v1alpha1/main_test.go | 33 + .../api/v1alpha1/resources_test.go | 118 + .../api/v1alpha1/revision_timeout_test.go | 246 + .../conformance/api/v1alpha1/route_test.go | 154 + .../conformance/api/v1alpha1/service_test.go | 626 + .../api/v1alpha1/single_threaded_test.go | 108 + .../test/conformance/api/v1alpha1/util.go | 339 + .../conformance/api/v1alpha1/volumes_test.go | 429 + .../api/v1beta1/blue_green_test.go | 164 + .../api/v1beta1/configuration_test.go | 168 + .../api/v1beta1/errorcondition_test.go | 231 + .../api/v1beta1/generatename_test.go | 204 + .../test/conformance/api/v1beta1/main_test.go | 33 + .../conformance/api/v1beta1/migration_test.go | 117 + .../conformance/api/v1beta1/resources_test.go | 117 + .../api/v1beta1/revision_timeout_test.go | 242 + .../conformance/api/v1beta1/route_test.go | 157 + .../api/v1beta1/service_account_test.go | 57 + .../conformance/api/v1beta1/service_test.go | 587 + .../api/v1beta1/single_threaded_test.go | 107 + .../test/conformance/api/v1beta1/util.go | 339 + .../conformance/api/v1beta1/volumes_test.go | 419 + .../test/conformance/ingress/basic_test.go | 98 + .../test/conformance/ingress/grpc_test.go | 216 + .../test/conformance/ingress/headers_test.go | 177 + .../test/conformance/ingress/hosts_test.go | 70 + .../test/conformance/ingress/path_test.go | 236 + .../conformance/ingress/percentage_test.go | 119 + .../test/conformance/ingress/retry_test.go | 140 + .../test/conformance/ingress/timeout_test.go | 111 + .../test/conformance/ingress/tls_test.go | 76 + .../test/conformance/ingress/update_test.go | 200 + .../serving/test/conformance/ingress/util.go | 768 + .../conformance/ingress/websocket_test.go | 198 + .../test/conformance/runtime/cgroup_test.go | 138 + 
.../test/conformance/runtime/cmd_args_test.go | 54 + .../conformance/runtime/container_test.go | 199 + .../runtime/envpropagation_test.go | 117 + .../test/conformance/runtime/envvars_test.go | 78 + .../runtime/file_descriptor_test.go | 52 + .../conformance/runtime/filesystem_test.go | 105 + .../test/conformance/runtime/header_test.go | 191 + .../test/conformance/runtime/main_test.go | 33 + .../test/conformance/runtime/protocol_test.go | 78 + .../runtime/readiness_probe_test.go | 95 + .../test/conformance/runtime/sysctl_test.go | 57 + .../test/conformance/runtime/user_test.go | 102 + .../serving/test/conformance/runtime/util.go | 96 + .../conformance/runtime/workingdir_test.go | 49 + test/vendor/knative.dev/serving/test/crd.go | 58 + .../knative.dev/serving/test/e2e-common.sh | 522 + .../serving/test/e2e-smoke-tests.sh | 61 + .../knative.dev/serving/test/e2e-tests.sh | 77 + .../serving/test/e2e-upgrade-tests.sh | 108 + .../knative.dev/serving/test/e2e/README.md | 23 + .../serving/test/e2e/activator_test.go | 124 + .../serving/test/e2e/autoscale_test.go | 600 + .../serving/test/e2e/autotls/auto_tls_test.go | 248 + .../serving/test/e2e/destroypod_test.go | 315 + .../knative.dev/serving/test/e2e/e2e.go | 125 + .../serving/test/e2e/egress_traffic_test.go | 73 + .../knative.dev/serving/test/e2e/grpc_test.go | 239 + .../serving/test/e2e/helloworld_test.go | 205 + .../serving/test/e2e/image_pull_error_test.go | 107 + .../serving/test/e2e/istio/probing_test.go | 471 + .../test/e2e/minscale_readiness_test.go | 152 + .../serving/test/e2e/namespace_test.go | 133 + .../test/e2e/pod_schedule_error_test.go | 124 + .../serving/test/e2e/rollback_byo_test.go | 138 + .../serving/test/e2e/route_service_test.go | 199 + .../knative.dev/serving/test/e2e/scale.go | 217 + .../test/e2e/service_to_service_test.go | 338 + .../serving/test/e2e/subroutes_test.go | 399 + .../serving/test/e2e/websocket_test.go | 359 + .../knative.dev/serving/test/e2e_flags.go | 71 + 
.../serving/test/example-build.yaml | 8 + .../serving/test/performance/Benchmarks.md | 164 + .../serving/test/performance/README.md | 24 + .../benchmarks/dataplane-probe/cluster.yaml | 21 + .../continuous/dataplane-probe-setup.yaml | 214 + .../continuous/dataplane-probe.yaml | 466 + .../dataplane-probe/continuous/kodata/HEAD | 1 + .../continuous/kodata/dev.config | 1 + .../continuous/kodata/prod.config | 1 + .../dataplane-probe/continuous/kodata/refs | 1 + .../dataplane-probe/continuous/main.go | 138 + .../dataplane-probe/continuous/sla.go | 186 + .../benchmarks/dataplane-probe/dev.config | 116 + .../benchmarks/dataplane-probe/prod.config | 112 + .../benchmarks/deployment-probe/cluster.yaml | 21 + .../continuous/benchmark.yaml | 96 + .../deployment-probe/continuous/kodata/HEAD | 1 + .../continuous/kodata/basic-template.yaml | 32 + .../continuous/kodata/dev.config | 1 + .../continuous/kodata/prod.config | 1 + .../deployment-probe/continuous/kodata/refs | 1 + .../deployment-probe/continuous/main.go | 295 + .../deployment-probe/continuous/sla.go | 70 + .../benchmarks/deployment-probe/dev.config | 59 + .../benchmarks/deployment-probe/prod.config | 55 + .../benchmarks/load-test/cluster.yaml | 21 + .../load-test/continuous/kodata/HEAD | 1 + .../load-test/continuous/kodata/dev.config | 1 + .../load-test/continuous/kodata/prod.config | 1 + .../load-test/continuous/kodata/refs | 1 + .../load-test/continuous/load-test-setup.yaml | 55 + .../load-test/continuous/load-test.yaml | 178 + .../benchmarks/load-test/continuous/main.go | 172 + .../benchmarks/load-test/continuous/sla.go | 86 + .../benchmarks/load-test/dev.config | 59 + .../benchmarks/load-test/prod.config | 55 + .../benchmarks/scale-from-zero/cluster.yaml | 21 + .../scale-from-zero/continuous/kodata/HEAD | 1 + .../continuous/kodata/dev.config | 1 + .../continuous/kodata/prod.config | 1 + .../scale-from-zero/continuous/kodata/refs | 1 + .../scale-from-zero/continuous/main.go | 284 + .../continuous/scale-from-zero.yaml | 
172 + .../scale-from-zero/continuous/sla.go | 19 + .../benchmarks/scale-from-zero/dev.config | 69 + .../benchmarks/scale-from-zero/prod.config | 65 + .../serving/test/performance/config/README.md | 65 + .../test/performance/config/config-mako.yaml | 47 + .../serving/test/performance/config/job.yaml | 52 + .../serving/test/performance/dev.md | 85 + .../serving/test/performance/latency_test.go | 127 + .../test/performance/metrics/request.go | 67 + .../test/performance/metrics/runtime.go | 144 + .../performance/observed_concurency_test.go | 226 + .../test/performance/performance-tests.sh | 104 + .../serving/test/performance/performance.go | 142 + .../serving/test/performance/profiling.md | 83 + .../serving/test/performance/scale_test.go | 164 + .../serving/test/presubmit-tests.sh | 32 + .../vendor/knative.dev/serving/test/prober.go | 293 + .../serving/test/scale/scale_test.go | 65 + .../vendor/knative.dev/serving/test/system.go | 27 + .../serving/test/test_images/README.md | 7 + .../test/test_images/autoscale/README.md | 31 + .../test/test_images/autoscale/autoscale.go | 222 + .../test/test_images/autoscale/service.yaml | 10 + .../test/test_images/failing/README.md | 17 + .../test/test_images/failing/failing.go | 34 + .../test/test_images/failing/service.yaml | 10 + .../serving/test/test_images/flaky/README.md | 16 + .../serving/test/test_images/flaky/main.go | 62 + .../test/test_images/flaky/service.yaml | 14 + .../test/test_images/grpc-ping/main.go | 90 + .../test_images/grpc-ping/proto/ping.pb.go | 211 + .../test_images/grpc-ping/proto/ping.proto | 17 + .../test/test_images/grpc-ping/service.yaml | 13 + .../test_images/hellovolume/hellovolume.go | 56 + .../test/test_images/hellovolume/service.yaml | 51 + .../test/test_images/helloworld/README.md | 23 + .../test/test_images/helloworld/helloworld.go | 38 + .../test_images/helloworld/helloworld.yaml | 40 + .../test/test_images/helloworld/service.yaml | 10 + .../test/test_images/httpproxy/README.md | 23 + 
.../test/test_images/httpproxy/httpproxy.go | 104 + .../test/test_images/httpproxy/service.yaml | 25 + .../observed-concurrency/README.md | 20 + .../observed_concurrency.go | 43 + .../observed-concurrency/service.yaml | 10 + .../test/test_images/pizzaplanetv1/README.md | 19 + .../test/test_images/pizzaplanetv1/main.go | 35 + .../test_images/pizzaplanetv1/service.yaml | 10 + .../test/test_images/pizzaplanetv2/README.md | 20 + .../test/test_images/pizzaplanetv2/main.go | 35 + .../test_images/pizzaplanetv2/service.yaml | 10 + .../test/test_images/runtime/handlers/args.go | 22 + .../test_images/runtime/handlers/cgroup.go | 69 + .../test/test_images/runtime/handlers/env.go | 28 + .../test/test_images/runtime/handlers/file.go | 47 + .../runtime/handlers/file_access_attempt.go | 106 + .../test_images/runtime/handlers/handler.go | 72 + .../test_images/runtime/handlers/mount.go | 55 + .../test/test_images/runtime/handlers/proc.go | 37 + .../test_images/runtime/handlers/request.go | 33 + .../test_images/runtime/handlers/runtime.go | 75 + .../test/test_images/runtime/handlers/user.go | 41 + .../serving/test/test_images/runtime/main.go | 50 + .../test/test_images/runtime/service.yaml | 12 + .../test/test_images/singlethreaded/README.md | 21 + .../test/test_images/singlethreaded/main.go | 51 + .../test_images/singlethreaded/service.yaml | 10 + .../test/test_images/timeout/README.md | 19 + .../test/test_images/timeout/service.yaml | 10 + .../test/test_images/timeout/timeout.go | 55 + .../test/test_images/wsserver/README.md | 11 + .../serving/test/test_images/wsserver/echo.go | 90 + .../test/test_images/wsserver/service.yaml | 23 + .../knative.dev/serving/test/types/runtime.go | 228 + .../serving/test/upgrade/README.md | 64 + .../serving/test/upgrade/probe_test.go | 71 + .../upgrade/service_postdowngrade_test.go | 72 + .../test/upgrade/service_postupgrade_test.go | 104 + .../test/upgrade/service_preupgrade_test.go | 95 + .../serving/test/upgrade/upgrade.go | 56 + 
.../serving/test/upload-test-images.sh | 38 + test/vendor/knative.dev/serving/test/util.go | 65 + .../serving/test/v1/configuration.go | 158 + .../vendor/knative.dev/serving/test/v1/crd.go | 36 + .../knative.dev/serving/test/v1/revision.go | 97 + .../knative.dev/serving/test/v1/route.go | 151 + .../knative.dev/serving/test/v1/service.go | 256 + .../serving/test/v1alpha1/configuration.go | 178 + .../knative.dev/serving/test/v1alpha1/crd.go | 36 + .../serving/test/v1alpha1/ingress.go | 58 + .../serving/test/v1alpha1/revision.go | 97 + .../serving/test/v1alpha1/route.go | 138 + .../serving/test/v1alpha1/service.go | 578 + .../serving/test/v1beta1/configuration.go | 159 + .../knative.dev/serving/test/v1beta1/crd.go | 36 + .../serving/test/v1beta1/revision.go | 97 + .../knative.dev/serving/test/v1beta1/route.go | 153 + .../serving/test/v1beta1/service.go | 255 + .../knative.dev/serving/third_party/OWNERS | 14 + .../serving/third_party/VENDOR-LICENSE | 13188 ++++++++++++++++ .../third_party/ambassador-latest/README.md | 6 + .../ambassador-latest/ambassador-rbac.yaml | 338 + .../ambassador-latest/ambassador-service.yaml | 19 + .../cert-manager-crds.yaml | 5425 +++++++ .../cert-manager-0.12.0/cert-manager.yaml | 6406 ++++++++ .../download-cert-manager.sh | 40 + .../cert-manager-0.9.1/cert-manager-crds.yaml | 1426 ++ .../cert-manager-0.9.1/cert-manager.yaml | 2405 +++ .../download-cert-manager.sh | 40 + .../third_party/config/monitoring/OWNERS | 10 + .../monitoring/logging/elasticsearch/LICENSE | 201 + .../logging/elasticsearch/elasticsearch.yaml | 121 + .../logging/elasticsearch/kibana.yaml | 57 + .../metrics/prometheus/kubernetes/LICENSE | 201 + .../kubernetes/kube-state-metrics.yaml | 172 + .../prometheus/prometheus-operator/LICENSE | 0 .../prometheus/prometheus-operator/NOTICE | 0 .../dashboards/deployment-dashboard.json | 738 + ...ubernetes-capacity-planning-dashboard.json | 981 ++ .../kubernetes-cluster-health-dashboard.json | 702 + 
.../kubernetes-cluster-status-dashboard.json | 817 + ...rnetes-control-plane-status-dashboard.json | 633 + ...ubernetes-resource-requests-dashboard.json | 410 + .../dashboards/nodes-dashboard.json | 829 + .../dashboards/pods-dashboard.json | 425 + .../dashboards/statefulset-dashboard.json | 716 + .../prometheus-operator/node-exporter.yaml | 125 + .../third_party/contour-latest/README.md | 5 + .../third_party/contour-latest/contour.yaml | 1293 ++ .../serving/third_party/gloo-latest/README.md | 7 + .../third_party/gloo-latest/download-gloo.sh | 22 + .../serving/third_party/gloo-latest/gloo.yaml | 769 + .../gloo-latest/value-overrides.yaml | 17 + .../serving/third_party/istio-1.3-latest | 1 + .../serving/third_party/istio-1.3.6/README.md | 17 + .../third_party/istio-1.3.6/download-istio.sh | 62 + .../istio-1.3.6/drain-seconds.yaml.patch | 5 + .../istio-1.3.6/istio-ci-mesh.yaml | 3380 ++++ .../istio-1.3.6/istio-ci-no-mesh.yaml | 1680 ++ .../third_party/istio-1.3.6/istio-crds.yaml | 846 + .../istio-1.3.6/istio-knative-extras.yaml | 262 + .../istio-1.3.6/istio-minimal.yaml | 920 ++ .../istio-1.3.6/namespace.yaml.patch | 10 + .../istio-1.3.6/values-extras.yaml | 86 + .../third_party/istio-1.3.6/values-lean.yaml | 96 + .../third_party/istio-1.3.6/values-local.yaml | 91 + .../third_party/istio-1.3.6/values.yaml | 85 + .../serving/third_party/istio-1.4-latest | 1 + .../serving/third_party/istio-1.4.2/README.md | 17 + .../third_party/istio-1.4.2/download-istio.sh | 76 + .../istio-1.4.2/drain-seconds.yaml.patch | 5 + .../istio-1.4.2/istio-ci-mesh.yaml | 3511 ++++ .../istio-1.4.2/istio-ci-no-mesh.yaml | 1736 ++ .../third_party/istio-1.4.2/istio-crds.yaml | 5255 ++++++ .../istio-1.4.2/istio-knative-extras.yaml | 268 + .../istio-1.4.2/istio-minimal.yaml | 960 ++ .../istio-1.4.2/namespace.yaml.patch | 10 + .../istio-1.4.2/values-extras.yaml | 86 + .../third_party/istio-1.4.2/values-lean.yaml | 96 + .../third_party/istio-1.4.2/values-local.yaml | 91 + 
.../third_party/istio-1.4.2/values.yaml | 85 + .../third_party/kourier-latest/README.md | 5 + .../kourier-latest/download-kourier.sh | 54 + .../third_party/kourier-latest/kourier.yaml | 256 + test/vendor/knative.dev/test-infra/LICENSE | 202 + .../test-infra/scripts/README.md | 92 +- .../test-infra/scripts/dummy.go | 0 .../test-infra/scripts/e2e-tests.sh | 63 +- .../test-infra/scripts/library.sh | 105 +- .../scripts/markdown-link-check-config.rc | 0 .../scripts/markdown-lint-config.rc | 0 .../test-infra/scripts/performance-tests.sh | 156 + .../test-infra/scripts/presubmit-tests.sh | 16 +- .../test-infra/scripts/release.sh | 56 +- .../test-infra/tools/dep-collector/README.md | 0 .../test-infra/tools/dep-collector/imports.go | 0 .../tools/dep-collector/licenses.go | 0 .../test-infra/tools/dep-collector/main.go | 0 2453 files changed, 455123 insertions(+), 3707 deletions(-) rename test/vendor/{github.com/knative/serving => cloud.google.com/go}/LICENSE (100%) create mode 100644 test/vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 test/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go create mode 100644 test/vendor/cloud.google.com/go/container/apiv1/doc.go create mode 100644 test/vendor/cloud.google.com/go/internal/version/version.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/doc.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go create mode 100644 test/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go 
create mode 100644 test/vendor/cloud.google.com/go/trace/apiv2/doc.go create mode 100644 test/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go create mode 100644 test/vendor/cloud.google.com/go/trace/apiv2/trace_client.go rename test/vendor/{github.com/knative/pkg => contrib.go.opencensus.io/exporter/ocagent}/LICENSE (100%) create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS rename test/vendor/{github.com/knative/test-infra => contrib.go.opencensus.io/exporter/stackdriver}/LICENSE (100%) create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go create mode 100644 
test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go create mode 100644 test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 test/vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 
100644 test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go create mode 100644 
test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 test/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go create mode 100644 test/vendor/github.com/beorn7/perks/LICENSE create mode 100644 test/vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go create mode 100644 test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go create mode 100644 
test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go create mode 100644 test/vendor/github.com/ghodss/yaml/LICENSE create mode 100644 test/vendor/github.com/ghodss/yaml/fields.go create mode 100644 test/vendor/github.com/ghodss/yaml/yaml.go create mode 100644 test/vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 test/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100755 test/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go create mode 100644 test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/any.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/any.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/api.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/doc.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/duration.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/duration.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/duration_gogo.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/empty.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/field_mask.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/protosize.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/source_context.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/struct.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/timestamp.go create mode 100644 
test/vendor/github.com/gogo/protobuf/types/timestamp.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/type.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/wrappers.pb.go create mode 100644 test/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go create mode 100644 test/vendor/github.com/golang/protobuf/descriptor/descriptor.go create mode 100644 test/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go create mode 100644 test/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 test/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 test/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 test/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 test/vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 test/vendor/github.com/google/uuid/LICENSE create mode 100644 test/vendor/github.com/google/uuid/dce.go create mode 100644 test/vendor/github.com/google/uuid/doc.go create mode 100644 test/vendor/github.com/google/uuid/hash.go create mode 100644 test/vendor/github.com/google/uuid/marshal.go create mode 100644 test/vendor/github.com/google/uuid/node.go create mode 100644 test/vendor/github.com/google/uuid/node_js.go create mode 100644 test/vendor/github.com/google/uuid/node_net.go create mode 100644 test/vendor/github.com/google/uuid/sql.go create mode 100644 test/vendor/github.com/google/uuid/time.go create mode 100644 test/vendor/github.com/google/uuid/util.go create mode 100644 test/vendor/github.com/google/uuid/uuid.go create mode 100644 test/vendor/github.com/google/uuid/version1.go create mode 100644 test/vendor/github.com/google/uuid/version4.go create mode 100644 test/vendor/github.com/googleapis/gax-go/LICENSE create mode 100644 test/vendor/github.com/googleapis/gax-go/v2/call_option.go create mode 100644 
test/vendor/github.com/googleapis/gax-go/v2/gax.go create mode 100644 test/vendor/github.com/googleapis/gax-go/v2/header.go create mode 100644 test/vendor/github.com/googleapis/gax-go/v2/invoke.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go create mode 100644 
test/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go create mode 100644 test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 test/vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 test/vendor/github.com/jmespath/go-jmespath/util.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/condition_set.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/condition_types.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/contexts.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/deprecated.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/field_error.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/interfaces.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/kind2resource.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/metadata_validation.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/url.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/volatile_time.go delete mode 100644 test/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go delete mode 100644 
test/vendor/github.com/knative/pkg/kmeta/names.go delete mode 100644 test/vendor/github.com/knative/pkg/kmp/diff.go delete mode 100644 test/vendor/github.com/knative/pkg/kmp/reporters.go delete mode 120000 test/vendor/github.com/knative/serving/config/300-imagecache.yaml delete mode 100644 test/vendor/github.com/knative/serving/pkg/apis/autoscaling/annotation_validation.go delete mode 100644 test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go delete mode 120000 test/vendor/github.com/knative/serving/pkg/apis/config/testdata/config-defaults.yaml delete mode 100644 test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_lifecycle.go delete mode 100644 test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_types.go delete mode 100644 test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_defaults.go delete mode 120000 test/vendor/github.com/knative/serving/pkg/autoscaler/testdata/config-autoscaler.yaml delete mode 100644 test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/clusteringress.go delete mode 120000 test/vendor/github.com/knative/serving/pkg/deployment/testdata/config-deployment.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/gc/testdata/config-gc.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/logging/testdata/config-logging.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/metrics/testdata/config-observability.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/network/testdata/config-network.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/configuration/config/testdata/config-gc.yaml delete mode 120000 
test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-network.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-domain.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-gc.yaml delete mode 120000 test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-network.yaml delete mode 120000 test/vendor/github.com/knative/serving/test/config/100-istio-default-domain.yaml delete mode 120000 test/vendor/github.com/knative/serving/third_party/istio-1.0-latest delete mode 120000 test/vendor/github.com/knative/serving/third_party/istio-1.1-latest rename test/vendor/github.com/{knative/serving/third_party/config/monitoring/logging/elasticsearch => matttproud/golang_protobuf_extensions}/LICENSE (100%) create mode 100644 test/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 
test/vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 test/vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/build_info.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go 
create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 test/vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 test/vendor/github.com/prometheus/client_model/LICENSE create mode 100644 test/vendor/github.com/prometheus/client_model/NOTICE create mode 100644 test/vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 test/vendor/github.com/prometheus/common/LICENSE create mode 100644 test/vendor/github.com/prometheus/common/NOTICE create mode 100644 test/vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 test/vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 test/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 test/vendor/github.com/prometheus/common/model/alert.go create mode 100644 
test/vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 test/vendor/github.com/prometheus/common/model/fnv.go create mode 100644 test/vendor/github.com/prometheus/common/model/labels.go create mode 100644 test/vendor/github.com/prometheus/common/model/labelset.go create mode 100644 test/vendor/github.com/prometheus/common/model/metric.go create mode 100644 test/vendor/github.com/prometheus/common/model/model.go create mode 100644 test/vendor/github.com/prometheus/common/model/signature.go create mode 100644 test/vendor/github.com/prometheus/common/model/silence.go create mode 100644 test/vendor/github.com/prometheus/common/model/time.go create mode 100644 test/vendor/github.com/prometheus/common/model/value.go create mode 100644 test/vendor/github.com/prometheus/procfs/LICENSE create mode 100644 test/vendor/github.com/prometheus/procfs/NOTICE create mode 100644 test/vendor/github.com/prometheus/procfs/arp.go create mode 100644 test/vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 test/vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 test/vendor/github.com/prometheus/procfs/crypto.go create mode 100644 test/vendor/github.com/prometheus/procfs/doc.go create mode 100644 test/vendor/github.com/prometheus/procfs/fs.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/util/readfile.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 test/vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 test/vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 test/vendor/github.com/prometheus/procfs/loadavg.go create mode 100644 
test/vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 test/vendor/github.com/prometheus/procfs/meminfo.go create mode 100644 test/vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 test/vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_conntrackstat.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_sockstat.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_udp.go create mode 100644 test/vendor/github.com/prometheus/procfs/net_unix.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 test/vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 test/vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 test/vendor/github.com/prometheus/procfs/stat.go create mode 100644 test/vendor/github.com/prometheus/procfs/swaps.go create mode 100644 test/vendor/github.com/prometheus/procfs/vm.go create mode 100644 test/vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 test/vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 test/vendor/go.opencensus.io/metric/metricexport/doc.go create mode 100644 test/vendor/go.opencensus.io/metric/metricexport/export.go create mode 100644 
test/vendor/go.opencensus.io/metric/metricexport/reader.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/client.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/doc.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/server.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go create mode 100644 test/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go create mode 100644 test/vendor/go.opencensus.io/resource/resourcekeys/const.go create mode 100644 test/vendor/golang.org/x/net/http2/h2c/h2c.go create mode 100644 test/vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 test/vendor/golang.org/x/net/trace/events.go create mode 100644 test/vendor/golang.org/x/net/trace/histogram.go create mode 100644 test/vendor/golang.org/x/net/trace/trace.go create mode 100644 test/vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 test/vendor/golang.org/x/oauth2/google/appengine_gen1.go create mode 100644 test/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go create mode 100644 test/vendor/golang.org/x/oauth2/google/default.go create mode 100644 test/vendor/golang.org/x/oauth2/google/doc.go create mode 100644 test/vendor/golang.org/x/oauth2/google/google.go create mode 100644 test/vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 test/vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 test/vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 test/vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 test/vendor/golang.org/x/sync/AUTHORS create mode 100644 test/vendor/golang.org/x/sync/CONTRIBUTORS create mode 
100644 test/vendor/golang.org/x/sync/LICENSE create mode 100644 test/vendor/golang.org/x/sync/PATENTS create mode 100644 test/vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 test/vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 test/vendor/google.golang.org/api/AUTHORS create mode 100644 test/vendor/google.golang.org/api/CONTRIBUTORS create mode 100644 test/vendor/google.golang.org/api/LICENSE create mode 100644 test/vendor/google.golang.org/api/googleapi/transport/apikey.go create mode 100644 test/vendor/google.golang.org/api/internal/conn_pool.go create mode 100644 test/vendor/google.golang.org/api/internal/creds.go create mode 100644 test/vendor/google.golang.org/api/internal/pool.go create mode 100644 test/vendor/google.golang.org/api/internal/settings.go create mode 100644 test/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE create mode 100644 test/vendor/google.golang.org/api/iterator/iterator.go create mode 100644 test/vendor/google.golang.org/api/option/credentials_go19.go create mode 100644 test/vendor/google.golang.org/api/option/credentials_notgo19.go create mode 100644 test/vendor/google.golang.org/api/option/option.go create mode 100644 test/vendor/google.golang.org/api/support/bundler/bundler.go create mode 100644 test/vendor/google.golang.org/api/transport/dial.go create mode 100644 test/vendor/google.golang.org/api/transport/doc.go create mode 100644 test/vendor/google.golang.org/api/transport/go19.go create mode 100644 test/vendor/google.golang.org/api/transport/grpc/dial.go create mode 100644 test/vendor/google.golang.org/api/transport/grpc/dial_appengine.go create mode 100644 test/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go create mode 100644 test/vendor/google.golang.org/api/transport/grpc/pool.go create mode 100644 test/vendor/google.golang.org/api/transport/http/dial.go create mode 100644 test/vendor/google.golang.org/api/transport/http/dial_appengine.go create mode 
100644 test/vendor/google.golang.org/api/transport/http/internal/propagation/http.go create mode 100644 test/vendor/google.golang.org/api/transport/not_go19.go create mode 100644 test/vendor/google.golang.org/appengine/appengine.go create mode 100644 test/vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 test/vendor/google.golang.org/appengine/errors.go create mode 100644 test/vendor/google.golang.org/appengine/identity.go create mode 100644 test/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 test/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 test/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go create mode 100644 test/vendor/google.golang.org/appengine/namespace.go create mode 100644 test/vendor/google.golang.org/appengine/socket/doc.go create mode 100644 test/vendor/google.golang.org/appengine/socket/socket_classic.go create mode 100644 test/vendor/google.golang.org/appengine/socket/socket_vm.go create mode 100644 test/vendor/google.golang.org/appengine/timeout.go create mode 100644 test/vendor/google.golang.org/genproto/LICENSE create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create 
mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go create mode 100644 
test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 test/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go create mode 100644 test/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go create mode 100644 test/vendor/google.golang.org/grpc/AUTHORS create mode 100644 test/vendor/google.golang.org/grpc/LICENSE create mode 100644 test/vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 test/vendor/google.golang.org/grpc/backoff.go create mode 100644 test/vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 test/vendor/google.golang.org/grpc/balancer.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go create mode 100644 test/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 test/vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 
test/vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 test/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 test/vendor/google.golang.org/grpc/call.go create mode 100644 test/vendor/google.golang.org/grpc/clientconn.go create mode 100644 test/vendor/google.golang.org/grpc/codec.go create mode 100644 test/vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 test/vendor/google.golang.org/grpc/codes/codes.go create mode 100644 test/vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/alts.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/common.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go create mode 100644 
test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/alts/utils.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/go12.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/google/google.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/oauth/oauth.go create mode 100644 test/vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 test/vendor/google.golang.org/grpc/dialoptions.go create mode 100644 test/vendor/google.golang.org/grpc/doc.go create mode 100644 test/vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 test/vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 test/vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 test/vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 test/vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 test/vendor/google.golang.org/grpc/interceptor.go create mode 100644 test/vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 test/vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 test/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 test/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 test/vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 test/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 test/vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 
test/vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 test/vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 test/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 test/vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 test/vendor/google.golang.org/grpc/internal/internal.go create mode 100644 test/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 test/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 test/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 test/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 
test/vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 test/vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 test/vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 test/vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 test/vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 test/vendor/google.golang.org/grpc/naming/naming.go create mode 100644 test/vendor/google.golang.org/grpc/peer/peer.go create mode 100644 test/vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 test/vendor/google.golang.org/grpc/pickfirst.go create mode 100644 test/vendor/google.golang.org/grpc/preloader.go create mode 100644 test/vendor/google.golang.org/grpc/proxy.go create mode 100644 test/vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 test/vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 test/vendor/google.golang.org/grpc/rpc_util.go create mode 100644 test/vendor/google.golang.org/grpc/server.go create mode 100644 test/vendor/google.golang.org/grpc/service_config.go create mode 100644 test/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 test/vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 test/vendor/google.golang.org/grpc/stats/stats.go create mode 100644 test/vendor/google.golang.org/grpc/status/status.go create mode 100644 test/vendor/google.golang.org/grpc/stream.go create mode 100644 test/vendor/google.golang.org/grpc/tap/tap.go create mode 100644 test/vendor/google.golang.org/grpc/trace.go create mode 100644 test/vendor/google.golang.org/grpc/version.go create mode 100644 test/vendor/istio.io/api/LICENSE create mode 100644 test/vendor/istio.io/api/common/config/license-lint.yml 
create mode 100644 test/vendor/istio.io/api/licenses/cloud.google.com/go/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/COPYING create mode 100644 test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING create mode 100644 test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING create mode 100644 test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING create mode 100644 test/vendor/istio.io/api/licenses/github.com/client9/misspell/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/gogo/protobuf/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/golang/glog/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/golang/mock/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/golang/protobuf/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/google/go-cmp/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/kisielk/errcheck/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/github.com/kisielk/gotool/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/crypto/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/lint/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/net/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/oauth2/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/sync/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/sys/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/text/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/tools/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/golang.org/x/tools/cmd/getgo/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/google.golang.org/appengine/LICENSE 
create mode 100644 test/vendor/istio.io/api/licenses/google.golang.org/genproto/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/google.golang.org/grpc/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/honnef.co/go/tools/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/honnef.co/go/tools/gcsizes/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/honnef.co/go/tools/lint/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/honnef.co/go/tools/ssa/LICENSE create mode 100644 test/vendor/istio.io/api/licenses/istio.io/gogo-genproto/LICENSE create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/destination_rule.pb.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/destination_rule_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/destination_rule_json.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/envoy_filter.pb.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_json.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/gateway.pb.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/gateway_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/gateway_json.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/service_entry.pb.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/service_entry_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/service_entry_json.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/sidecar.pb.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/sidecar_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/sidecar_json.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/virtual_service.pb.go create 
mode 100644 test/vendor/istio.io/api/networking/v1alpha3/virtual_service_deepcopy.gen.go create mode 100644 test/vendor/istio.io/api/networking/v1alpha3/virtual_service_json.gen.go create mode 100644 test/vendor/istio.io/client-go/LICENSE create mode 100644 test/vendor/istio.io/client-go/common/config/license-lint.yml create mode 100644 test/vendor/istio.io/client-go/licenses/cloud.google.com/go/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/COPYING create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/NYTimes/gziphandler/LICENSE.md create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/purell/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/urlesc/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/client9/misspell/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/davecgh/go-spew/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/docopt/docopt-go/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/emicklei/go-restful/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/evanphx/json-patch/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/ghodss/yaml/LICENSE rename test/vendor/{github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/kubernetes => istio.io/client-go/licenses/github.com/go-logr/logr}/LICENSE (100%) create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonpointer/LICENSE create mode 100644 
test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonreference/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/go-openapi/spec/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/go-openapi/swag/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/gogo/protobuf/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/golang/glog/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/golang/groupcache/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/golang/mock/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/golang/protobuf/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/google/btree/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/google/go-cmp/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/google/gofuzz/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/googleapis/gnostic/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/hashicorp/golang-lru/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/json-iterator/go/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/kisielk/errcheck/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/kisielk/gotool/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/kr/pretty/License create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/kr/pty/License create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/kr/text/License create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/mailru/easyjson/LICENSE create mode 100644 
test/vendor/istio.io/client-go/licenses/github.com/modern-go/concurrent/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/modern-go/reflect2/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/onsi/gomega/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/peterbourgon/diskv/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/pkg/errors/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/pmezard/go-difflib/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/spf13/pflag/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/stretchr/objx/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/github.com/stretchr/testify/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/crypto/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/exp/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/lint/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/net/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/oauth2/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/sync/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/sys/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/text/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/time/LICENSE create mode 100644 
test/vendor/istio.io/client-go/licenses/golang.org/x/tools/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/golang.org/x/tools/cmd/getgo/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/google.golang.org/appengine/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/google.golang.org/genproto/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/google.golang.org/grpc/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/gopkg.in/check.v1/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/gopkg.in/inf.v0/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/gopkg.in/yaml.v2/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/gcsizes/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/lint/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/ssa/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/istio.io/api/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/istio.io/gogo-genproto/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/api/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/apimachinery/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/client-go/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/gengo/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/klog/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/k8s.io/kube-openapi/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/sigs.k8s.io/structured-merge-diff/LICENSE create mode 100644 test/vendor/istio.io/client-go/licenses/sigs.k8s.io/yaml/LICENSE create mode 100644 test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/doc.go create 
mode 100644 test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/register.gen.go create mode 100644 test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/types.gen.go create mode 100644 test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/zz_generated.deepcopy.gen.go create mode 100644 test/vendor/istio.io/gogo-genproto/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/common/config/license-lint.yml create mode 100644 test/vendor/istio.io/gogo-genproto/googleapis/google/api/annotations.pb.go create mode 100644 test/vendor/istio.io/gogo-genproto/googleapis/google/api/field_behavior.pb.go create mode 100644 test/vendor/istio.io/gogo-genproto/googleapis/google/api/http.pb.go create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/cloud.google.com/go/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/COPYING create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/client9/misspell/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/gogo/protobuf/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/glog/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/mock/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/protobuf/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/google/go-cmp/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/errcheck/LICENSE create mode 100644 
test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/gotool/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/crypto/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/lint/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/net/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/oauth2/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sync/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sys/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/text/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/cmd/getgo/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/appengine/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/genproto/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/grpc/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/gcsizes/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/lint/LICENSE create mode 100644 test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/ssa/LICENSE create mode 100644 test/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go create mode 100644 test/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go create mode 100644 test/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 test/vendor/k8s.io/client-go/util/jsonpath/doc.go create mode 100644 test/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go create mode 100644 
test/vendor/k8s.io/client-go/util/jsonpath/node.go create mode 100644 test/vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/doc.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/metrics.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/parallelizer.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/queue.go create mode 100644 test/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/cached.go (100%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/const.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/enqueue.go (100%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/interface.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/patch.go (75%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/proxy.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/register.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/typed.go (92%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/unstructured.go rename test/vendor/{github.com/knative/pkg/apis/duck/v1beta1 => knative.dev/pkg/apis/duck/v1}/addressable_types.go (79%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/destination.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/doc.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/podspec_types.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/register.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/source_types.go create mode 100644 
test/vendor/knative.dev/pkg/apis/duck/v1/status_types.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/addressable_types.go (72%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/condition_set.go (99%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/conditions_types.go (97%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/legacy_targetable_types.go (97%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/register.go (98%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/retired_targetable_types.go (97%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go (83%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/register.go (97%) create mode 100644 test/vendor/knative.dev/pkg/apis/duck/v1beta1/source_types.go rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/status_types.go (98%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go (58%) rename test/vendor/{github.com/knative => knative.dev}/pkg/apis/duck/verify.go (98%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/filter.go (58%) rename test/vendor/{github.com/knative => 
knative.dev}/pkg/configmap/informed_watcher.go (70%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/load.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/manual_watcher.go (82%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/static_watcher.go (83%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/store.go (88%) rename test/vendor/{github.com/knative => knative.dev}/pkg/configmap/watcher.go (86%) create mode 100644 test/vendor/knative.dev/pkg/controller/controller.go create mode 100644 test/vendor/knative.dev/pkg/controller/helper.go create mode 100644 test/vendor/knative.dev/pkg/controller/stats_reporter.go rename test/vendor/{github.com/knative => knative.dev}/pkg/kmeta/accessor.go (59%) rename test/vendor/{github.com/knative => knative.dev}/pkg/kmeta/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/kmeta/labels.go (100%) create mode 100644 test/vendor/knative.dev/pkg/kmeta/names.go rename test/vendor/{github.com/knative => knative.dev}/pkg/kmeta/owner_references.go (100%) create mode 100644 test/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go create mode 100644 test/vendor/knative.dev/pkg/metrics/client.go create mode 100644 test/vendor/knative.dev/pkg/metrics/config.go create mode 100644 test/vendor/knative.dev/pkg/metrics/config_observability.go create mode 100644 test/vendor/knative.dev/pkg/metrics/doc.go create mode 100644 test/vendor/knative.dev/pkg/metrics/exporter.go create mode 100644 test/vendor/knative.dev/pkg/metrics/gcp_metadata.go create mode 100644 test/vendor/knative.dev/pkg/metrics/memstats.go create mode 100644 test/vendor/knative.dev/pkg/metrics/metrics.go create mode 100644 test/vendor/knative.dev/pkg/metrics/metricskey/constants.go create mode 100644 test/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go create mode 100644 test/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go create mode 100644 
test/vendor/knative.dev/pkg/metrics/monitored_resources.go create mode 100644 test/vendor/knative.dev/pkg/metrics/monitored_resources_eventing.go create mode 100644 test/vendor/knative.dev/pkg/metrics/monitored_resources_serving.go create mode 100644 test/vendor/knative.dev/pkg/metrics/opencensus_exporter.go create mode 100644 test/vendor/knative.dev/pkg/metrics/prometheus_exporter.go create mode 100644 test/vendor/knative.dev/pkg/metrics/record.go create mode 100644 test/vendor/knative.dev/pkg/metrics/reflector.go create mode 100644 test/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go create mode 100644 test/vendor/knative.dev/pkg/metrics/utils.go create mode 100644 test/vendor/knative.dev/pkg/metrics/workqueue.go create mode 100644 test/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/pkg/network/doc.go create mode 100644 test/vendor/knative.dev/pkg/network/domain.go create mode 100644 test/vendor/knative.dev/pkg/network/error_handler.go create mode 100644 test/vendor/knative.dev/pkg/network/h2c.go create mode 100644 test/vendor/knative.dev/pkg/network/network.go create mode 100644 test/vendor/knative.dev/pkg/network/transports.go create mode 100644 test/vendor/knative.dev/pkg/profiling/server.go rename test/vendor/{github.com/knative => knative.dev}/pkg/ptr/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/pkg/ptr/ptr.go (76%) create mode 100644 test/vendor/knative.dev/pkg/signals/signal.go rename test/vendor/{github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_defaults.go => knative.dev/pkg/signals/signal_posix.go} (78%) create mode 100644 test/vendor/knative.dev/pkg/signals/signal_windows.go create mode 100644 test/vendor/knative.dev/pkg/system/clock.go create mode 100644 test/vendor/knative.dev/pkg/system/env.go create mode 100644 test/vendor/knative.dev/pkg/test/helpers/dryrun.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/metadata_validation.go 
=> knative.dev/pkg/test/helpers/error.go} (57%) create mode 100644 test/vendor/knative.dev/pkg/test/helpers/name.go create mode 100644 test/vendor/knative.dev/pkg/test/tinterface.go create mode 100644 test/vendor/knative.dev/pkg/tracker/doc.go create mode 100644 test/vendor/knative.dev/pkg/tracker/enqueue.go create mode 100644 test/vendor/knative.dev/pkg/tracker/interface.go create mode 100644 test/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/.gitattributes create mode 100644 test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/ask-question.md create mode 100644 test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/bug-report.md create mode 100644 test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/feature-request.md create mode 100644 test/vendor/knative.dev/serving/.github/issue-template.md create mode 100644 test/vendor/knative.dev/serving/.github/pull-request-template.md create mode 100644 test/vendor/knative.dev/serving/.gitignore create mode 100644 test/vendor/knative.dev/serving/.ko.yaml rename test/vendor/{github.com/knative => knative.dev}/serving/AUTHORS (96%) create mode 100644 test/vendor/knative.dev/serving/CONTRIBUTING.md create mode 100644 test/vendor/knative.dev/serving/DEVELOPMENT.md create mode 100644 test/vendor/knative.dev/serving/Gopkg.lock create mode 100644 test/vendor/knative.dev/serving/Gopkg.toml create mode 100644 test/vendor/knative.dev/serving/LICENSE create mode 100644 test/vendor/knative.dev/serving/Makefile create mode 100644 test/vendor/knative.dev/serving/OWNERS create mode 100644 test/vendor/knative.dev/serving/OWNERS_ALIASES create mode 100644 test/vendor/knative.dev/serving/README.md create mode 100644 test/vendor/knative.dev/serving/ci create mode 100644 test/vendor/knative.dev/serving/cmd/activator/OWNERS rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/activator/kodata/HEAD (100%) rename test/vendor/{github.com/knative => 
knative.dev}/serving/cmd/activator/kodata/LICENSE (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/activator/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/activator/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/activator/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/activator/request_log.go create mode 100644 test/vendor/knative.dev/serving/cmd/activator/request_log_test.go create mode 100644 test/vendor/knative.dev/serving/cmd/autoscaler-hpa/OWNERS rename test/vendor/{github.com/knative/serving/cmd/autoscaler => knative.dev/serving/cmd/autoscaler-hpa}/kodata/HEAD (100%) rename test/vendor/{github.com/knative/serving/cmd/autoscaler => knative.dev/serving/cmd/autoscaler-hpa}/kodata/LICENSE (100%) rename test/vendor/{github.com/knative/serving/cmd/autoscaler => knative.dev/serving/cmd/autoscaler-hpa}/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/autoscaler-hpa/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/autoscaler/OWNERS rename test/vendor/{github.com/knative/serving/cmd/controller => knative.dev/serving/cmd/autoscaler}/kodata/HEAD (100%) rename test/vendor/{github.com/knative/serving/cmd/controller => knative.dev/serving/cmd/autoscaler}/kodata/LICENSE (100%) rename test/vendor/{github.com/knative/serving/cmd/controller => knative.dev/serving/cmd/autoscaler}/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/autoscaler/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/autoscaler/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/autoscaler/main_test.go create mode 100644 test/vendor/knative.dev/serving/cmd/controller/OWNERS rename test/vendor/{github.com/knative/serving/cmd/queue => knative.dev/serving/cmd/controller}/kodata/HEAD (100%) rename 
test/vendor/{github.com/knative/serving/cmd/queue => knative.dev/serving/cmd/controller}/kodata/LICENSE (100%) rename test/vendor/{github.com/knative/serving/cmd/queue => knative.dev/serving/cmd/controller}/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/controller/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/controller/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/default-domain/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/networking/OWNERS rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/certmanager/kodata/HEAD (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/certmanager/kodata/LICENSE (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/certmanager/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/networking/certmanager/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/networking/doc.go rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/istio/kodata/HEAD (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/istio/kodata/LICENSE (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/cmd/networking/istio/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/networking/istio/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/networking/istio/main.go create mode 120000 test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/HEAD create mode 120000 test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/LICENSE create mode 120000 test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/VENDOR-LICENSE create mode 100644 test/vendor/knative.dev/serving/cmd/networking/nscert/main.go create mode 100644 
test/vendor/knative.dev/serving/cmd/queue/OWNERS rename test/vendor/{github.com/knative/serving/cmd/webhook => knative.dev/serving/cmd/queue}/kodata/HEAD (100%) rename test/vendor/{github.com/knative/serving/cmd/webhook => knative.dev/serving/cmd/queue}/kodata/LICENSE (100%) rename test/vendor/{github.com/knative/serving/cmd/webhook => knative.dev/serving/cmd/queue}/kodata/VENDOR-LICENSE (100%) create mode 120000 test/vendor/knative.dev/serving/cmd/queue/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/queue/main.go create mode 100644 test/vendor/knative.dev/serving/cmd/queue/main_test.go create mode 100644 test/vendor/knative.dev/serving/cmd/webhook/OWNERS create mode 120000 test/vendor/knative.dev/serving/cmd/webhook/kodata/HEAD create mode 120000 test/vendor/knative.dev/serving/cmd/webhook/kodata/LICENSE create mode 120000 test/vendor/knative.dev/serving/cmd/webhook/kodata/VENDOR-LICENSE create mode 120000 test/vendor/knative.dev/serving/cmd/webhook/kodata/refs create mode 100644 test/vendor/knative.dev/serving/cmd/webhook/main.go create mode 100644 test/vendor/knative.dev/serving/code-of-conduct.md create mode 100644 test/vendor/knative.dev/serving/community/README.md create mode 120000 test/vendor/knative.dev/serving/config/100-namespace.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-addressable-resolvers-clusterrole.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-clusterrole-certmanager.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-clusterrole-istio.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-clusterrole-metrics.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-clusterrole-namespaced.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-clusterrole.yaml create mode 120000 test/vendor/knative.dev/serving/config/200-podspecable-bindings-clusterrole.yaml create mode 120000 
test/vendor/knative.dev/serving/config/200-serviceaccount.yaml create mode 120000 test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics-server.yaml create mode 120000 test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics.yaml create mode 120000 test/vendor/knative.dev/serving/config/201-clusterrolebinding.yaml create mode 120000 test/vendor/knative.dev/serving/config/201-rolebinding-metrics-server.yaml create mode 120000 test/vendor/knative.dev/serving/config/202-gateway.yaml create mode 120000 test/vendor/knative.dev/serving/config/203-local-gateway.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-certificate.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-configuration.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-imagecache.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-ingress.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-metric.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-pa.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-revision.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-route.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-service.yaml create mode 120000 test/vendor/knative.dev/serving/config/300-sks.yaml create mode 120000 test/vendor/knative.dev/serving/config/500-webhook-configmap-validation.yaml create mode 120000 test/vendor/knative.dev/serving/config/500-webhook-defaulting.yaml create mode 120000 test/vendor/knative.dev/serving/config/500-webhook-resource-validation.yaml create mode 120000 test/vendor/knative.dev/serving/config/500-webhook-secret.yaml create mode 120000 test/vendor/knative.dev/serving/config/999-cache.yaml create mode 100644 test/vendor/knative.dev/serving/config/OWNERS create mode 100644 test/vendor/knative.dev/serving/config/README.md create mode 120000 test/vendor/knative.dev/serving/config/activator-hpa.yaml create mode 
120000 test/vendor/knative.dev/serving/config/activator.yaml create mode 120000 test/vendor/knative.dev/serving/config/autoscaler-hpa.yaml create mode 120000 test/vendor/knative.dev/serving/config/autoscaler.yaml create mode 100644 test/vendor/knative.dev/serving/config/cert-manager/200-clusterrole.yaml create mode 100644 test/vendor/knative.dev/serving/config/cert-manager/config.yaml create mode 100644 test/vendor/knative.dev/serving/config/cert-manager/controller.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-autoscaler.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-certmanager.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-defaults.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-deployment.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-domain.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-gc.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-istio.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-logging.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-network.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-observability.yaml create mode 120000 test/vendor/knative.dev/serving/config/config-tracing.yaml create mode 120000 test/vendor/knative.dev/serving/config/controller.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/100-namespace.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/999-cache.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/autoscaler.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/defaults.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/deployment.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/domain.yaml create mode 100644 
test/vendor/knative.dev/serving/config/core/configmaps/gc.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/logging.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/network.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/observability.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/configmaps/tracing.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/deployments/activator-hpa.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/deployments/activator.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/deployments/autoscaler.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/deployments/controller.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/deployments/webhook.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/200-addressable-resolvers-clusterrole.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole-namespaced.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/200-podspecable-bindings-clusterrole.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/200-serviceaccount.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/rbac/201-clusterrolebinding.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/certificate.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/configuration.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/ingress.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/metric.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/podautoscaler.yaml create mode 100644 
test/vendor/knative.dev/serving/config/core/resources/revision.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/route.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/serverlessservice.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/resources/service.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/webhooks/configmap-validation.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/webhooks/defaulting.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/webhooks/resource-validation.yaml create mode 100644 test/vendor/knative.dev/serving/config/core/webhooks/secret.yaml create mode 120000 test/vendor/knative.dev/serving/config/custom-metrics-apiservice.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/200-clusterrole-metrics.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics-server.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/201-rolebinding-metrics-server.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/controller.yaml create mode 100644 test/vendor/knative.dev/serving/config/hpa-autoscaling/custom-metrics-apiservice.yaml create mode 100644 test/vendor/knative.dev/serving/config/istio-ingress/200-clusterrole.yaml create mode 100644 test/vendor/knative.dev/serving/config/istio-ingress/202-gateway.yaml create mode 100644 test/vendor/knative.dev/serving/config/istio-ingress/203-local-gateway.yaml create mode 100644 test/vendor/knative.dev/serving/config/istio-ingress/config.yaml create mode 100644 test/vendor/knative.dev/serving/config/istio-ingress/controller.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/100-namespace.yaml create mode 100644 
test/vendor/knative.dev/serving/config/monitoring/OWNERS create mode 100644 test/vendor/knative.dev/serving/config/monitoring/README.md create mode 100644 test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/100-fluentd-configmap.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/200-fluentd.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/100-fluentd-configmap.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/200-fluentd.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-custom-config.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-efficiency.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-reconciler.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-scaling.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-prometheus-scrape-config.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/200-kube-controller-metrics.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/300-prometheus.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/metrics/stackdriver/100-stackdriver-serviceentry.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/105-zipkin-service.yaml create mode 100644 
test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/elasticsearch/100-jaeger.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/memory/100-jaeger.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin-in-mem/100-zipkin.yaml create mode 100644 test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin/100-zipkin.yaml create mode 100644 test/vendor/knative.dev/serving/config/namespace-wildcard-certs/controller.yaml create mode 120000 test/vendor/knative.dev/serving/config/networking-certmanager.yaml create mode 120000 test/vendor/knative.dev/serving/config/networking-istio.yaml create mode 120000 test/vendor/knative.dev/serving/config/networking-ns-cert.yaml create mode 100644 test/vendor/knative.dev/serving/config/post-install/default-domain.yaml create mode 120000 test/vendor/knative.dev/serving/config/webhook.yaml create mode 100644 test/vendor/knative.dev/serving/container.yaml create mode 100644 test/vendor/knative.dev/serving/content_sets.yml create mode 100644 test/vendor/knative.dev/serving/docs/client-conventions.md create mode 100644 test/vendor/knative.dev/serving/docs/product/personas.md create mode 100644 test/vendor/knative.dev/serving/docs/resources-overview.md create mode 100644 test/vendor/knative.dev/serving/docs/roadmap/scaling-2019.md create mode 100644 test/vendor/knative.dev/serving/docs/runtime-contract.md create mode 100644 test/vendor/knative.dev/serving/docs/scaling/DEVELOPMENT.md create mode 100644 test/vendor/knative.dev/serving/docs/scaling/OWNERS create mode 100644 test/vendor/knative.dev/serving/docs/scaling/README.md create mode 100644 test/vendor/knative.dev/serving/docs/spec/README.md create mode 100644 test/vendor/knative.dev/serving/docs/spec/errors.md create mode 100644 test/vendor/knative.dev/serving/docs/spec/images/object_model.png create mode 100644 test/vendor/knative.dev/serving/docs/spec/motivation.md create mode 100644 
test/vendor/knative.dev/serving/docs/spec/normative_examples.md create mode 100644 test/vendor/knative.dev/serving/docs/spec/overview.md create mode 100644 test/vendor/knative.dev/serving/docs/spec/spec.md create mode 100644 test/vendor/knative.dev/serving/hack/OWNERS create mode 100644 test/vendor/knative.dev/serving/hack/README.md create mode 100755 test/vendor/knative.dev/serving/hack/boilerplate/add-boilerplate.sh create mode 100644 test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.go.txt create mode 100755 test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.sh.txt create mode 100755 test/vendor/knative.dev/serving/hack/generate-yamls.sh create mode 100755 test/vendor/knative.dev/serving/hack/release.sh create mode 100755 test/vendor/knative.dev/serving/hack/update-codegen.sh create mode 100755 test/vendor/knative.dev/serving/hack/update-deps.sh create mode 100755 test/vendor/knative.dev/serving/hack/verify-codegen.sh create mode 100644 test/vendor/knative.dev/serving/install/CONFIG.md create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/Dockerfile.in create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/build-image/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/build-image/kubernetes.repo create mode 100755 test/vendor/knative.dev/serving/openshift/ci-operator/generate-ci-config.sh create mode 100755 test/vendor/knative.dev/serving/openshift/ci-operator/generate-dockerfiles.sh create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/activator/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler-hpa/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/certmanager/Dockerfile create mode 100644 
test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/controller/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/istio/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/nscert/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/queue/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/webhook/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/autoscale/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/failing/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/flaky/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/grpc-ping/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/hellovolume/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/helloworld/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/httpproxy/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/observed-concurrency/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv1/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv2/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/runtime/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/singlethreaded/Dockerfile create mode 100644 
test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/timeout/Dockerfile create mode 100644 test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/wsserver/Dockerfile create mode 100755 test/vendor/knative.dev/serving/openshift/e2e-tests-openshift.sh create mode 100644 test/vendor/knative.dev/serving/openshift/olm/README.md create mode 100644 test/vendor/knative.dev/serving/openshift/olm/knative-serving.catalogsource.yaml create mode 100644 test/vendor/knative.dev/serving/openshift/patches/003-routeretry.patch create mode 100644 test/vendor/knative.dev/serving/openshift/patches/004-grpc.patch create mode 100644 test/vendor/knative.dev/serving/openshift/patches/005-disablehpa.patch create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/.gitkeep create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.activator create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler-hpa create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.controller create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-certmanager create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-istio create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-nscert create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.queue create mode 100644 test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.webhook create mode 100644 test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/Dockerfile.in create mode 100755 
test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/gen_dockerfiles.sh create mode 100644 test/vendor/knative.dev/serving/openshift/release/README.md create mode 100755 test/vendor/knative.dev/serving/openshift/release/create-release-branch.sh create mode 100755 test/vendor/knative.dev/serving/openshift/release/generate-release.sh create mode 100644 test/vendor/knative.dev/serving/openshift/release/knative-serving-ci.yaml create mode 100644 test/vendor/knative.dev/serving/openshift/release/knative-serving-v0.12.1.yaml create mode 100755 test/vendor/knative.dev/serving/openshift/release/resolve.sh create mode 100755 test/vendor/knative.dev/serving/openshift/release/update-to-head.sh create mode 100644 test/vendor/knative.dev/serving/pkg/activator/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/activator/README.md create mode 100644 test/vendor/knative.dev/serving/pkg/activator/activator.go rename test/vendor/{github.com/knative/pkg/apis => knative.dev/serving/pkg/activator/config}/doc.go (91%) create mode 100644 test/vendor/knative.dev/serving/pkg/activator/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/config/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/context_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/context_handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/handler/testdata/config-tracing.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/activator/images/activator_activeRevision.png create mode 100644 test/vendor/knative.dev/serving/pkg/activator/images/activator_reserveRevision.png create mode 100644 test/vendor/knative.dev/serving/pkg/activator/images/routeTraffic.png create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/helpers.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/helpers_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/revision_backends.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/revision_backends_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/throttler.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/net/throttler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/stats_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/stats_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/activator/testing/roundtripper.go rename test/vendor/{github.com/knative/pkg/apis/duck/unstructured.go => knative.dev/serving/pkg/activator/util/header.go} (53%) create mode 100644 
test/vendor/knative.dev/serving/pkg/activator/util/header_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/register.go (84%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/doc.go (100%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_types.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go (68%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go (74%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_implements_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go (97%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/register.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go (68%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/config/defaults.go (84%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/config/defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/config/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/config/store.go (98%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/config/store_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/apis/config/testdata/config-defaults.yaml rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/config/zz_generated.deepcopy.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/OWNERS rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/generic_types.go (93%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/generic_types_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/ports.go (100%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/ports_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/register.go (58%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/certificate_defaults.go (100%) rename test/vendor/{github.com/knative => 
knative.dev}/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go (87%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/certificate_types.go (96%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/certificate_validation.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/doc.go (79%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go (86%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go (68%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/ingress_types.go (90%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/ingress_validation.go (96%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/register.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/serverlessservice_defaults.go (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go (62%) create mode 100644 
test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go (88%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go (89%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/roadmap-2018.md rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/fieldmask.go (99%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/k8s_validation.go (82%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/register.go (86%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/configuration_defaults.go (76%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults_test.go rename 
test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/configuration_lifecycle.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/configuration_types.go (95%) rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/configuration_validation.go (55%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/contexts.go (96%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/register.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/register_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/revision_types.go (86%) rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/revision_validation.go (65%) create mode 100644 
test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/route_defaults.go (75%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/route_lifecycle.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/route_types.go (88%) rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/route_validation.go (66%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/service_defaults.go (62%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/service_lifecycle.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/service_types.go (95%) rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => 
knative.dev/serving/pkg/apis/serving/v1}/service_validation.go (72%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation_test.go rename test/vendor/{github.com/knative/serving/pkg/apis/serving/v1beta1 => knative.dev/serving/pkg/apis/serving/v1}/zz_generated.deepcopy.go (96%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/README.md rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go (81%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go (63%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go (93%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/configuration_types.go (94%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/configuration_validation.go (85%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/conversion_error.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/register.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register_test.go rename 
test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/revision_conversion.go (87%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/revision_defaults.go (69%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go (56%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/revision_types.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/revision_validation.go (65%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/route_conversion.go (82%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/route_defaults.go (61%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/route_types.go (90%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/route_validation.go (80%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation_test.go rename 
test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/service_conversion.go (76%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/service_defaults.go (59%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/service_types.go (97%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/service_validation.go (92%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go (98%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/configuration_conversion.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_types.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation.go create mode 100644 
test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/doc.go (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/register.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/revision_conversion.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go (78%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_types.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/route_conversion.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_types.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation_test.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/apis/serving/v1beta1/service_conversion.go (97%) create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_types.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/README.md create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/autoscaler/collector.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/collector_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/config.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/config_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/fake/fake_metric_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/sample_size.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/sample_size_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/statserver/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/autoscaler/testdata/config-autoscaler.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/autoscaler/zz_generated.deepcopy.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/clientset.go rename test/vendor/{github.com/knative/serving/pkg/client => knative.dev/serving/pkg/client/certmanager}/clientset/versioned/doc.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/register.go rename test/vendor/{github.com/knative/serving/pkg/client => knative.dev/serving/pkg/client/certmanager}/clientset/versioned/scheme/doc.go (95%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/register.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/acme_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/challenge.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_acme_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_challenge.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_order.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/generated_expansion.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/order.go 
create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificaterequest.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certmanager_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/clusterissuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificaterequest.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certmanager_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_clusterissuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_issuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/generated_expansion.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/issuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/interface.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/challenge.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/order.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificaterequest.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/clusterissuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/issuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/generic.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/challenge.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/order.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/certificaterequest.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/clusterissuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/issuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/challenge.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/expansion_generated.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/order.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificaterequest.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/clusterissuer.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/issuer.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/clientset.go (74%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/clientset_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/register.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/doc.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/scheme/register.go (83%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go (86%) rename test/vendor/{github.com/knative/serving/pkg/client/clientset/versioned/typed/serving => knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling}/v1alpha1/doc.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/doc.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_autoscaling_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_podautoscaler.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go (90%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/metric.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go (90%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go (89%) rename test/vendor/{github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling => knative.dev/serving/pkg/client/clientset/versioned/typed/networking}/v1alpha1/doc.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_networking_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_serverlessservice.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go (90%) rename test/vendor/{github.com/knative => 
knative.dev}/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go (85%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go (90%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_serving_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/generated_expansion.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/serving_client.go rename test/vendor/{github.com/knative => 
knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go (90%) rename test/vendor/{github.com/knative/serving/pkg/client/clientset/versioned/typed/networking => knative.dev/serving/pkg/client/clientset/versioned/typed/serving}/v1alpha1/doc.go (94%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_serving_client.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go (95%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go (90%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go (94%) 
create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_serving_client.go rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go (95%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go (89%) rename test/vendor/{github.com/knative => knative.dev}/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go (89%) create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/podautoscaler.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/informers/externalversions/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/generic.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/serverlessservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/revision.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/client/client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/client/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/podscalable.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/podautoscaler.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/fake/fake.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/serverlessservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake/fake.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/clientset.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/clientset_generated.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/register.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/register.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/destinationrule.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/envoyfilter.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_destinationrule.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_envoyfilter.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_networking_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_serviceentry.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_sidecar.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_virtualservice.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/generated_expansion.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/networking_client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/serviceentry.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/sidecar.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/virtualservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/generic.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/destinationrule.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/envoyfilter.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/serviceentry.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/sidecar.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/virtualservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/client/client.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/client/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/destinationrule.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/envoyfilter.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/serviceentry.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/sidecar.go create mode 100644 
test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/fake/fake.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/virtualservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/destinationrule.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/envoyfilter.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/serviceentry.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/sidecar.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/virtualservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/podautoscaler.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/serverlessservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/expansion_generated.go 
create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/expansion_generated.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/deployment/config.go create mode 100644 test/vendor/knative.dev/serving/pkg/deployment/config_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/deployment/doc.go create mode 120000 test/vendor/knative.dev/serving/pkg/deployment/testdata/config-deployment.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/deployment/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/gc/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/gc/config.go create mode 100644 test/vendor/knative.dev/serving/pkg/gc/config_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/gc/doc.go create mode 120000 
test/vendor/knative.dev/serving/pkg/gc/testdata/config-gc.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/http/header.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/header_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/request_log.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/request_log_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/response_recorder.go create mode 100644 test/vendor/knative.dev/serving/pkg/http/response_recorder_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/logging/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/logging/config_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/logging/sync_file_writer.go create mode 100644 test/vendor/knative.dev/serving/pkg/logging/sync_file_writer_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/logging/testdata/config-logging.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/logging/testdata/test-config-logging.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/config.go create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/config_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/key.go create mode 120000 test/vendor/knative.dev/serving/pkg/metrics/testdata/config-observability.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/metrics/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/network/bufferpool.go create mode 100644 
test/vendor/knative.dev/serving/pkg/network/bufferpool_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/ingress/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/ingress/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/ingress/ingress_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/network.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/network_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/probe_handler.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/probe_handler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/status/status.go create mode 100644 test/vendor/knative.dev/serving/pkg/network/status/status_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/network/testdata/config-network.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/network/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/pool/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/pool/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/pool/interface.go create mode 100644 test/vendor/knative.dev/serving/pkg/pool/pool.go create mode 100644 test/vendor/knative.dev/serving/pkg/pool/pool_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/queue/breaker.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/breaker_test.go rename test/vendor/{github.com/knative/pkg/apis/duck/interface.go => knative.dev/serving/pkg/queue/constants.go} (56%) create mode 100644 test/vendor/knative.dev/serving/pkg/queue/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/forwarded_shim.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/forwarded_shim_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/queue/health/health_state.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/health/health_state_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/health/probe.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/health/probe_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/readiness/probe.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/readiness/probe_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/request_metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/request_metric_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/stats.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/stats_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/timeout.go create mode 100644 test/vendor/knative.dev/serving/pkg/queue/timeout_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/testdata/config-autoscaler.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/resources/hpa.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/resources/hpa_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store_test.go create mode 120000 
test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/queueing_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/filter.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/filter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/gc/config/testdata/config-gc.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/config/zz_generated.deepcopy.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/gc/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/gc/gc_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/doc.go create mode 
100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/labeler/controller.go rename test/vendor/{github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_validation.go => knative.dev/serving/pkg/reconciler/labeler/doc.go} (67%) create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/labeler/labels.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/metric/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/metric/metric.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/metric/metric_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/reconciler.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/reconciler_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/retry.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/retry_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/observability.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-defaults.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-network.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-tracing.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/config/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/revision/queueing_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/reconcile_resources.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/constants.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/env_var.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue_test.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/revision_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/revision/table_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/README.md create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/store_test.go create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-domain.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-gc.yaml create mode 120000 test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-network.yaml create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/config/zz_generated.deepcopy.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/active_revisions.svg create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revision.svg create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revisions.svg create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/route/domains/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/queueing_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels_test.go rename test/vendor/{github.com/knative/pkg/kmp => knative.dev/serving/pkg/reconciler/route/resources/names}/doc.go (78%) create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/route_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/table_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/global_resync_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/controller.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/doc.go create mode 100644 
test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/resources/shared_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/service/service_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/factory.go create mode 100644 test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/listers.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/resources/doc.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/endpoints.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/endpoints_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/meta.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/meta_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/resources.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/scale.go create mode 100644 test/vendor/knative.dev/serving/pkg/resources/scale_test.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/OWNERS create mode 100644 test/vendor/knative.dev/serving/pkg/testing/functional.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1/configuration.go create mode 100644 
test/vendor/knative.dev/serving/pkg/testing/v1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1alpha1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1alpha1/functional.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1alpha1/revision.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1alpha1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1alpha1/service.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1beta1/configuration.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1beta1/route.go create mode 100644 test/vendor/knative.dev/serving/pkg/testing/v1beta1/service.go create mode 100644 test/vendor/knative.dev/serving/sample/README.md create mode 100644 test/vendor/knative.dev/serving/test/OWNERS create mode 100644 test/vendor/knative.dev/serving/test/README.md create mode 100644 test/vendor/knative.dev/serving/test/adding_tests.md create mode 100755 test/vendor/knative.dev/serving/test/apicoverage.sh create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/README.md create mode 100755 test/vendor/knative.dev/serving/test/apicoverage/image/apicoverage-webhook.yaml create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/common/common.go create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/kodata/ignoredfields.yaml create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/main.go create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/rules/coverage_rules.go create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/rules/display_rules.go create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/service-account.yaml create mode 100644 test/vendor/knative.dev/serving/test/apicoverage/image/webhook/webhook_server.go create 
mode 100644 test/vendor/knative.dev/serving/test/apicoverage/tools/main.go create mode 100644 test/vendor/knative.dev/serving/test/cleanup.go create mode 100644 test/vendor/knative.dev/serving/test/clients.go create mode 100644 test/vendor/knative.dev/serving/test/config/100-namespace.yaml create mode 100644 test/vendor/knative.dev/serving/test/config/300-configmap.yaml create mode 100644 test/vendor/knative.dev/serving/test/config/300-secret.yaml create mode 100644 test/vendor/knative.dev/serving/test/config/config-logging.yaml create mode 100644 test/vendor/knative.dev/serving/test/config/mtls/destinationrule.yaml create mode 100644 test/vendor/knative.dev/serving/test/config/mtls/policy.yaml create mode 100644 test/vendor/knative.dev/serving/test/conformance.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/README.md create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/blue_green_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/configuration_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/errorcondition_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/generatename_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/main_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/migration_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/resources_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/revision_timeout_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/route_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/service_account_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/service_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/single_threaded_test.go create 
mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/util.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1/volumes_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/blue_green_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/configuration_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/errorcondition_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/generatename_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/main_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/resources_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/revision_timeout_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/route_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/service_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/single_threaded_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/util.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/volumes_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/blue_green_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/configuration_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/errorcondition_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/generatename_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/main_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/migration_test.go create mode 100644 
test/vendor/knative.dev/serving/test/conformance/api/v1beta1/resources_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/revision_timeout_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/route_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_account_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/single_threaded_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/util.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/api/v1beta1/volumes_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/basic_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/grpc_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/headers_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/hosts_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/path_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/percentage_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/retry_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/timeout_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/tls_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/update_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/util.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/ingress/websocket_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/cgroup_test.go create mode 100644 
test/vendor/knative.dev/serving/test/conformance/runtime/cmd_args_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/container_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/envpropagation_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/envvars_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/file_descriptor_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/filesystem_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/header_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/main_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/protocol_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/readiness_probe_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/sysctl_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/user_test.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/util.go create mode 100644 test/vendor/knative.dev/serving/test/conformance/runtime/workingdir_test.go create mode 100644 test/vendor/knative.dev/serving/test/crd.go create mode 100644 test/vendor/knative.dev/serving/test/e2e-common.sh create mode 100755 test/vendor/knative.dev/serving/test/e2e-smoke-tests.sh create mode 100755 test/vendor/knative.dev/serving/test/e2e-tests.sh create mode 100755 test/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh create mode 100644 test/vendor/knative.dev/serving/test/e2e/README.md create mode 100644 test/vendor/knative.dev/serving/test/e2e/activator_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/autoscale_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/autotls/auto_tls_test.go create mode 100644 
test/vendor/knative.dev/serving/test/e2e/destroypod_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/e2e.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/egress_traffic_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/grpc_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/helloworld_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/image_pull_error_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/istio/probing_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/minscale_readiness_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/namespace_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/pod_schedule_error_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/rollback_byo_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/route_service_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/scale.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/service_to_service_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/subroutes_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e/websocket_test.go create mode 100644 test/vendor/knative.dev/serving/test/e2e_flags.go create mode 100644 test/vendor/knative.dev/serving/test/example-build.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/Benchmarks.md create mode 100644 test/vendor/knative.dev/serving/test/performance/README.md create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/cluster.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml create mode 120000 
test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/HEAD create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/dev.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/prod.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/refs create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/main.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/sla.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/dev.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/prod.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/cluster.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/benchmark.yaml create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/HEAD create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/basic-template.yaml create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/dev.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/prod.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/refs create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/main.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/sla.go create 
mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/dev.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/prod.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/cluster.yaml create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/HEAD create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/dev.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/prod.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/refs create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test-setup.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/main.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/sla.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/dev.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/prod.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/cluster.yaml create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/HEAD create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/dev.config create mode 120000 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/prod.config create mode 120000 
test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/refs create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/main.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/scale-from-zero.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/sla.go create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/dev.config create mode 100644 test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/prod.config create mode 100644 test/vendor/knative.dev/serving/test/performance/config/README.md create mode 100644 test/vendor/knative.dev/serving/test/performance/config/config-mako.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/config/job.yaml create mode 100644 test/vendor/knative.dev/serving/test/performance/dev.md create mode 100644 test/vendor/knative.dev/serving/test/performance/latency_test.go create mode 100644 test/vendor/knative.dev/serving/test/performance/metrics/request.go create mode 100644 test/vendor/knative.dev/serving/test/performance/metrics/runtime.go create mode 100644 test/vendor/knative.dev/serving/test/performance/observed_concurency_test.go create mode 100755 test/vendor/knative.dev/serving/test/performance/performance-tests.sh create mode 100644 test/vendor/knative.dev/serving/test/performance/performance.go create mode 100644 test/vendor/knative.dev/serving/test/performance/profiling.md create mode 100644 test/vendor/knative.dev/serving/test/performance/scale_test.go create mode 100755 test/vendor/knative.dev/serving/test/presubmit-tests.sh create mode 100644 test/vendor/knative.dev/serving/test/prober.go create mode 100644 test/vendor/knative.dev/serving/test/scale/scale_test.go create mode 100644 test/vendor/knative.dev/serving/test/system.go create mode 100644 
test/vendor/knative.dev/serving/test/test_images/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/autoscale/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/autoscale/autoscale.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/autoscale/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/failing/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/failing/failing.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/failing/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/flaky/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/flaky/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/flaky/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/grpc-ping/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.pb.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.proto create mode 100644 test/vendor/knative.dev/serving/test/test_images/grpc-ping/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/hellovolume/hellovolume.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/hellovolume/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/helloworld/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/helloworld/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/httpproxy/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/httpproxy/httpproxy.go create mode 100644 
test/vendor/knative.dev/serving/test/test_images/httpproxy/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/observed-concurrency/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/observed-concurrency/observed_concurrency.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/observed-concurrency/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/args.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/cgroup.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/env.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file_access_attempt.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/handler.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/mount.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/proc.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/request.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/runtime.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/handlers/user.go create 
mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/runtime/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/singlethreaded/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/singlethreaded/main.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/singlethreaded/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/timeout/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/timeout/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/test_images/timeout/timeout.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/wsserver/README.md create mode 100644 test/vendor/knative.dev/serving/test/test_images/wsserver/echo.go create mode 100644 test/vendor/knative.dev/serving/test/test_images/wsserver/service.yaml create mode 100644 test/vendor/knative.dev/serving/test/types/runtime.go create mode 100644 test/vendor/knative.dev/serving/test/upgrade/README.md create mode 100644 test/vendor/knative.dev/serving/test/upgrade/probe_test.go create mode 100644 test/vendor/knative.dev/serving/test/upgrade/service_postdowngrade_test.go create mode 100644 test/vendor/knative.dev/serving/test/upgrade/service_postupgrade_test.go create mode 100644 test/vendor/knative.dev/serving/test/upgrade/service_preupgrade_test.go create mode 100644 test/vendor/knative.dev/serving/test/upgrade/upgrade.go create mode 100755 test/vendor/knative.dev/serving/test/upload-test-images.sh create mode 100644 test/vendor/knative.dev/serving/test/util.go create mode 100644 test/vendor/knative.dev/serving/test/v1/configuration.go create mode 100644 test/vendor/knative.dev/serving/test/v1/crd.go create mode 100644 test/vendor/knative.dev/serving/test/v1/revision.go create mode 100644 test/vendor/knative.dev/serving/test/v1/route.go create mode 100644 
test/vendor/knative.dev/serving/test/v1/service.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/configuration.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/crd.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/ingress.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/revision.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/route.go create mode 100644 test/vendor/knative.dev/serving/test/v1alpha1/service.go create mode 100644 test/vendor/knative.dev/serving/test/v1beta1/configuration.go create mode 100644 test/vendor/knative.dev/serving/test/v1beta1/crd.go create mode 100644 test/vendor/knative.dev/serving/test/v1beta1/revision.go create mode 100644 test/vendor/knative.dev/serving/test/v1beta1/route.go create mode 100644 test/vendor/knative.dev/serving/test/v1beta1/service.go create mode 100644 test/vendor/knative.dev/serving/third_party/OWNERS create mode 100644 test/vendor/knative.dev/serving/third_party/VENDOR-LICENSE create mode 100644 test/vendor/knative.dev/serving/third_party/ambassador-latest/README.md create mode 100644 test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-rbac.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-service.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager-crds.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager.yaml create mode 100755 test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/download-cert-manager.sh create mode 100644 test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager-crds.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager.yaml create mode 100755 test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/download-cert-manager.sh create mode 100644 
test/vendor/knative.dev/serving/third_party/config/monitoring/OWNERS create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/elasticsearch.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/kibana.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/kube-state-metrics.yaml rename test/vendor/{github.com/knative => knative.dev}/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/LICENSE (100%) rename test/vendor/{github.com/knative => knative.dev}/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/NOTICE (100%) create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/deployment-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-capacity-planning-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-health-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-status-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-control-plane-status-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-resource-requests-dashboard.json create 
mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/nodes-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/pods-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/statefulset-dashboard.json create mode 100644 test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/node-exporter.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/contour-latest/README.md create mode 100644 test/vendor/knative.dev/serving/third_party/contour-latest/contour.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/gloo-latest/README.md create mode 100755 test/vendor/knative.dev/serving/third_party/gloo-latest/download-gloo.sh create mode 100644 test/vendor/knative.dev/serving/third_party/gloo-latest/gloo.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/gloo-latest/value-overrides.yaml create mode 120000 test/vendor/knative.dev/serving/third_party/istio-1.3-latest create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/README.md create mode 100755 test/vendor/knative.dev/serving/third_party/istio-1.3.6/download-istio.sh create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/drain-seconds.yaml.patch create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-mesh.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-no-mesh.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-crds.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-knative-extras.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-minimal.yaml create mode 100644 
test/vendor/knative.dev/serving/third_party/istio-1.3.6/namespace.yaml.patch create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-extras.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-lean.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-local.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.3.6/values.yaml create mode 120000 test/vendor/knative.dev/serving/third_party/istio-1.4-latest create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/README.md create mode 100755 test/vendor/knative.dev/serving/third_party/istio-1.4.2/download-istio.sh create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/drain-seconds.yaml.patch create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-mesh.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-no-mesh.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-crds.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-knative-extras.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-minimal.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/namespace.yaml.patch create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-extras.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-lean.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-local.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/istio-1.4.2/values.yaml create mode 100644 test/vendor/knative.dev/serving/third_party/kourier-latest/README.md create mode 100755 test/vendor/knative.dev/serving/third_party/kourier-latest/download-kourier.sh create mode 100644 
test/vendor/knative.dev/serving/third_party/kourier-latest/kourier.yaml create mode 100644 test/vendor/knative.dev/test-infra/LICENSE rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/README.md (77%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/dummy.go (100%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/e2e-tests.sh (91%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/library.sh (83%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/markdown-link-check-config.rc (100%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/markdown-lint-config.rc (100%) create mode 100755 test/vendor/knative.dev/test-infra/scripts/performance-tests.sh rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/presubmit-tests.sh (97%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/scripts/release.sh (92%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/tools/dep-collector/README.md (100%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/tools/dep-collector/imports.go (100%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/tools/dep-collector/licenses.go (100%) rename test/vendor/{github.com/knative => knative.dev}/test-infra/tools/dep-collector/main.go (100%) diff --git a/hack/lib/vars.bash b/hack/lib/vars.bash index ed266f7094..6b9ea3b295 100644 --- a/hack/lib/vars.bash +++ b/hack/lib/vars.bash @@ -3,7 +3,7 @@ readonly BUILD_NUMBER=${BUILD_NUMBER:-$(uuidgen)} # shellcheck disable=SC1091,SC1090 -source "$(dirname "${BASH_SOURCE[0]}")/../../test/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh" +source "$(dirname "${BASH_SOURCE[0]}")/../../test/vendor/knative.dev/test-infra/scripts/e2e-tests.sh" readonly KNATIVE_VERSION="${KNATIVE_VERSION:-v0.12.1}" diff --git a/test/Gopkg.lock b/test/Gopkg.lock index 6dd991d42c..b8e7c6a8ee 100644 --- 
a/test/Gopkg.lock +++ b/test/Gopkg.lock @@ -1,6 +1,46 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + digest = "1:1e2e5200803c058bc348766b58e16721b30a037c656b6499599064fbaeac24fd" + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "container/apiv1", + "internal/version", + "monitoring/apiv3", + "trace/apiv2", + ] + pruneopts = "NUT" + revision = "d96ccb2ba7586bb79a416471882d347754a78ce5" + version = "v0.53.0" + +[[projects]] + digest = "1:0a936e17f4fc0c65615913199ce0a387a67a4c72e017747b6f38c898168b1b75" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "NUT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:642cf8e80572f9dc0677b0f241c8ab2e715c9dccc215270ea873c86ddca0062c" + name = "contrib.go.opencensus.io/exporter/prometheus" + packages = ["."] + pruneopts = "NUT" + revision = "f4a2c1e53ec45636355d35fb9022b64e4bdd4a91" + version = "v0.1.0" + +[[projects]] + digest = "1:c3fd5ddaad733530174bba5dd787d98a45d181851a95a0b7362be7bce7144f56" + name = "contrib.go.opencensus.io/exporter/stackdriver" + packages = [ + ".", + "monitoredresource", + ] + pruneopts = "NUT" + revision = "59d068f8d8ff5b653916aa30cdc4e13c7f15d56e" + [[projects]] digest = "1:5d72bbcc9c8667b11c3dc3cbe681c5a6f71e5096744c0bf7726ab5c6425d5dc4" name = "github.com/BurntSushi/toml" @@ -25,6 +65,58 @@ pruneopts = "NUT" revision = "de5bf2ad457846296e2031421a34e2568e304e35" +[[projects]] + digest = "1:872f441afa7e53dcd20734bedce76b915b8fc61724349eef96ac1fdf6f9fe885" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + 
"aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/context", + "internal/ini", + "internal/sdkio", + "internal/sdkmath", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "internal/strings", + "internal/sync/singleflight", + "private/protocol", + "private/protocol/json/jsonutil", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/xml/xmlutil", + "service/sts", + "service/sts/stsiface", + ] + pruneopts = "NUT" + revision = "a9494254b255c4db77d30a6c0a23a82390063845" + version = "v1.29.4" + +[[projects]] + digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "NUT" + revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" + version = "v1.0.1" + [[projects]] digest = "1:45c41cd27a8d986998680bfc86da0bbff5fa4f90d0f446c00636c8b099028ffe" name = "github.com/blang/semver" @@ -33,6 +125,21 @@ revision = "ba2c2ddd89069b46a7011d4106f6868f17ee1705" version = "v3.6.1" +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "NUT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -60,6 +167,14 @@ revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f" version = "v4.5.0" +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + [[projects]] digest = 
"1:3758c86e787dfe5792a23430f34636106a16da914446724399c9c12f121a225d" name = "github.com/go-openapi/jsonpointer" @@ -93,11 +208,15 @@ version = "v0.19.5" [[projects]] - digest = "1:633dde05baa7afd3544cfe4ac16fe3c5450b868cfc6b1558621ee39ebf2273ea" + digest = "1:ddeb4ef861c31ebe86546fa009a05f271454342eb89d99245f90531c327c7ac3" name = "github.com/gogo/protobuf" packages = [ + "gogoproto", + "jsonpb", "proto", + "protoc-gen-gogo/descriptor", "sortkeys", + "types", ] pruneopts = "NUT" revision = "0ca988a254f991240804bf9821f3450d87ccbb1b" @@ -120,14 +239,20 @@ revision = "869f871628b6baa9cfbc11732cdf6546b17c1298" [[projects]] - digest = "1:796f9c63c68774a89eade387a8476e45ec2b34f5649b0726983204202c3649d6" + digest = "1:a677057cef8b68b66003c2775ed1126bbd7e9e372b54b7c1a7c5201a2f1f3eb0" name = "github.com/golang/protobuf" packages = [ + "descriptor", + "jsonpb", "proto", + "protoc-gen-go/descriptor", "ptypes", "ptypes/any", "ptypes/duration", + "ptypes/empty", + "ptypes/struct", "ptypes/timestamp", + "ptypes/wrappers", ] pruneopts = "NUT" revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" @@ -147,11 +272,11 @@ revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b" [[projects]] - digest = "1:7915c656a10819b240db94ffd6afc6a621af4687db735341d9919668750485ec" + digest = "1:10aa62acd56f635de190110a836aa261b3774e697b2b6a7af27d8263cf856d47" name = "github.com/google/go-containerregistry" packages = ["pkg/name"] pruneopts = "NUT" - revision = "abf9ef06abd9f532944e28e8942644ea4b710a40" + revision = "b02d448a3705facf11018efff34f1d2830be5724" [[projects]] digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" @@ -176,6 +301,22 @@ pruneopts = "NUT" revision = "c3068f13fcc3961fd05f96f13c8250e350db4209" +[[projects]] + digest = "1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" + version = "v1.1.1" + +[[projects]] + 
digest = "1:4b76f3e067eed897a45242383a2aa4d0a2fdbf73a8d00c03167dba80c43630b1" + name = "github.com/googleapis/gax-go" + packages = ["v2"] + pruneopts = "NUT" + revision = "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2" + version = "v2.0.5" + [[projects]] digest = "1:5e092394bed250d7fda36cef8b7e1d22bb2d5f71878bbb137be5fc1c2705f965" name = "github.com/googleapis/gnostic" @@ -188,6 +329,18 @@ revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab" version = "v0.3.1" +[[projects]] + digest = "1:0b548dd80464826f0170656d981e5057255b5e1da8b725dd56fb2a0d70816b21" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "NUT" + revision = "4c2cec4158f65aebe0290d3bee883b13f7b07c6f" + version = "v1.13.0" + [[projects]] digest = "1:ed860d2b2c1d066d36a89c982eefc7d019badd534f60e87ab65d3d94f0797ef0" name = "github.com/hashicorp/golang-lru" @@ -207,6 +360,13 @@ revision = "7c29201646fa3de8506f701213473dd407f19646" version = "v0.3.7" +[[projects]] + digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "NUT" + revision = "c2b33e84" + [[projects]] digest = "1:92f6419f388bd31a433b1388910f15a882c9980e383e89ebf8fb2524583707ac" name = "github.com/json-iterator/go" @@ -215,57 +375,6 @@ revision = "27518f6661eba504be5a7a9a9f6d9460d892ade3" version = "v1.1.7" -[[projects]] - branch = "release-0.7" - digest = "1:ec18e8243516c57aa703d40cbe376e9b440382f40ad7fda781dbaf0a67d7050f" - name = "github.com/knative/pkg" - packages = [ - "apis", - "apis/duck", - "apis/duck/v1alpha1", - "apis/duck/v1beta1", - "configmap", - "kmeta", - "kmp", - "ptr", - ] - pruneopts = "NUT" - revision = "d82505e6c5b4ce46562d6c242be0e706791f35bd" - -[[projects]] - branch = "release-0.7" - digest = "1:258c15a37cc2925f081f45bf01e2e8857035528455edae8c6880476d6437e108" - name = "github.com/knative/serving" - packages = [ - "pkg/apis/autoscaling", - 
"pkg/apis/autoscaling/v1alpha1", - "pkg/apis/config", - "pkg/apis/networking", - "pkg/apis/networking/v1alpha1", - "pkg/apis/serving", - "pkg/apis/serving/v1alpha1", - "pkg/apis/serving/v1beta1", - "pkg/client/clientset/versioned", - "pkg/client/clientset/versioned/scheme", - "pkg/client/clientset/versioned/typed/autoscaling/v1alpha1", - "pkg/client/clientset/versioned/typed/networking/v1alpha1", - "pkg/client/clientset/versioned/typed/serving/v1alpha1", - "pkg/client/clientset/versioned/typed/serving/v1beta1", - ] - pruneopts = "NUT" - revision = "ce4ce706afb5d9c323d1c073746d64562a722f8f" - -[[projects]] - branch = "master" - digest = "1:615f3c6b974179c583edf50234fd87c5731d02ef941f314cbd4dd2766d3a619a" - name = "github.com/knative/test-infra" - packages = [ - "scripts", - "tools/dep-collector", - ] - pruneopts = "UT" - revision = "ca04b8453a8779f3c800c5e4bcda43ff53ec6f80" - [[projects]] digest = "1:58999a98719fddbac6303cb17e8d85b945f60b72f48e3a2df6b950b97fa926f1" name = "github.com/konsorten/go-windows-terminal-sequences" @@ -294,6 +403,14 @@ pruneopts = "NUT" revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" +[[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NUT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + [[projects]] digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" name = "github.com/modern-go/concurrent" @@ -367,6 +484,50 @@ revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" version = "v0.8.1" +[[projects]] + digest = "1:097cc61836050f45cbb712ae3bb45d66fba464c16b8fac09907fa3c1f753eff6" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NUT" + revision = "170205fb58decfd011f1550d4cfb737230d7ae4f" + version = "v1.1.0" + +[[projects]] + digest = 
"1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NUT" + revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9" + version = "v0.2.0" + +[[projects]] + digest = "1:0c9d56afe4f5b3ab5658a3270a680db8f6936d7794377fa0b9880663958b18b0" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NUT" + revision = "d978bcb1309602d68bb4ba69cf3f8ed900e07308" + version = "v0.9.1" + +[[projects]] + digest = "1:dacb29568d7b8e145b953ca0a9334e516ce02e56c61e902372c6c074c4846107" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/fs", + "internal/util", + ] + pruneopts = "NUT" + revision = "4850c197847aa06aecc37570077fc16e9215d565" + version = "v0.0.10" + [[projects]] digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" name = "github.com/sergi/go-diff" @@ -392,17 +553,20 @@ version = "v1.0.3" [[projects]] - digest = "1:530b87bdf95de2b6e26a8ecba904ea814ce4d2b4c97ae5cddbd3bcac9c30fc60" + digest = "1:b3125df339c80dd88259bd401103be60df5f5181335e8e717ec8d3edbd50e90e" name = "go.opencensus.io" packages = [ ".", "internal", "internal/tagencoding", "metric/metricdata", + "metric/metricexport", "metric/metricproducer", + "plugin/ocgrpc", "plugin/ochttp", "plugin/ochttp/propagation/b3", "resource", + "resource/resourcekeys", "stats", "stats/internal", "stats/view", @@ -476,30 +640,47 @@ [[projects]] branch = "master" - digest = "1:34347fb8a028e8d5d32e1a52da0796fd1c3d8d4aad1ff0ade4a6e23d07b00d32" + digest = "1:f2ee9e215c271a2bbaa7eec11f4e460be1edf63cb7c711df92cb086afd78269b" name = "golang.org/x/net" packages = [ "context", "context/ctxhttp", "http/httpguts", "http2", + "http2/h2c", "http2/hpack", "idna", + "internal/timeseries", + "trace", ] pruneopts = "NUT" revision = "24e19bdeb0f2d062d8e2640d50a7aaf2a7f80e7a" [[projects]] branch = "master" - digest = 
"1:f3a2e6d7423b8c19cdb2203cda9672900cc43012ea69f30ff6874dd453f44aec" + digest = "1:1519760444b90c560eb01373869bc66fd539e6fe1bf77af22047c43edc40ab35" name = "golang.org/x/oauth2" packages = [ ".", + "google", "internal", + "jws", + "jwt", ] pruneopts = "NUT" revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" +[[projects]] + branch = "master" + digest = "1:a2fc247e64b5dafd3251f12d396ec85f163d5bb38763c4997856addddf6e78d8" + name = "golang.org/x/sync" + packages = [ + "errgroup", + "semaphore", + ] + pruneopts = "NUT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + [[projects]] branch = "master" digest = "1:8cc4634b9c72bbb85daadb12f5a3f93f320b378618e216121f632055c1fe8aa5" @@ -570,21 +751,122 @@ revision = "4191b8cbba092238a318a71cdff48b20b4e1e5d8" [[projects]] - digest = "1:372cd8eba449f9b6db06677d0e73fa193ec5b19aaee148f355503ab6127045ca" + digest = "1:285d952d0e451dbf779fcf3faad3dd32eda53f55505fff6c310ba7254d160f87" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "support/bundler", + "transport", + "transport/grpc", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "NUT" + revision = "6f5c88b9e8c709c0f1fff128c30b041c71d14da4" + version = "v0.17.0" + +[[projects]] + digest = "1:a955e7c44c2be14b61aa2ddda744edfdfbc6817e993703a16e303c277ba84449" name = "google.golang.org/appengine" packages = [ + ".", "internal", + "internal/app_identity", "internal/base", "internal/datastore", "internal/log", + "internal/modules", "internal/remote_api", + "internal/socket", "internal/urlfetch", + "socket", "urlfetch", ] pruneopts = "NUT" revision = "5f2a59506353b8d5ba8cbbcd9f3c1f41f1eaf079" version = "v1.6.2" +[[projects]] + branch = "master" + digest = "1:3607fb6e3385809bd511e907b0ad5210c795f1ba624478212f0b6bb1538cd7e2" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api", + "googleapis/api/annotations", + "googleapis/api/distribution", + 
"googleapis/api/httpbody", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/container/v1", + "googleapis/devtools/cloudtrace/v2", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "googleapis/type/calendarperiod", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "dad8c97a84f542cf0c67e3ab67b1c09e795fb4af" + +[[projects]] + digest = "1:bed37afd01349fd9075b260777c1e5e29b6f14e8569b327f0c331f601e08807f" + name = "google.golang.org/grpc" + packages = [ + ".", + "attributes", + "backoff", + "balancer", + "balancer/base", + "balancer/grpclb", + "balancer/grpclb/grpc_lb_v1", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/alts", + "credentials/alts/internal", + "credentials/alts/internal/authinfo", + "credentials/alts/internal/conn", + "credentials/alts/internal/handshaker", + "credentials/alts/internal/handshaker/service", + "credentials/alts/internal/proto/grpc_gcp", + "credentials/google", + "credentials/internal", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc" + version = "v1.27.1" + [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" @@ -635,6 +917,30 @@ revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a" version = "2019.2.3" +[[projects]] + digest = "1:9579dd8b1ccd8cf54ed576af68438b69588cc136eac90b10aa7c8d1a1c8334ff" + name = "istio.io/api" + 
packages = ["networking/v1alpha3"] + pruneopts = "NUT" + revision = "ed4b507c54e129239375e3521f233aaa397d23b7" + version = "1.4.4" + +[[projects]] + digest = "1:57e8c0255e33ab72e09219665d5ed7733a694e0092ab4a69b97100aa27bcbbb4" + name = "istio.io/client-go" + packages = ["pkg/apis/networking/v1alpha3"] + pruneopts = "NUT" + revision = "9053b0f86b0359b7e66293eb6098cc551aec6f69" + version = "1.4.4" + +[[projects]] + digest = "1:9c9f11af1b1f2ae03d897ba5de27103ec1c9c43605663ccdef67831d6a462a30" + name = "istio.io/gogo-genproto" + packages = ["googleapis/google/api"] + pruneopts = "NUT" + revision = "f7d19ec0141d49ac9efc83b5e61fa81ba103b445" + version = "1.4.4" + [[projects]] digest = "1:760e08df99c3c3b53764ef7c41c03ea9d90e8594d9df42364d9209e99a0352e1" name = "k8s.io/api" @@ -735,7 +1041,7 @@ version = "kubernetes-1.15.3" [[projects]] - digest = "1:007d8d9344e11483a93ec301ccc9e9da904a615290f4cfc30f18af79ec87c0d2" + digest = "1:c086c6cc9924c12b4846baf73bf3b5b3fa183fb4d053bf3bd51a4c7ca2268517" name = "k8s.io/client-go" packages = [ "discovery", @@ -866,9 +1172,11 @@ "pkg/apis/clientauthentication/v1beta1", "pkg/version", "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", "plugin/pkg/client/auth/oidc", "rest", "rest/watch", + "third_party/forked/golang/template", "tools/auth", "tools/cache", "tools/clientcmd", @@ -885,8 +1193,10 @@ "util/connrotation", "util/flowcontrol", "util/homedir", + "util/jsonpath", "util/keyutil", "util/retry", + "util/workqueue", ] pruneopts = "NUT" revision = "e14f31a72a77f7aa82a95eaf542d1194fb027d04" @@ -941,24 +1251,73 @@ revision = "94aeca20bf0991bf33922a5938174b9147ab8ca7" [[projects]] - branch = "master" - digest = "1:bd7eb46917507cd3a1d14f0775d92a5a4ec504e451cad8742102c2a8fa5cdfd5" + branch = "release-0.12" + digest = "1:2f2c8b4a65ab290bff2b859b7e9d82a8cf9e084269b5863bc8f1560c8d14c918" name = "knative.dev/pkg" packages = [ "apis", + "apis/duck", + "apis/duck/v1", + "apis/duck/v1alpha1", + "apis/duck/v1beta1", "changeset", + 
"configmap", + "controller", + "kmeta", "kmp", "logging", "logging/logkey", + "metrics", + "metrics/metricskey", + "network", + "profiling", + "ptr", + "signals", + "system", "test", + "test/helpers", "test/ingress", "test/logging", "test/monitoring", "test/spoof", "test/zipkin", + "tracker", ] pruneopts = "NUT" - revision = "34157973944a3419f1e38fce8dfb11a2fb5aadbb" + revision = "b8dc5fbc6d2f4717a69d15382921a51f93ab4cbb" + +[[projects]] + branch = "release-v0.12.1" + digest = "1:4a6ccf9751f34af3c9098c5be21582d7ff8d1133e8132421ad070be39b70d85c" + name = "knative.dev/serving" + packages = [ + "pkg/apis/autoscaling", + "pkg/apis/autoscaling/v1alpha1", + "pkg/apis/config", + "pkg/apis/networking", + "pkg/apis/networking/v1alpha1", + "pkg/apis/serving", + "pkg/apis/serving/v1", + "pkg/apis/serving/v1alpha1", + "pkg/apis/serving/v1beta1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/autoscaling/v1alpha1", + "pkg/client/clientset/versioned/typed/networking/v1alpha1", + "pkg/client/clientset/versioned/typed/serving/v1", + "pkg/client/clientset/versioned/typed/serving/v1alpha1", + "pkg/client/clientset/versioned/typed/serving/v1beta1", + "pkg/client/istio/clientset/versioned", + "pkg/client/istio/clientset/versioned/scheme", + "pkg/client/istio/clientset/versioned/typed/networking/v1alpha3", + "pkg/gc", + "pkg/network", + "pkg/reconciler/route/config", + "test", + ] + pruneopts = "" + revision = "6b50fe14278b845300fa87ff18542adc2a3f9161" + source = "github.com/openshift/knative-serving" [[projects]] digest = "1:523c3ac6cc9add7e48dc6677c7a58263aff6dd70036a2675417681fcde6818ad" @@ -973,6 +1332,17 @@ revision = "034c1b718821111eff15c2e199491c8ec5c51f3d" version = "v0.11.1" +[[projects]] + branch = "master" + digest = "1:043997c1e0120a3c74c981acf40438f54db2753a756e8896fc67fd9edb9ccea9" + name = "knative.dev/test-infra" + packages = [ + "scripts", + "tools/dep-collector", + ] + pruneopts = "UT" + revision 
= "fb304f6a7ac965fb3eb5cb16d5e0301ca60e3247" + [[projects]] digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" name = "sigs.k8s.io/yaml" @@ -985,10 +1355,6 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ - "github.com/knative/serving/pkg/apis/serving/v1beta1", - "github.com/knative/serving/pkg/client/clientset/versioned", - "github.com/knative/test-infra/scripts", - "github.com/knative/test-infra/tools/dep-collector", "github.com/openshift/api/route/v1", "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1", "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1", @@ -1011,6 +1377,11 @@ "knative.dev/serving-operator/pkg/apis/serving/v1alpha1", "knative.dev/serving-operator/pkg/client/clientset/versioned", "knative.dev/serving-operator/pkg/client/clientset/versioned/typed/serving/v1alpha1", + "knative.dev/serving/pkg/apis/serving/v1", + "knative.dev/serving/pkg/client/clientset/versioned", + "knative.dev/serving/test", + "knative.dev/test-infra/scripts", + "knative.dev/test-infra/tools/dep-collector", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/test/Gopkg.toml b/test/Gopkg.toml index 9c386c1d17..4933d1082f 100644 --- a/test/Gopkg.toml +++ b/test/Gopkg.toml @@ -1,7 +1,9 @@ # Force dep to vendor the code generators, which aren't imported just used at dev time. required = [ - "github.com/knative/test-infra/scripts", - "github.com/knative/test-infra/tools/dep-collector", + "knative.dev/test-infra/scripts", + "knative.dev/test-infra/tools/dep-collector", + # Force vendoring of the OpenShift KnativeServing tests + "knative.dev/serving/test", ] [prune] @@ -10,9 +12,17 @@ required = [ unused-packages = true [[prune.project]] - name = "github.com/knative/test-infra" + name = "knative.dev/test-infra" non-go = false + +# Override OpenShift KnativeServing settings to not prune. 
+[[prune.project]] + name = "knative.dev/serving" + go-tests = false + non-go = false + unused-packages = false + [[override]] name = "k8s.io/api" version = "kubernetes-1.15.3" @@ -39,11 +49,13 @@ required = [ [[constraint]] branch = "master" - name = "github.com/knative/test-infra" + name = "knative.dev/test-infra" -[[constraint]] - branch = "release-0.7" - name = "github.com/knative/serving" +# The following constraint causes incompatible versions, disallowing +# OpenShift Knative Serving being vendored. +# [[constraint]] +# branch = "release-0.7" +# name = "github.com/knative/serving" [[constraint]] branch = "master" @@ -56,3 +68,16 @@ required = [ [[ constraint ]] name = "github.com/openshift/client-go" version = "v3.9.0" + +# Check out the specifically supported OpenShift Knative Serving +# branch for testing +[[ constraint ]] + name = "knative.dev/serving" + source = "github.com/openshift/knative-serving" + branch = "release-v0.12.1" + +# Without explicitly using v0.12, the test package will fail to build with: +# knative.dev/pkg/metrics/stackdriver_exporter.go:70:3: unknown field 'GetMonitoredResource' in struct literal of type stackdriver.Options +[[override]] + name = "knative.dev/pkg" + branch = "release-0.12" diff --git a/test/clients.go b/test/clients.go index 5e8549090a..f19416160c 100644 --- a/test/clients.go +++ b/test/clients.go @@ -6,7 +6,6 @@ import ( "strings" "testing" - servingversioned "github.com/knative/serving/pkg/client/clientset/versioned" routev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client" olmversioned "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" @@ -17,6 +16,7 @@ import ( aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" servingoperatorversioned "knative.dev/serving-operator/pkg/client/clientset/versioned" servingoperatorv1alpha1 
"knative.dev/serving-operator/pkg/client/clientset/versioned/typed/serving/v1alpha1" + servingversioned "knative.dev/serving/pkg/client/clientset/versioned" ) // Context holds objects related to test execution diff --git a/test/lib.bash b/test/lib.bash index e0eaba7615..95604c2949 100644 --- a/test/lib.bash +++ b/test/lib.bash @@ -76,6 +76,13 @@ function remove_temporary_gopath { fi } +function pivot_to_knative_serving { + logger.info "Pivoting to Knative Serving Dir" + kn_serving_rootdir="$(pwd)/test/vendor/knative.dev/serving/" + logger.info "New path: $kn_serving_rootdir" + export KNATIVE_SERVING_HOME="$kn_serving_rootdir" +} + function checkout_knative_serving { local knative_version=$1 # Setup a temporary GOPATH to safely check out the repository without breaking other things. @@ -109,7 +116,7 @@ function run_knative_serving_e2e_and_conformance_tests { local knative_version=$1 if [[ -z ${KNATIVE_SERVING_HOME+x} ]]; then - checkout_knative_serving "$knative_version" + pivot_to_knative_serving "$knative_version" fi cd "$KNATIVE_SERVING_HOME" || return $? @@ -145,7 +152,7 @@ function run_knative_serving_rolling_upgrade_tests { rootdir="$(dirname "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" if [[ -z ${KNATIVE_SERVING_HOME+x} ]]; then - checkout_knative_serving "$knative_version" + pivot_to_knative_serving "$knative_version" fi cd "$KNATIVE_SERVING_HOME" || return $? 
diff --git a/test/service.go b/test/service.go index e6f731429d..8d5c85de12 100644 --- a/test/service.go +++ b/test/service.go @@ -3,7 +3,6 @@ package test import ( "strings" - servingv1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" routev1 "github.com/openshift/api/route/v1" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" @@ -12,18 +11,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" ) -func Service(name, namespace, image string) *servingv1beta1.Service { - s := &servingv1beta1.Service{ +func Service(name, namespace, image string) *servingv1.Service { + s := &servingv1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, - Spec: servingv1beta1.ServiceSpec{ - ConfigurationSpec: servingv1beta1.ConfigurationSpec{ - Template: servingv1beta1.RevisionTemplateSpec{ - Spec: servingv1beta1.RevisionSpec{ + Spec: servingv1.ServiceSpec{ + ConfigurationSpec: servingv1.ConfigurationSpec{ + Template: servingv1.RevisionTemplateSpec{ + Spec: servingv1.RevisionSpec{ PodSpec: corev1.PodSpec{ Containers: []corev1.Container{{ Image: image, @@ -42,7 +42,7 @@ func Service(name, namespace, image string) *servingv1beta1.Service { return s } -func WithServiceReady(ctx *Context, name, namespace, image string) (*servingv1beta1.Service, error) { +func WithServiceReady(ctx *Context, name, namespace, image string) (*servingv1.Service, error) { service, err := CreateService(ctx, name, namespace, image) if err != nil { return nil, err @@ -54,22 +54,22 @@ func WithServiceReady(ctx *Context, name, namespace, image string) (*servingv1be return service, nil } -func CreateService(ctx *Context, name, namespace, image string) (*servingv1beta1.Service, error) { - service, err := ctx.Clients.Serving.ServingV1beta1().Services(namespace).Create(Service(name, namespace, image)) +func CreateService(ctx *Context, name, namespace, 
image string) (*servingv1.Service, error) { + service, err := ctx.Clients.Serving.ServingV1().Services(namespace).Create(Service(name, namespace, image)) if err != nil { return nil, err } ctx.AddToCleanup(func() error { - return ctx.Clients.Serving.ServingV1beta1().Services(namespace).Delete(service.Name, &metav1.DeleteOptions{}) + return ctx.Clients.Serving.ServingV1().Services(namespace).Delete(service.Name, &metav1.DeleteOptions{}) }) return service, nil } -func WaitForServiceState(ctx *Context, name, namespace string, inState func(s *servingv1beta1.Service, err error) (bool, error)) (*servingv1beta1.Service, error) { - var lastState *servingv1beta1.Service +func WaitForServiceState(ctx *Context, name, namespace string, inState func(s *servingv1.Service, err error) (bool, error)) (*servingv1.Service, error) { + var lastState *servingv1.Service var err error waitErr := wait.PollImmediate(Interval, Timeout, func() (bool, error) { - lastState, err = ctx.Clients.Serving.ServingV1beta1().Services(namespace).Get(name, metav1.GetOptions{}) + lastState, err = ctx.Clients.Serving.ServingV1().Services(namespace).Get(name, metav1.GetOptions{}) return inState(lastState, err) }) @@ -101,7 +101,7 @@ func WaitForOperatorDepsDeleted(ctx *Context) error { return nil } -func IsServiceReady(s *servingv1beta1.Service, err error) (bool, error) { +func IsServiceReady(s *servingv1.Service, err error) (bool, error) { return s.Generation == s.Status.ObservedGeneration && s.Status.IsReady() && s.Status.URL != nil && s.Status.URL.Host != "", err } @@ -180,7 +180,7 @@ func CreateKubeService(ctx *Context, name, namespace string) (*corev1.Service, e } ctx.AddToCleanup(func() error { - return ctx.Clients.Serving.ServingV1beta1().Services(namespace).Delete(svc.Name, &metav1.DeleteOptions{}) + return ctx.Clients.Serving.ServingV1().Services(namespace).Delete(svc.Name, &metav1.DeleteOptions{}) }) return svc, nil diff --git a/test/vendor/github.com/knative/serving/LICENSE 
b/test/vendor/cloud.google.com/go/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/LICENSE rename to test/vendor/cloud.google.com/go/LICENSE diff --git a/test/vendor/cloud.google.com/go/compute/metadata/metadata.go b/test/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000000..9b1afb5cc2 --- /dev/null +++ b/test/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,524 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. 
+ metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. 
+ // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. 
+ return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Email calls Client.Email on the default client. +func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. 
+func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. 
To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return "", "", err + } + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. 
+func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// Email returns the email address associated with the service account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Email(serviceAccount string) (string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. 
+func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + return c.getTrimmed("instance/name") +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. 
+// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. 
+ } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/test/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go b/test/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go new file mode 100644 index 0000000000..22978d3771 --- /dev/null +++ b/test/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go @@ -0,0 +1,676 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package container + +import ( + "context" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + "google.golang.org/api/transport" + containerpb "google.golang.org/genproto/googleapis/container/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient. 
+type ClusterManagerCallOptions struct { + ListClusters []gax.CallOption + GetCluster []gax.CallOption + CreateCluster []gax.CallOption + UpdateCluster []gax.CallOption + UpdateNodePool []gax.CallOption + SetNodePoolAutoscaling []gax.CallOption + SetLoggingService []gax.CallOption + SetMonitoringService []gax.CallOption + SetAddonsConfig []gax.CallOption + SetLocations []gax.CallOption + UpdateMaster []gax.CallOption + SetMasterAuth []gax.CallOption + DeleteCluster []gax.CallOption + ListOperations []gax.CallOption + GetOperation []gax.CallOption + CancelOperation []gax.CallOption + GetServerConfig []gax.CallOption + ListNodePools []gax.CallOption + GetNodePool []gax.CallOption + CreateNodePool []gax.CallOption + DeleteNodePool []gax.CallOption + RollbackNodePoolUpgrade []gax.CallOption + SetNodePoolManagement []gax.CallOption + SetLabels []gax.CallOption + SetLegacyAbac []gax.CallOption + StartIPRotation []gax.CallOption + CompleteIPRotation []gax.CallOption + SetNodePoolSize []gax.CallOption + SetNetworkPolicy []gax.CallOption + SetMaintenancePolicy []gax.CallOption +} + +func defaultClusterManagerClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("container.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultClusterManagerCallOptions() *ClusterManagerCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ClusterManagerCallOptions{ + ListClusters: retry[[2]string{"default", "idempotent"}], + GetCluster: retry[[2]string{"default", "idempotent"}], + CreateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateNodePool: 
retry[[2]string{"default", "non_idempotent"}], + SetNodePoolAutoscaling: retry[[2]string{"default", "non_idempotent"}], + SetLoggingService: retry[[2]string{"default", "non_idempotent"}], + SetMonitoringService: retry[[2]string{"default", "non_idempotent"}], + SetAddonsConfig: retry[[2]string{"default", "non_idempotent"}], + SetLocations: retry[[2]string{"default", "non_idempotent"}], + UpdateMaster: retry[[2]string{"default", "non_idempotent"}], + SetMasterAuth: retry[[2]string{"default", "non_idempotent"}], + DeleteCluster: retry[[2]string{"default", "idempotent"}], + ListOperations: retry[[2]string{"default", "idempotent"}], + GetOperation: retry[[2]string{"default", "idempotent"}], + CancelOperation: retry[[2]string{"default", "non_idempotent"}], + GetServerConfig: retry[[2]string{"default", "idempotent"}], + ListNodePools: retry[[2]string{"default", "idempotent"}], + GetNodePool: retry[[2]string{"default", "idempotent"}], + CreateNodePool: retry[[2]string{"default", "non_idempotent"}], + DeleteNodePool: retry[[2]string{"default", "idempotent"}], + RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolManagement: retry[[2]string{"default", "non_idempotent"}], + SetLabels: retry[[2]string{"default", "non_idempotent"}], + SetLegacyAbac: retry[[2]string{"default", "non_idempotent"}], + StartIPRotation: retry[[2]string{"default", "non_idempotent"}], + CompleteIPRotation: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolSize: retry[[2]string{"default", "non_idempotent"}], + SetNetworkPolicy: retry[[2]string{"default", "non_idempotent"}], + SetMaintenancePolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ClusterManagerClient is a client for interacting with Google Container Engine API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type ClusterManagerClient struct { + // The connection to the service. 
+ conn *grpc.ClientConn + + // The gRPC API client. + clusterManagerClient containerpb.ClusterManagerClient + + // The call options for this service. + CallOptions *ClusterManagerCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClusterManagerClient creates a new cluster manager client. +// +// Google Container Engine Cluster Manager v1 +func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ClusterManagerClient{ + conn: conn, + CallOptions: defaultClusterManagerCallOptions(), + + clusterManagerClient: containerpb.NewClusterManagerClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ClusterManagerClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ClusterManagerClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListClusters lists all clusters owned by a project in either the specified zone or all +// zones. 
+func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...) + var resp *containerpb.ListClustersResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetCluster gets the details of a specific cluster. +func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...) + var resp *containerpb.Cluster + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateCluster creates a cluster, consisting of the specified number and type of Google +// Compute Engine instances. +// +// By default, the cluster is created in the project's +// default network (at /compute/docs/networks-and-firewalls#networks). +// +// One firewall is added for the cluster. After cluster creation, +// the cluster creates routes for each node to allow the containers +// on that node to communicate with all other instances in the +// cluster. +// +// Finally, an entry is added to the project's global metadata indicating +// which CIDR range is being used by the cluster. 
+func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCluster updates the settings of a specific cluster. +func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNodePool updates the version and/or image type of a specific node pool. +func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool. +func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLoggingService sets the logging service of a specific cluster. +func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMonitoringService sets the monitoring service of a specific cluster. 
+func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetAddonsConfig sets the addons of a specific cluster. +func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLocations sets the locations of a specific cluster. +func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateMaster updates the master of a specific cluster. +func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMasterAuth used to set master auth materials. Currently supports :- +// Changing the admin password of a specific cluster. +// This can be either via password generation or explicitly set the password. +func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker +// nodes. +// +// Firewalls and routes that were configured during cluster creation +// are also deleted. +// +// Other Google Compute Engine resources that might be in use by the cluster +// (e.g. 
load balancer resources) will not be deleted if they weren't present +// at the initial create time. +func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists all operations in a project in a specific zone or all zones. +func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) + var resp *containerpb.ListOperationsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetOperation gets the specified operation. +func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) 
+ var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CancelOperation cancels the specified operation. +func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetServerConfig returns configuration info about the Container Engine service. +func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...) + var resp *containerpb.ServerConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNodePools lists the node pools for a cluster. 
+func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...) + var resp *containerpb.ListNodePoolsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetNodePool retrieves the node pool requested. +func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...) + var resp *containerpb.NodePool + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNodePool creates a node pool for a cluster. +func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNodePool deletes a node pool from a cluster. +func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RollbackNodePoolUpgrade roll back the previously Aborted or Failed NodePool upgrade. +// This will be an no-op if the last upgrade successfully completed. +func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolManagement sets the NodeManagement options for a node pool. 
+func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLabels sets labels on a cluster. +func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster. +func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// StartIPRotation start master IP rotation. +func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CompleteIPRotation completes master IP rotation. +func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolSize sets the size of a specific node pool. +func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...) 
+ var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNetworkPolicy enables/Disables Network Policy for a cluster. +func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMaintenancePolicy sets the maintenance policy for a cluster. +func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/test/vendor/cloud.google.com/go/container/apiv1/doc.go b/test/vendor/cloud.google.com/go/container/apiv1/doc.go new file mode 100644 index 0000000000..0f995054c2 --- /dev/null +++ b/test/vendor/cloud.google.com/go/container/apiv1/doc.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package container is an auto-generated package for the +// Google Container Engine API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// The Google Kubernetes Engine API is used for building and managing +// container +// based applications, powered by the open source Kubernetes technology. +package container // import "cloud.google.com/go/container/apiv1" + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/test/vendor/cloud.google.com/go/internal/version/version.go b/test/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 0000000000..6161440ea1 --- /dev/null +++ b/test/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20200212" + +// Go returns the Go runtime version. The returned string +// has no whitespace. 
+func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go new file mode 100644 index 0000000000..ddf14ca1a8 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go @@ -0,0 +1,318 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. +type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + return &AlertPolicyCallOptions{ + ListAlertPolicies: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetAlertPolicy: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateAlertPolicy: []gax.CallOption{}, + DeleteAlertPolicy: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, 
gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateAlertPolicy: []gax.CallOption{}, + } +} + +// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type AlertPolicyClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewAlertPolicyClient creates a new alert policy service client. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Stackdriver Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be “unhealthy” and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the “Monitoring” tab in +// Cloud Console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultAlertPolicyClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &AlertPolicyClient{ + connPool: connPool, + CallOptions: defaultAlertPolicyCallOptions(), + + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. 
+func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListAlertPolicies lists the existing alerting policies for the project. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + var resp *monitoringpb.ListAlertPoliciesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.AlertPolicies, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateAlertPolicy creates a new alerting policy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) 
+ var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteAlertPolicy deletes an alerting policy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) 
+ var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 0000000000..4834741d41 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,108 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Stackdriver Monitoring API. +// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. The table entries below are +// presented in alphabetical order, not in order of common use. For +// explanations of the concepts found in the table entries, read the +// [Stackdriver Monitoring documentation](/monitoring/docs). +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. 
+// +// Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit godoc.org/cloud.google.com/go. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "context" + "runtime" + "strings" + "unicode" + + "google.golang.org/grpc/metadata" +) + +const versionClient = "20200212" + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 0000000000..571e3dce1d --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,432 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + return &GroupCallOptions{ + ListGroups: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetGroup: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateGroup: []gax.CallOption{}, + UpdateGroup: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + 
codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteGroup: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListGroupMembers: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type GroupClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. 
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + connPool: connPool, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) 
+ it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. 
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) 
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. 
+type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. 
+ // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 0000000000..02962b7941 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,544 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMonitoredResourceDescriptor: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListMetricDescriptors: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetMetricDescriptor: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + 
CreateMetricDescriptor: []gax.CallOption{}, + DeleteMetricDescriptor: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTimeSeries: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateTimeSeries: []gax.CallOption{}, + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type MetricClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultMetricClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricClient{ + connPool: connPool, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. 
+func (c *MetricClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) 
+ it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) 
+ var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) + it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. 
+ // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go new file mode 100644 index 0000000000..32f5e12919 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go @@ -0,0 +1,545 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. +type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption + SendNotificationChannelVerificationCode []gax.CallOption + GetNotificationChannelVerificationCode []gax.CallOption + VerifyNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannelDescriptor: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return 
gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListNotificationChannels: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetNotificationChannel: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateNotificationChannel: []gax.CallOption{}, + UpdateNotificationChannel: []gax.CallOption{}, + DeleteNotificationChannel: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + SendNotificationChannelVerificationCode: []gax.CallOption{}, + GetNotificationChannelVerificationCode: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + VerifyNotificationChannel: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. 
However, fields must not be modified concurrently with method calls. +type NotificationChannelClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewNotificationChannelClient creates a new notification channel service client. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultNotificationChannelClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &NotificationChannelClient{ + connPool: connPool, + CallOptions: defaultNotificationChannelCallOptions(), + + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) 
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) + it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + var resp *monitoringpb.ListNotificationChannelDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.ChannelDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) 
+ it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + var resp *monitoringpb.ListNotificationChannelsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.NotificationChannels, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. 
+func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or PagerDuty service. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. 
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNotificationChannel deletes a notification channel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code +// can then be supplied in VerifyNotificationChannel to verify the channel. 
+func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.SendNotificationChannelVerificationCode[0:len(c.CallOptions.SendNotificationChannelVerificationCode):len(c.CallOptions.SendNotificationChannelVerificationCode)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then +// be used in a call to VerifyNotificationChannel() on a different channel +// with an equivalent identity in the same or in a different project. This +// makes it possible to copy a channel between projects without requiring +// manual reverification of the channel. If the channel is not in the +// verified state, this method will fail (in other words, this may only be +// used if the SendNotificationChannelVerificationCode and +// VerifyNotificationChannel paths have already been used to put the given +// channel into the verified state). 
+// +// There is no guarantee that the verification codes returned by this method +// will be of a similar structure or form as the ones that are delivered +// to the channel via SendNotificationChannelVerificationCode; while +// VerifyNotificationChannel() will recognize both the codes delivered via +// SendNotificationChannelVerificationCode() and returned from +// GetNotificationChannelVerificationCode(), it is typically the case that +// the verification codes delivered via +// SendNotificationChannelVerificationCode() will be shorter and also +// have a shorter expiration (e.g. codes such as “G-123456”) whereas +// GetVerificationCode() will typically return a much longer, websafe base +// 64 encoded string that has a longer expiration time. +func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetNotificationChannelVerificationCode[0:len(c.CallOptions.GetNotificationChannelVerificationCode):len(c.CallOptions.GetNotificationChannelVerificationCode)], opts...) + var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code +// delivered to the channel as a result of calling +// SendNotificationChannelVerificationCode. 
+func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.VerifyNotificationChannel[0:len(c.CallOptions.VerifyNotificationChannel):len(c.CallOptions.VerifyNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go new file mode 100644 index 0000000000..b2b514ba52 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go @@ -0,0 +1,107 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +// GroupProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func GroupProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// GroupGroupPath returns the path for the group resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/groups/%s", project, group) +// instead. 
+func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// MetricProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func MetricProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor) +// instead. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor) +// instead. +func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" +} + +// UptimeCheckProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func UptimeCheckProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig) +// instead. 
+func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string { + return "" + + "projects/" + + project + + "/uptimeCheckConfigs/" + + uptimeCheckConfig + + "" +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go new file mode 100644 index 0000000000..84de580f36 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/service_monitoring_client.go @@ -0,0 +1,505 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient. 
+type ServiceMonitoringCallOptions struct { + CreateService []gax.CallOption + GetService []gax.CallOption + ListServices []gax.CallOption + UpdateService []gax.CallOption + DeleteService []gax.CallOption + CreateServiceLevelObjective []gax.CallOption + GetServiceLevelObjective []gax.CallOption + ListServiceLevelObjectives []gax.CallOption + UpdateServiceLevelObjective []gax.CallOption + DeleteServiceLevelObjective []gax.CallOption +} + +func defaultServiceMonitoringClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions { + return &ServiceMonitoringCallOptions{ + CreateService: []gax.CallOption{}, + GetService: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServices: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateService: []gax.CallOption{}, + DeleteService: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateServiceLevelObjective: []gax.CallOption{}, + GetServiceLevelObjective: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ 
+ codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListServiceLevelObjectives: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateServiceLevelObjective: []gax.CallOption{}, + DeleteServiceLevelObjective: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// ServiceMonitoringClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type ServiceMonitoringClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient + + // The call options for this service. + CallOptions *ServiceMonitoringCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewServiceMonitoringClient creates a new service monitoring service client. +// +// The Stackdriver Monitoring Service-Oriented Monitoring API has endpoints for +// managing and querying aspects of a workspace’s services. These include the +// Service's monitored resources, its Service-Level Objectives, and a taxonomy +// of categorized Health Metrics. 
+func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultServiceMonitoringClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ServiceMonitoringClient{ + connPool: connPool, + CallOptions: defaultServiceMonitoringCallOptions(), + + serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ServiceMonitoringClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// CreateService create a Service. +func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateService[0:len(c.CallOptions.CreateService):len(c.CallOptions.CreateService)], opts...) 
+ var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetService get the named Service. +func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetService[0:len(c.CallOptions.GetService):len(c.CallOptions.GetService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListServices list Services for this workspace. +func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListServices[0:len(c.CallOptions.ListServices):len(c.CallOptions.ListServices)], opts...) 
+ it := &ServiceIterator{} + req = proto.Clone(req).(*monitoringpb.ListServicesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) { + var resp *monitoringpb.ListServicesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServices(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Services, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// UpdateService update this Service. +func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateService[0:len(c.CallOptions.UpdateService):len(c.CallOptions.UpdateService)], opts...) + var resp *monitoringpb.Service + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteService soft delete this Service. 
+func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteService[0:len(c.CallOptions.DeleteService):len(c.CallOptions.DeleteService)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteService(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service. +func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateServiceLevelObjective[0:len(c.CallOptions.CreateServiceLevelObjective):len(c.CallOptions.CreateServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.CreateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetServiceLevelObjective get a ServiceLevelObjective by name. 
+func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetServiceLevelObjective[0:len(c.CallOptions.GetServiceLevelObjective):len(c.CallOptions.GetServiceLevelObjective)], opts...) + var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.GetServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service. +func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListServiceLevelObjectives[0:len(c.CallOptions.ListServiceLevelObjectives):len(c.CallOptions.ListServiceLevelObjectives)], opts...) 
+ it := &ServiceLevelObjectiveIterator{} + req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) { + var resp *monitoringpb.ListServiceLevelObjectivesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.ListServiceLevelObjectives(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.ServiceLevelObjectives, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// UpdateServiceLevelObjective update the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateServiceLevelObjective[0:len(c.CallOptions.UpdateServiceLevelObjective):len(c.CallOptions.UpdateServiceLevelObjective)], opts...) 
+ var resp *monitoringpb.ServiceLevelObjective + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.serviceMonitoringClient.UpdateServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceLevelObjective delete the given ServiceLevelObjective. +func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteServiceLevelObjective[0:len(c.CallOptions.DeleteServiceLevelObjective):len(c.CallOptions.DeleteServiceLevelObjective)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.serviceMonitoringClient.DeleteServiceLevelObjective(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ServiceIterator manages a stream of *monitoringpb.Service. +type ServiceIterator struct { + items []*monitoringpb.Service + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceIterator) Next() (*monitoringpb.Service, error) { + var item *monitoringpb.Service + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective. +type ServiceLevelObjectiveIterator struct { + items []*monitoringpb.ServiceLevelObjective + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) { + var item *monitoringpb.ServiceLevelObjective + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceLevelObjectiveIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/test/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go new file mode 100644 index 0000000000..f0477a1d28 --- /dev/null +++ b/test/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go @@ -0,0 +1,420 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. 
+ +package monitoring + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. +type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetUptimeCheckConfig: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateUptimeCheckConfig: []gax.CallOption{}, + UpdateUptimeCheckConfig: []gax.CallOption{}, + DeleteUptimeCheckConfig: 
[]gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListUptimeCheckIps: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 30000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type UptimeCheckClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewUptimeCheckClient creates a new uptime check service client. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// Uptime check configurations in the Stackdriver Monitoring product. An Uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud Console] +// (http://console.cloud.google.com (at http://console.cloud.google.com)), selecting the appropriate project, +// clicking on “Monitoring” on the left-hand side to navigate to Stackdriver, +// and then clicking on “Uptime”. 
+func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultUptimeCheckClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &UptimeCheckClient{ + connPool: connPool, + CallOptions: defaultUptimeCheckCallOptions(), + + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project +// (leaving out any invalid configurations). +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...) 
+ it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + var resp *monitoringpb.ListUptimeCheckConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.UptimeCheckConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// GetUptimeCheckConfig gets a single Uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// CreateUptimeCheckConfig creates a new Uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via updateMask. +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...) 
+ var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail +// if the Uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListUptimeCheckIps returns the list of IP addresses that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...) 
+ it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + var resp *monitoringpb.ListUptimeCheckIpsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.UptimeCheckIps, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. +type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/test/vendor/cloud.google.com/go/trace/apiv2/doc.go b/test/vendor/cloud.google.com/go/trace/apiv2/doc.go new file mode 100644 index 0000000000..bae32555d0 --- /dev/null +++ b/test/vendor/cloud.google.com/go/trace/apiv2/doc.go @@ -0,0 +1,105 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package trace is an auto-generated package for the +// Stackdriver Trace API. +// +// Sends application trace data to Stackdriver Trace for viewing. Trace data +// is collected for all App Engine applications by default. Trace data from +// other applications can be provided using this API. This library is used to +// interact with the Trace API directly. 
If you are looking to instrument +// your application for Stackdriver Trace, we recommend using OpenCensus. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit godoc.org/cloud.google.com/go. +package trace // import "cloud.google.com/go/trace/apiv2" + +import ( + "context" + "runtime" + "strings" + "unicode" + + "google.golang.org/grpc/metadata" +) + +const versionClient = "20200212" + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/test/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/test/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go new file mode 100644 index 0000000000..80b8d40b58 --- /dev/null +++ b/test/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// ProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SpanPath returns the path for the span resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span) +// instead. 
+func SpanPath(project, trace, span string) string { + return "" + + "projects/" + + project + + "/traces/" + + trace + + "/spans/" + + span + + "" +} diff --git a/test/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/test/vendor/cloud.google.com/go/trace/apiv2/trace_client.go new file mode 100644 index 0000000000..54713c7ad4 --- /dev/null +++ b/test/vendor/cloud.google.com/go/trace/apiv2/trace_client.go @@ -0,0 +1,160 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package trace + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + BatchWriteSpans []gax.CallOption + CreateSpan []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultCallOptions() *CallOptions { + return &CallOptions{ + BatchWriteSpans: []gax.CallOption{}, + CreateSpan: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.20, + }) + }), + }, + } +} + +// Client is a client for interacting with Stackdriver Trace API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type Client struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. A single trace may +// contain span(s) from multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &Client{ + connPool: connPool, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(connPool), + } + c.setGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *Client) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.connPool.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchWriteSpans sends new spans to new or existing traces. You cannot update +// existing spans. +func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateSpan creates a new span. 
+func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) + var resp *cloudtracepb.Span + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/test/vendor/github.com/knative/pkg/LICENSE b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE similarity index 100% rename from test/vendor/github.com/knative/pkg/LICENSE rename to test/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE diff --git a/test/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go new file mode 100644 index 0000000000..297e44b6e7 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ocagent + +import ( + "math/rand" + "time" +) + +var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) + +// retries function fn upto n times, if fn returns an error lest it returns nil early. +// It applies exponential backoff in units of (1< 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + traceExporter, err := traceSvcClient.Export(ctx) + if err != nil { + return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) + } + + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := traceExporter.Send(firstTraceMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + ae.mu.Lock() + ae.traceExporter = traceExporter + ae.mu.Unlock() + + // Initiate the config service by sending over node identifier info. + configStream, err := traceSvcClient.Config(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) + } + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} + if err := configStream.Send(firstCfgMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + // In the background, handle trace configurations that are beamed down + // by the agent, but also reply to it with the applied configuration. + go ae.handleConfigStreaming(configStream) + + return nil +} + +func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { + metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) + metricsExporter, err := metricsSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) + } + // Initiate the metrics service by sending over the first message just containing the Node and Resource. 
+ firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := metricsExporter.Send(firstMetricsMessage); err != nil { + return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) + } + + ae.mu.Lock() + ae.metricsExporter = metricsExporter + ae.mu.Unlock() + + // With that we are good to go and can start sending metrics + return nil +} + +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + var dialOpts []grpc.DialOption + if ae.clientTransportCredentials != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) + } else if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + if ae.compressor != "" { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) + } + dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + if len(ae.grpcDialOptions) != 0 { + dialOpts = append(dialOpts, ae.grpcDialOptions...) + } + + ctx := context.Background() + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + return grpc.DialContext(ctx, addr, dialOpts...) +} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + // Note: We haven't yet implemented configuration sending so we + // should NOT be changing connection states within this function for now. + for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. 
+ return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.RLock() + cc := ae.grpcClientConn + started := ae.started + stopped := ae.stopped + ae.mu.RUnlock() + + if !started { + return errNotStarted + } + if stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. 
+ var err error + if cc != nil { + err = cc.Close() + } + + // At this point we can change the state variables: started and stopped + ae.mu.Lock() + ae.started = false + ae.stopped = true + ae.mu.Unlock() + close(ae.stopCh) + + // Ensure that the backgroundConnector returns + <-ae.backgroundConnectionDoneCh + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, 1) +} + +func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { + if batch == nil || len(batch.Spans) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.traceExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. + // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.traceExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func (ae *Exporter) ExportView(vd *view.Data) { + if vd == nil { + return + } + _ = ae.viewDataBundler.Add(vd, 1) +} + +// ExportMetricsServiceRequest sends proto metrics with the metrics service client. 
+func (ae *Exporter) ExportMetricsServiceRequest(batch *agentmetricspb.ExportMetricsServiceRequest) error { + if batch == nil || len(batch.Metrics) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportMetricsServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.metricsExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. + // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.metricsExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { + if len(sdl) == 0 { + return nil + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) + } + } + return protoSpans +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoSpans := ocSpanDataToPbSpans(sdl) + if len(protoSpans) == 0 { + return + } + ae.senderMu.Lock() + err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + Resource: resourceProtoFromEnv(), + }) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { + if len(vdl) == 0 { + return nil + } + metrics := make([]*metricspb.Metric, 0, len(vdl)) + for _, vd := range vdl { + if vd != nil { + 
vmetric, err := viewDataToMetric(vd) + // TODO: (@odeke-em) somehow report this error, if it is non-nil. + if err == nil && vmetric != nil { + metrics = append(metrics, vmetric) + } + } + } + return metrics +} + +func (ae *Exporter) uploadViewData(vdl []*view.Data) { + protoMetrics := ocViewDataToPbMetrics(vdl) + if len(protoMetrics) == 0 { + return + } + req := &agentmetricspb.ExportMetricsServiceRequest{ + Metrics: protoMetrics, + Resource: resourceProtoFromEnv(), + // TODO:(@odeke-em) + // a) Figure out how to derive a Node from the environment + // or better letting users of the exporter configure it. + } + ae.ExportMetricsServiceRequest(req) +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() + ae.viewDataBundler.Flush() +} + +func resourceProtoFromEnv() *resourcepb.Resource { + rs, _ := resource.FromEnv(context.Background()) + if rs == nil { + return nil + } + return resourceToResourcePb(rs) +} + +func resourceToResourcePb(rs *resource.Resource) *resourcepb.Resource { + rprs := &resourcepb.Resource{ + Type: rs.Type, + } + if rs.Labels != nil { + rprs.Labels = make(map[string]string) + for k, v := range rs.Labels { + rprs.Labels[k] = v + } + } + return rprs +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 0000000000..6820216f3b --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,161 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "go.opencensus.io/resource" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type resourceDetector resource.Detector + +var _ ExporterOption = (*resourceDetector)(nil) + +func (rd resourceDetector) withExporter(e *Exporter) { + e.resourceDetector = resource.Detector(rd) +} + +// WithResourceDetector allows one to register a resource detector. Resource Detector is used +// to detect resources associated with the application. Detected resource is exported +// along with the metrics. If the detector fails then it panics. +// If a resource detector is not provided then by default it detects from the environment. +func WithResourceDetector(rd resource.Detector) ExporterOption { + return resourceDetector(rd) +} + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. +func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. 
If unset, it will instead try to use +// connect to DefaultAgentHost:DefaultAgentPort +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. +func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} + +type reconnectionPeriod time.Duration + +func (rp reconnectionPeriod) withExporter(e *Exporter) { + e.reconnectionPeriod = time.Duration(rp) +} + +func WithReconnectionPeriod(rp time.Duration) ExporterOption { + return reconnectionPeriod(rp) +} + +type compressorSetter string + +func (c compressorSetter) withExporter(e *Exporter) { + e.compressor = string(c) +} + +// UseCompressor will set the compressor for the gRPC client to use when sending requests. +// It is the responsibility of the caller to ensure that the compressor set has been registered +// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. 
Some +// compressors auto-register on import, such as gzip, which can be registered by calling +// `import _ "google.golang.org/grpc/encoding/gzip"` +func UseCompressor(compressorName string) ExporterOption { + return compressorSetter(compressorName) +} + +type headerSetter map[string]string + +func (h headerSetter) withExporter(e *Exporter) { + e.headers = map[string]string(h) +} + +// WithHeaders will send the provided headers when the gRPC stream connection +// is instantiated +func WithHeaders(headers map[string]string) ExporterOption { + return headerSetter(headers) +} + +type clientCredentials struct { + credentials.TransportCredentials +} + +var _ ExporterOption = (*clientCredentials)(nil) + +// WithTLSCredentials allows the connection to use TLS credentials +// when talking to the server. It takes in grpc.TransportCredentials instead +// of say a Certificate file or a tls.Certificate, because the retrieving +// these credentials can be done in many ways e.g. plain file, in code tls.Config +// or by certificate rotation, so it is up to the caller to decide what to use. +func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { + return &clientCredentials{TransportCredentials: creds} +} + +func (cc *clientCredentials) withExporter(e *Exporter) { + e.clientTransportCredentials = cc.TransportCredentials +} + +type grpcDialOptions []grpc.DialOption + +var _ ExporterOption = (*grpcDialOptions)(nil) + +// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts +// with some other configuration the GRPC specified via the agent the ones here will +// take preference since they are set last. 
+func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption { + return grpcDialOptions(opts) +} + +func (opts grpcDialOptions) withExporter(e *Exporter) { + e.grpcDialOptions = opts +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 0000000000..983ebe7b70 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,248 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ocagent + +import ( + "math" + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 +) + +func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. 
+ ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +// This code is mostly copied from +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 +func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { + if len(as) == 0 && len(es) == 0 { + return nil + } + + timeEvents := &tracepb.Span_TimeEvents{} + var annotations, droppedAnnotationsCount int + var messageEvents, droppedMessageEventsCount int + + // Transform annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotations++ + timeEvents.TimeEvent = 
append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(a.Time), + Value: transformAnnotationToTimeEvent(&a), + }, + ) + } + + // Transform message events + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(e.Time), + Value: transformMessageEventToTimeEvent(&e), + }, + ) + } + + // Process dropped counter + timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + + return timeEvents +} + +func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { + return &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: &tracepb.TruncatableString{Value: a.Message}, + Attributes: ocAttributesToProtoAttributes(a.Attributes), + }, + } +} + +func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { + return &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: uint64(e.MessageID), + UncompressedSize: uint64(e.UncompressedByteSize), + CompressedSize: uint64(e.CompressedByteSize), + }, + } +} + +// clip32 clips an int to the range of an int32. 
+func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} + +func timeToTimestamp(t time.Time) *timestamp.Timestamp { + nanoTime := t.UnixNano() + return ×tamp.Timestamp{ + Seconds: nanoTime / 1e9, + Nanos: int32(nanoTime % 1e9), + } +} + +func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindClient: + return tracepb.Span_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SERVER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} + +func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { + if ts == nil { + return nil + } + return &tracepb.Span_Tracestate{ + Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), + } +} + +func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { + protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) + for _, entry := range entries { + protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ + Key: entry.Key, + Value: entry.Value, + }) + } + return protoEntries +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go new file mode 100644 index 0000000000..43f18dec19 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go @@ -0,0 +1,274 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/golang/protobuf/ptypes/timestamp" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" +) + +var ( + errNilMeasure = errors.New("expecting a non-nil stats.Measure") + errNilView = errors.New("expecting a non-nil view.View") + errNilViewData = errors.New("expecting a non-nil view.Data") +) + +func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { + if vd == nil { + return nil, errNilViewData + } + + descriptor, err := viewToMetricDescriptor(vd.View) + if err != nil { + return nil, err + } + + timeseries, err := viewDataToTimeseries(vd) + if err != nil { + return nil, err + } + + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: timeseries, + } + return metric, nil +} + +func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { + if v == nil { + return nil, errNilView + } + if v.Measure == nil { + return nil, errNilMeasure + } + + desc := &metricspb.MetricDescriptor{ + Name: stringOrCall(v.Name, v.Measure.Name), + Description: stringOrCall(v.Description, v.Measure.Description), + Unit: v.Measure.Unit(), + Type: aggregationToMetricDescriptorType(v), + LabelKeys: tagKeysToLabelKeys(v.TagKeys), + } + return desc, nil +} + +func stringOrCall(first string, call func() string) string { + if first != "" { + return first + } + return call() +} + +type measureType uint + +const ( + measureUnknown measureType = iota + 
measureInt64 + measureFloat64 +) + +func measureTypeFromMeasure(m stats.Measure) measureType { + switch m.(type) { + default: + return measureUnknown + case *stats.Float64Measure: + return measureFloat64 + case *stats.Int64Measure: + return measureInt64 + } +} + +func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { + if v == nil || v.Aggregation == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + if v.Measure == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + + switch v.Aggregation.Type { + case view.AggTypeCount: + // Cumulative on int64 + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + + case view.AggTypeDistribution: + // Cumulative types + return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION + + case view.AggTypeLastValue: + // Gauge types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_GAUGE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_GAUGE_INT64 + } + + case view.AggTypeSum: + // Cumulative types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + } + } + + // For all other cases, return unspecified. + return metricspb.MetricDescriptor_UNSPECIFIED +} + +func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { + labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) + for _, tagKey := range tagKeys { + labelKeys = append(labelKeys, &metricspb.LabelKey{ + Key: tagKey.Name(), + }) + } + return labelKeys +} + +func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { + if vd == nil || len(vd.Rows) == 0 { + return nil, nil + } + + // Given that view.Data only contains Start, End + // the timestamps for all the row data will be the exact same + // per aggregation. However, the values will differ. + // Each row has its own tags. 
+ startTimestamp := timeToProtoTimestamp(vd.Start) + endTimestamp := timeToProtoTimestamp(vd.End) + + mType := measureTypeFromMeasure(vd.View.Measure) + timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) + // It is imperative that the ordering of "LabelValues" matches those + // of the Label keys in the metric descriptor. + for _, row := range vd.Rows { + labelValues := labelValuesFromTags(row.Tags) + point := rowToPoint(vd.View, row, endTimestamp, mType) + timeseries = append(timeseries, &metricspb.TimeSeries{ + StartTimestamp: startTimestamp, + LabelValues: labelValues, + Points: []*metricspb.Point{point}, + }) + } + + if len(timeseries) == 0 { + return nil, nil + } + + return timeseries, nil +} + +func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { + unixNano := t.UnixNano() + return ×tamp.Timestamp{ + Seconds: int64(unixNano / 1e9), + Nanos: int32(unixNano % 1e9), + } +} + +func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { + pt := &metricspb.Point{ + Timestamp: endTimestamp, + } + + switch data := row.Data.(type) { + case *view.CountData: + pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} + + case *view.DistributionData: + pt.Value = &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + Count: data.Count, + Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count + // TODO: Add Exemplar + Buckets: bucketsToProtoBuckets(data.CountPerBucket), + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: v.Aggregation.Buckets, + }, + }, + }, + SumOfSquaredDeviation: data.SumOfSquaredDev, + }} + + case *view.LastValueData: + setPointValue(pt, data.Value, mType) + + case *view.SumData: + setPointValue(pt, data.Value, mType) + } + + return pt +} + +// Not returning anything from this 
function because metricspb.Point.is_Value is an unexported +// interface hence we just have to set its value by pointer. +func setPointValue(pt *metricspb.Point, value float64, mType measureType) { + if mType == measureInt64 { + pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} + } else { + pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} + } +} + +func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { + distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) + for i := 0; i < len(countPerBucket); i++ { + count := countPerBucket[i] + + distBuckets[i] = &metricspb.DistributionValue_Bucket{ + Count: count, + } + } + + return distBuckets +} + +func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { + if len(tags) == 0 { + return nil + } + + labelValues := make([]*metricspb.LabelValue, 0, len(tags)) + for _, tag_ := range tags { + labelValues = append(labelValues, &metricspb.LabelValue{ + Value: tag_.Value, + + // It is imperative that we set the "HasValue" attribute, + // in order to distinguish missing a label from the empty string. + // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue + // + // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, + // so the best case that we can use to distinguish missing labels/tags from the + // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have + // a value. 
+ HasValue: tag_.Key.Name() != "", + }) + } + return labelValues +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 0000000000..68be4c75bd --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +const Version = "0.0.1" diff --git a/test/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go new file mode 100644 index 0000000000..59ce1c0a3d --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/prometheus.go @@ -0,0 +1,277 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus contains a Prometheus exporter that supports exporting +// OpenCensus views as Prometheus metrics. 
+package prometheus // import "contrib.go.opencensus.io/exporter/prometheus" + +import ( + "fmt" + "log" + "net/http" + "sync" + + "context" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats/view" +) + +// Exporter exports stats to Prometheus, users need +// to register the exporter as an http.Handler to be +// able to export. +type Exporter struct { + opts Options + g prometheus.Gatherer + c *collector + handler http.Handler +} + +// Options contains options for configuring the exporter. +type Options struct { + Namespace string + Registry *prometheus.Registry + OnError func(err error) + ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. +} + +// NewExporter returns an exporter that exports stats to Prometheus. +func NewExporter(o Options) (*Exporter, error) { + if o.Registry == nil { + o.Registry = prometheus.NewRegistry() + } + collector := newCollector(o, o.Registry) + e := &Exporter{ + opts: o, + g: o.Registry, + c: collector, + handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), + } + collector.ensureRegisteredOnce() + + return e, nil +} + +var _ http.Handler = (*Exporter)(nil) + +// ensureRegisteredOnce invokes reg.Register on the collector itself +// exactly once to ensure that we don't get errors such as +// cannot register the collector: descriptor Desc{fqName: *} +// already exists with the same fully-qualified name and const label values +// which is documented by Prometheus at +// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101 +func (c *collector) ensureRegisteredOnce() { + c.registerOnce.Do(func() { + if err := c.reg.Register(c); err != nil { + c.opts.onError(fmt.Errorf("cannot register the collector: %v", err)) + } + }) + +} + +func (o *Options) onError(err error) { + 
if o.OnError != nil { + o.OnError(err) + } else { + log.Printf("Failed to export to Prometheus: %v", err) + } +} + +// ExportView exports to the Prometheus if view data has one or more rows. +// Each OpenCensus AggregationData will be converted to +// corresponding Prometheus Metric: SumData will be converted +// to Untyped Metric, CountData will be a Counter Metric, +// DistributionData will be a Histogram Metric. +// Deprecated in lieu of metricexport.Reader interface. +func (e *Exporter) ExportView(vd *view.Data) { +} + +// ServeHTTP serves the Prometheus endpoint. +func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// collector implements prometheus.Collector +type collector struct { + opts Options + mu sync.Mutex // mu guards all the fields. + + registerOnce sync.Once + + // reg helps collector register views dynamically. + reg *prometheus.Registry + + // reader reads metrics from all registered producers. + reader *metricexport.Reader +} + +func (c *collector) Describe(ch chan<- *prometheus.Desc) { + de := &descExporter{c: c, descCh: ch} + c.reader.ReadAndExport(de) +} + +// Collect fetches the statistics from OpenCensus +// and delivers them as Prometheus Metrics. +// Collect is invoked every time a prometheus.Gatherer is run +// for example when the HTTP endpoint is invoked by Prometheus. 
+func (c *collector) Collect(ch chan<- prometheus.Metric) { + me := &metricExporter{c: c, metricCh: ch} + c.reader.ReadAndExport(me) +} + +func newCollector(opts Options, registrar *prometheus.Registry) *collector { + return &collector{ + reg: registrar, + opts: opts, + reader: metricexport.NewReader()} +} + +func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { + return prometheus.NewDesc( + metricName(c.opts.Namespace, metric), + metric.Descriptor.Description, + toPromLabels(metric.Descriptor.LabelKeys), + c.opts.ConstLabels) +} + +type metricExporter struct { + c *collector + metricCh chan<- prometheus.Metric +} + +// ExportMetrics exports to the Prometheus. +// Each OpenCensus Metric will be converted to +// corresponding Prometheus Metric: +// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric, +// TypeCumulativeDistribution will be a Histogram Metric. +// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric +func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + for _, ts := range metric.TimeSeries { + tvs := toLabelValues(ts.LabelValues) + for _, point := range ts.Points { + metric, err := toPromMetric(desc, metric, point, tvs) + if err != nil { + me.c.opts.onError(err) + } else if metric != nil { + me.metricCh <- metric + } + } + } + } + return nil +} + +type descExporter struct { + c *collector + descCh chan<- *prometheus.Desc +} + +// ExportMetrics exports descriptor to the Prometheus. +// It is invoked when request to scrape descriptors is received. 
+func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + me.descCh <- desc + } + return nil +} + +func toPromLabels(mls []metricdata.LabelKey) (labels []string) { + for _, ml := range mls { + labels = append(labels, sanitize(ml.Key)) + } + return labels +} + +func metricName(namespace string, m *metricdata.Metric) string { + var name string + if namespace != "" { + name = namespace + "_" + } + return name + sanitize(m.Descriptor.Name) +} + +func toPromMetric( + desc *prometheus.Desc, + metric *metricdata.Metric, + point metricdata.Point, + labelValues []string) (prometheus.Metric, error) { + switch metric.Descriptor.Type { + case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...) + + case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...) + + case metricdata.TypeCumulativeDistribution: + switch v := point.Value.(type) { + case *metricdata.Distribution: + points := make(map[float64]uint64) + // Histograms are cumulative in Prometheus. + // Get cumulative bucket counts. + cumCount := uint64(0) + for i, b := range v.BucketOptions.Bounds { + cumCount += uint64(v.Buckets[i].Count) + points[b] = cumCount + } + return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...) + default: + return nil, typeMismatchError(point) + } + case metricdata.TypeSummary: + // TODO: [rghetia] add support for TypeSummary. 
+ return nil, nil + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type) + } +} + +func toLabelValues(labelValues []metricdata.LabelValue) (values []string) { + for _, lv := range labelValues { + if lv.Present { + values = append(values, lv.Value) + } else { + values = append(values, "") + } + } + return values +} + +func typeMismatchError(point metricdata.Point) error { + return fmt.Errorf("point type %T does not match metric type", point) + +} + +func toPromValue(point metricdata.Point) (float64, error) { + switch v := point.Value.(type) { + case float64: + return v, nil + case int64: + return float64(v), nil + default: + return 0.0, typeMismatchError(point) + } +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go new file mode 100644 index 0000000000..ed6d8a14d5 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/prometheus/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. 
+func sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS new file mode 100644 index 0000000000..e491a9e7f7 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/test/vendor/github.com/knative/test-infra/LICENSE b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE similarity index 100% rename from test/vendor/github.com/knative/test-infra/LICENSE rename to test/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go new file mode 100644 index 0000000000..88835cc0fc --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +// Labels represents a set of Stackdriver Monitoring labels. +type Labels struct { + m map[string]labelValue +} + +type labelValue struct { + val, desc string +} + +// Set stores a label with the given key, value and description, +// overwriting any previous values with the given key. +func (labels *Labels) Set(key, value, description string) { + if labels.m == nil { + labels.m = make(map[string]labelValue) + } + labels.m[key] = labelValue{value, description} +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go new file mode 100644 index 0000000000..a2df93f05a --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go @@ -0,0 +1,519 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +/* +The code in this file is responsible for converting OpenCensus Proto metrics +directly to Stackdriver Metrics. 
+*/ + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "github.com/golang/protobuf/ptypes/timestamp" + "go.opencensus.io/trace" + + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/resource" +) + +const ( + exemplarAttachmentTypeString = "type.googleapis.com/google.protobuf.StringValue" + exemplarAttachmentTypeSpanCtx = "type.googleapis.com/google.monitoring.v3.SpanContext" + + // TODO(songy23): add support for this. + // exemplarAttachmentTypeDroppedLabels = "type.googleapis.com/google.monitoring.v3.DroppedLabels" +) + +// ExportMetrics exports OpenCensus Metrics to Stackdriver Monitoring. +func (se *statsExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + if len(metrics) == 0 { + return nil + } + + for _, metric := range metrics { + se.metricsBundler.Add(metric, 1) + // TODO: [rghetia] handle errors. + } + + return nil +} + +func (se *statsExporter) handleMetricsUpload(metrics []*metricdata.Metric) { + err := se.uploadMetrics(metrics) + if err != nil { + se.o.handleError(err) + } +} + +func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error { + ctx, cancel := newContextWithTimeout(se.o.Context, se.o.Timeout) + defer cancel() + + var errors []error + + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + + for _, metric := range metrics { + // Now create the metric descriptor remotely. 
+ if err := se.createMetricDescriptorFromMetric(ctx, metric); err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + errors = append(errors, err) + continue + } + } + + var allTimeSeries []*monitoringpb.TimeSeries + for _, metric := range metrics { + tsl, err := se.metricToMpbTs(ctx, metric) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + errors = append(errors, err) + continue + } + if tsl != nil { + allTimeSeries = append(allTimeSeries, tsl...) + } + } + + // Now batch timeseries up and then export. + for start, end := 0, 0; start < len(allTimeSeries); start = end { + end = start + maxTimeSeriesPerUpload + if end > len(allTimeSeries) { + end = len(allTimeSeries) + } + batch := allTimeSeries[start:end] + ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch) + for _, ctsreq := range ctsreql { + if err := createTimeSeries(ctx, se.c, ctsreq); err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + errors = append(errors, err) + } + } + } + + numErrors := len(errors) + if numErrors == 0 { + return nil + } else if numErrors == 1 { + return errors[0] + } + errMsgs := make([]string, 0, numErrors) + for _, err := range errors { + errMsgs = append(errMsgs, err.Error()) + } + return fmt.Errorf("[%s]", strings.Join(errMsgs, "; ")) +} + +// metricToMpbTs converts a metric into a list of Stackdriver Monitoring v3 API TimeSeries +// but it doesn't invoke any remote API. 
+func (se *statsExporter) metricToMpbTs(ctx context.Context, metric *metricdata.Metric) ([]*monitoringpb.TimeSeries, error) { + if metric == nil { + return nil, errNilMetricOrMetricDescriptor + } + + resource := se.metricRscToMpbRsc(metric.Resource) + + metricName := metric.Descriptor.Name + metricType := se.metricTypeFromProto(metricName) + metricLabelKeys := metric.Descriptor.LabelKeys + metricKind, _ := metricDescriptorTypeToMetricKind(metric) + + if metricKind == googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED { + // ignore these Timeserieses. TODO [rghetia] log errors. + return nil, nil + } + + timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.TimeSeries)) + for _, ts := range metric.TimeSeries { + sdPoints, err := se.metricTsToMpbPoint(ts, metricKind) + if err != nil { + // TODO(@rghetia): record error metrics + continue + } + + // Each TimeSeries has labelValues which MUST be correlated + // with that from the MetricDescriptor + labels, err := metricLabelsToTsLabels(se.defaultLabels, metricLabelKeys, ts.LabelValues) + if err != nil { + // TODO: (@rghetia) perhaps log this error from labels extraction, if non-nil. + continue + } + + var rsc *monitoredrespb.MonitoredResource + var mr monitoredresource.Interface + if se.o.ResourceByDescriptor != nil { + labels, mr = se.o.ResourceByDescriptor(&metric.Descriptor, labels) + // TODO(rghetia): optimize this. It is inefficient to convert this for all metrics. 
+ rsc = convertMonitoredResourceToPB(mr) + if rsc.Type == "" { + rsc.Type = "global" + rsc.Labels = nil + } + } else { + rsc = resource + } + timeSeries = append(timeSeries, &monitoringpb.TimeSeries{ + Metric: &googlemetricpb.Metric{ + Type: metricType, + Labels: labels, + }, + Resource: rsc, + Points: sdPoints, + }) + } + + return timeSeries, nil +} + +func metricLabelsToTsLabels(defaults map[string]labelValue, labelKeys []metricdata.LabelKey, labelValues []metricdata.LabelValue) (map[string]string, error) { + // Perform this sanity check now. + if len(labelKeys) != len(labelValues) { + return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues)) + } + + if len(defaults)+len(labelKeys) == 0 { + return nil, nil + } + + labels := make(map[string]string) + // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. + for key, label := range defaults { + labels[sanitize(key)] = label.val + } + + for i, labelKey := range labelKeys { + labelValue := labelValues[i] + labels[sanitize(labelKey.Key)] = labelValue.Value + } + + return labels, nil +} + +// createMetricDescriptorFromMetric creates a metric descriptor from the OpenCensus metric +// and then creates it remotely using Stackdriver's API. +func (se *statsExporter) createMetricDescriptorFromMetric(ctx context.Context, metric *metricdata.Metric) error { + // Skip create metric descriptor if configured + if se.o.SkipCMD { + return nil + } + + se.metricMu.Lock() + defer se.metricMu.Unlock() + + name := metric.Descriptor.Name + if _, created := se.metricDescriptors[name]; created { + return nil + } + + if builtinMetric(se.metricTypeFromProto(name)) { + se.metricDescriptors[name] = true + return nil + } + + // Otherwise, we encountered a cache-miss and + // should create the metric descriptor remotely. 
+ inMD, err := se.metricToMpbMetricDescriptor(metric) + if err != nil { + return err + } + + if err = se.createMetricDescriptor(ctx, inMD); err != nil { + return err + } + + // Now record the metric as having been created. + se.metricDescriptors[name] = true + return nil +} + +func (se *statsExporter) metricToMpbMetricDescriptor(metric *metricdata.Metric) (*googlemetricpb.MetricDescriptor, error) { + if metric == nil { + return nil, errNilMetricOrMetricDescriptor + } + + metricType := se.metricTypeFromProto(metric.Descriptor.Name) + displayName := se.displayName(metric.Descriptor.Name) + metricKind, valueType := metricDescriptorTypeToMetricKind(metric) + + sdm := &googlemetricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType), + DisplayName: displayName, + Description: metric.Descriptor.Description, + Unit: string(metric.Descriptor.Unit), + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: metricLableKeysToLabels(se.defaultLabels, metric.Descriptor.LabelKeys), + } + + return sdm, nil +} + +func metricLableKeysToLabels(defaults map[string]labelValue, labelKeys []metricdata.LabelKey) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(labelKeys)) + + // Fill in the defaults first. + for key, lbl := range defaults { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key), + Description: lbl.desc, + ValueType: labelpb.LabelDescriptor_STRING, + }) + } + + // Now fill in those from the metric. 
+ for _, key := range labelKeys { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key.Key), + Description: key.Description, + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + }) + } + return labelDescriptors +} + +func metricDescriptorTypeToMetricKind(m *metricdata.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) { + if m == nil { + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } + + switch m.Descriptor.Type { + case metricdata.TypeCumulativeInt64: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64 + + case metricdata.TypeCumulativeFloat64: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE + + case metricdata.TypeCumulativeDistribution: + return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION + + case metricdata.TypeGaugeFloat64: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE + + case metricdata.TypeGaugeInt64: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64 + + case metricdata.TypeGaugeDistribution: + return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION + + case metricdata.TypeSummary: + // TODO: [rghetia] after upgrading to proto version3, retrun UNRECOGNIZED instead of UNSPECIFIED + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + + default: + // TODO: [rghetia] after upgrading to proto version3, retrun UNRECOGNIZED instead of UNSPECIFIED + return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED + } +} + +func (se *statsExporter) metricRscToMpbRsc(rs *resource.Resource) *monitoredrespb.MonitoredResource { + if rs 
== nil { + resource := se.o.Resource + if resource == nil { + resource = &monitoredrespb.MonitoredResource{ + Type: "global", + } + } + return resource + } + typ := rs.Type + if typ == "" { + typ = "global" + } + mrsp := &monitoredrespb.MonitoredResource{ + Type: typ, + } + if rs.Labels != nil { + mrsp.Labels = make(map[string]string, len(rs.Labels)) + for k, v := range rs.Labels { + // TODO: [rghetia] add mapping between OC Labels and SD Labels. + mrsp.Labels[k] = v + } + } + return mrsp +} + +func (se *statsExporter) metricTsToMpbPoint(ts *metricdata.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) { + for _, pt := range ts.Points { + + // If we have a last value aggregation point i.e. MetricDescriptor_GAUGE + // StartTime should be nil. + startTime := timestampProto(ts.StartTime) + if metricKind == googlemetricpb.MetricDescriptor_GAUGE { + startTime = nil + } + + spt, err := metricPointToMpbPoint(startTime, &pt, se.o.ProjectID) + if err != nil { + return nil, err + } + sptl = append(sptl, spt) + } + return sptl, nil +} + +func metricPointToMpbPoint(startTime *timestamp.Timestamp, pt *metricdata.Point, projectID string) (*monitoringpb.Point, error) { + if pt == nil { + return nil, nil + } + + mptv, err := metricPointToMpbValue(pt, projectID) + if err != nil { + return nil, err + } + + mpt := &monitoringpb.Point{ + Value: mptv, + Interval: &monitoringpb.TimeInterval{ + StartTime: startTime, + EndTime: timestampProto(pt.Time), + }, + } + return mpt, nil +} + +func metricPointToMpbValue(pt *metricdata.Point, projectID string) (*monitoringpb.TypedValue, error) { + if pt == nil { + return nil, nil + } + + var err error + var tval *monitoringpb.TypedValue + switch v := pt.Value.(type) { + default: + err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", pt.Value) + + case int64: + tval = &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v, + }, + } + + case float64: + 
tval = &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v, + }, + } + + case *metricdata.Distribution: + dv := v + var mv *monitoringpb.TypedValue_DistributionValue + var mean float64 + if dv.Count > 0 { + mean = float64(dv.Sum) / float64(dv.Count) + } + mv = &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: dv.Count, + Mean: mean, + SumOfSquaredDeviation: dv.SumOfSquaredDeviation, + }, + } + + insertZeroBound := false + if bopts := dv.BucketOptions; bopts != nil { + insertZeroBound = shouldInsertZeroBound(bopts.Bounds...) + mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + // The first bucket bound should be 0.0 because the Metrics first bucket is + // [0, first_bound) but Stackdriver monitoring bucket bounds begin with -infinity + // (first bucket is (-infinity, 0)) + Bounds: addZeroBoundOnCondition(insertZeroBound, bopts.Bounds...), + }, + }, + } + } + bucketCounts, exemplars := metricBucketToBucketCountsAndExemplars(dv.Buckets, projectID) + mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts...) 
+ mv.DistributionValue.Exemplars = exemplars + + tval = &monitoringpb.TypedValue{Value: mv} + } + + return tval, err +} + +func metricBucketToBucketCountsAndExemplars(buckets []metricdata.Bucket, projectID string) ([]int64, []*distributionpb.Distribution_Exemplar) { + bucketCounts := make([]int64, len(buckets)) + var exemplars []*distributionpb.Distribution_Exemplar + for i, bucket := range buckets { + bucketCounts[i] = bucket.Count + if bucket.Exemplar != nil { + exemplars = append(exemplars, metricExemplarToPbExemplar(bucket.Exemplar, projectID)) + } + } + return bucketCounts, exemplars +} + +func metricExemplarToPbExemplar(exemplar *metricdata.Exemplar, projectID string) *distributionpb.Distribution_Exemplar { + return &distributionpb.Distribution_Exemplar{ + Value: exemplar.Value, + Timestamp: timestampProto(exemplar.Timestamp), + Attachments: attachmentsToPbAttachments(exemplar.Attachments, projectID), + } +} + +func attachmentsToPbAttachments(attachments metricdata.Attachments, projectID string) []*any.Any { + var pbAttachments []*any.Any + for _, v := range attachments { + if spanCtx, succ := v.(trace.SpanContext); succ { + pbAttachments = append(pbAttachments, toPbSpanCtxAttachment(spanCtx, projectID)) + } else { + // Treat everything else as plain string for now. + // TODO(songy23): add support for dropped label attachments. 
+ pbAttachments = append(pbAttachments, toPbStringAttachment(v)) + } + } + return pbAttachments +} + +func toPbStringAttachment(v interface{}) *any.Any { + s := fmt.Sprintf("%v", v) + return &any.Any{ + TypeUrl: exemplarAttachmentTypeString, + Value: []byte(s), + } +} + +func toPbSpanCtxAttachment(spanCtx trace.SpanContext, projectID string) *any.Any { + pbSpanCtx := monitoringpb.SpanContext{ + SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, spanCtx.TraceID.String(), spanCtx.SpanID.String()), + } + bytes, _ := proto.Marshal(&pbSpanCtx) + return &any.Any{ + TypeUrl: exemplarAttachmentTypeSpanCtx, + Value: bytes, + } +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go new file mode 100644 index 0000000000..ccd6ee4a6f --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go @@ -0,0 +1,201 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package stackdriver
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	monitoring "cloud.google.com/go/monitoring/apiv3"
+	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+)
+
+const (
+	minNumWorkers   = 1
+	minReqsChanSize = 5
+)
+
+// metricsBatcher accumulates TimeSeries and dispatches them in batches of
+// maxTimeSeriesPerUpload to a pool of worker goroutines, which push each
+// batch to Stackdriver as one CreateTimeSeriesRequest.
+type metricsBatcher struct {
+	projectName string
+	allTss      []*monitoringpb.TimeSeries
+	allErrs     []error
+
+	// Counts all dropped TimeSeries by this metricsBatcher.
+	droppedTimeSeries int
+
+	workers []*worker
+	// reqsChan, respsChan and wg are shared between metricsBatcher and worker goroutines.
+	reqsChan  chan *monitoringpb.CreateTimeSeriesRequest
+	respsChan chan *response
+	wg        *sync.WaitGroup
+}
+
+// newMetricsBatcher creates a metricsBatcher for projectID and starts
+// numWorkers (at least minNumWorkers) goroutines that consume requests from
+// reqsChan. Each worker reports its result on respsChan when reqsChan closes.
+func newMetricsBatcher(ctx context.Context, projectID string, numWorkers int, mc *monitoring.MetricClient, timeout time.Duration) *metricsBatcher {
+	if numWorkers < minNumWorkers {
+		numWorkers = minNumWorkers
+	}
+	workers := make([]*worker, 0, numWorkers)
+	reqsChanSize := numWorkers
+	if reqsChanSize < minReqsChanSize {
+		reqsChanSize = minReqsChanSize
+	}
+	reqsChan := make(chan *monitoringpb.CreateTimeSeriesRequest, reqsChanSize)
+	respsChan := make(chan *response, numWorkers)
+	var wg sync.WaitGroup
+	wg.Add(numWorkers)
+	for i := 0; i < numWorkers; i++ {
+		w := newWorker(ctx, mc, reqsChan, respsChan, &wg, timeout)
+		workers = append(workers, w)
+		go w.start()
+	}
+	return &metricsBatcher{
+		projectName:       fmt.Sprintf("projects/%s", projectID),
+		allTss:            make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload),
+		droppedTimeSeries: 0,
+		workers:           workers,
+		wg:                &wg,
+		reqsChan:          reqsChan,
+		respsChan:         respsChan,
+	}
+}
+
+// recordDroppedTimeseries adds numTimeSeries to the dropped count and keeps
+// any non-nil errors for reporting from close.
+func (mb *metricsBatcher) recordDroppedTimeseries(numTimeSeries int, errs ...error) {
+	mb.droppedTimeSeries += numTimeSeries
+	for _, err := range errs {
+		if err != nil {
+			mb.allErrs = append(mb.allErrs, err)
+		}
+	}
+}
+
+// addTimeSeries buffers ts and flushes a full batch to the workers once
+// maxTimeSeriesPerUpload entries have accumulated.
+func (mb *metricsBatcher) addTimeSeries(ts *monitoringpb.TimeSeries) {
+	mb.allTss = append(mb.allTss, ts)
+	if len(mb.allTss) == maxTimeSeriesPerUpload {
+		mb.sendReqToChan()
+		mb.allTss = make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload)
+	}
+}
+
+// close flushes any partial batch, shuts down the workers, collects their
+// results, and returns nil, the single error, or all errors joined into one.
+func (mb *metricsBatcher) close(ctx context.Context) error {
+	// Send any remaining time series, must be <200
+	if len(mb.allTss) > 0 {
+		mb.sendReqToChan()
+	}
+
+	close(mb.reqsChan)
+	mb.wg.Wait()
+	for i := 0; i < len(mb.workers); i++ {
+		resp := <-mb.respsChan
+		mb.recordDroppedTimeseries(resp.droppedTimeSeries, resp.errs...)
+	}
+	close(mb.respsChan)
+
+	numErrors := len(mb.allErrs)
+	if numErrors == 0 {
+		return nil
+	}
+
+	if numErrors == 1 {
+		return mb.allErrs[0]
+	}
+
+	errMsgs := make([]string, 0, numErrors)
+	for _, err := range mb.allErrs {
+		errMsgs = append(errMsgs, err.Error())
+	}
+	return fmt.Errorf("[%s]", strings.Join(errMsgs, "; "))
+}
+
+// sendReqToChan grabs all the timeseries in this metricsBatcher, puts them
+// to a CreateTimeSeriesRequest and sends the request to reqsChan.
+func (mb *metricsBatcher) sendReqToChan() {
+	req := &monitoringpb.CreateTimeSeriesRequest{
+		Name:       mb.projectName,
+		TimeSeries: mb.allTss,
+	}
+	mb.reqsChan <- req
+}
+
+// sendReq sends create time series requests to Stackdriver,
+// and returns the count of dropped time series and error.
+func sendReq(ctx context.Context, c *monitoring.MetricClient, req *monitoringpb.CreateTimeSeriesRequest) (int, error) { + if c != nil { // c==nil only happens in unit tests where we don't make real calls to Stackdriver server + err := createTimeSeries(ctx, c, req) + if err != nil { + return len(req.TimeSeries), err + } + } + return 0, nil +} + +type worker struct { + ctx context.Context + timeout time.Duration + mc *monitoring.MetricClient + + resp *response + + respsChan chan *response + reqsChan chan *monitoringpb.CreateTimeSeriesRequest + + wg *sync.WaitGroup +} + +func newWorker( + ctx context.Context, + mc *monitoring.MetricClient, + reqsChan chan *monitoringpb.CreateTimeSeriesRequest, + respsChan chan *response, + wg *sync.WaitGroup, + timeout time.Duration) *worker { + return &worker{ + ctx: ctx, + mc: mc, + resp: &response{}, + reqsChan: reqsChan, + respsChan: respsChan, + wg: wg, + } +} + +func (w *worker) start() { + for req := range w.reqsChan { + w.sendReqWithTimeout(req) + } + w.respsChan <- w.resp + w.wg.Done() +} + +func (w *worker) sendReqWithTimeout(req *monitoringpb.CreateTimeSeriesRequest) { + ctx, cancel := newContextWithTimeout(w.ctx, w.timeout) + defer cancel() + + w.recordDroppedTimeseries(sendReq(ctx, w.mc, req)) +} + +func (w *worker) recordDroppedTimeseries(numTimeSeries int, err error) { + w.resp.droppedTimeSeries += numTimeSeries + if err != nil { + w.resp.errs = append(w.resp.errs, err) + } +} + +type response struct { + droppedTimeSeries int + errs []error +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go new file mode 100644 index 0000000000..bcc1f0ee9f --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_proto.go @@ -0,0 +1,569 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +/* +The code in this file is responsible for converting OpenCensus Proto metrics +directly to Stackdriver Metrics. +*/ + +import ( + "context" + "errors" + "fmt" + "path" + "strings" + + "go.opencensus.io/resource" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +var errNilMetricOrMetricDescriptor = errors.New("non-nil metric or metric descriptor") +var percentileLabelKey = &metricspb.LabelKey{ + Key: "percentile", + Description: "the value at a given percentile of a distribution", +} +var globalResource = &resource.Resource{Type: "global"} +var domains = []string{"googleapis.com", "kubernetes.io", "istio.io", "knative.dev"} + +// PushMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously, +// without de-duping or adding proto metrics to the bundler. 
+func (se *statsExporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) { + if len(metrics) == 0 { + return 0, errNilMetricOrMetricDescriptor + } + + // Caches the resources seen so far + seenResources := make(map[*resourcepb.Resource]*monitoredrespb.MonitoredResource) + + mb := newMetricsBatcher(ctx, se.o.ProjectID, se.o.NumberOfWorkers, se.c, se.o.Timeout) + for _, metric := range metrics { + if len(metric.GetTimeseries()) == 0 { + // No TimeSeries to export, skip this metric. + continue + } + mappedRsc := se.getResource(rsc, metric, seenResources) + if metric.GetMetricDescriptor().GetType() == metricspb.MetricDescriptor_SUMMARY { + summaryMtcs := se.convertSummaryMetrics(metric) + for _, summaryMtc := range summaryMtcs { + if err := se.createMetricDescriptorFromMetricProto(ctx, summaryMtc); err != nil { + mb.recordDroppedTimeseries(len(summaryMtc.GetTimeseries()), err) + continue + } + se.protoMetricToTimeSeries(ctx, mappedRsc, summaryMtc, mb) + } + } else { + if err := se.createMetricDescriptorFromMetricProto(ctx, metric); err != nil { + mb.recordDroppedTimeseries(len(metric.GetTimeseries()), err) + continue + } + se.protoMetricToTimeSeries(ctx, mappedRsc, metric, mb) + } + } + + return mb.droppedTimeSeries, mb.close(ctx) +} + +func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*metricspb.Metric { + var metrics []*metricspb.Metric + + for _, ts := range summary.Timeseries { + var percentileTss []*metricspb.TimeSeries + var countTss []*metricspb.TimeSeries + var sumTss []*metricspb.TimeSeries + lvs := ts.GetLabelValues() + + startTime := ts.StartTimestamp + for _, pt := range ts.GetPoints() { + ptTimestamp := pt.GetTimestamp() + summaryValue := pt.GetSummaryValue() + if summaryValue.Sum != nil { + sumTs := &metricspb.TimeSeries{ + LabelValues: lvs, + StartTimestamp: startTime, + Points: []*metricspb.Point{ + { + Value: &metricspb.Point_DoubleValue{ + 
DoubleValue: summaryValue.Sum.Value, + }, + Timestamp: ptTimestamp, + }, + }, + } + sumTss = append(sumTss, sumTs) + } + + if summaryValue.Count != nil { + countTs := &metricspb.TimeSeries{ + LabelValues: lvs, + StartTimestamp: startTime, + Points: []*metricspb.Point{ + { + Value: &metricspb.Point_Int64Value{ + Int64Value: summaryValue.Count.Value, + }, + Timestamp: ptTimestamp, + }, + }, + } + countTss = append(countTss, countTs) + } + + snapshot := summaryValue.GetSnapshot() + for _, percentileValue := range snapshot.GetPercentileValues() { + lvsWithPercentile := lvs[0:] + lvsWithPercentile = append(lvsWithPercentile, &metricspb.LabelValue{ + HasValue: true, + Value: fmt.Sprintf("%f", percentileValue.Percentile), + }) + percentileTs := &metricspb.TimeSeries{ + LabelValues: lvsWithPercentile, + StartTimestamp: nil, + Points: []*metricspb.Point{ + { + Value: &metricspb.Point_DoubleValue{ + DoubleValue: percentileValue.Value, + }, + Timestamp: ptTimestamp, + }, + }, + } + percentileTss = append(percentileTss, percentileTs) + } + } + + if len(sumTss) > 0 { + metric := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: fmt.Sprintf("%s_summary_sum", summary.GetMetricDescriptor().GetName()), + Description: summary.GetMetricDescriptor().GetDescription(), + Type: metricspb.MetricDescriptor_CUMULATIVE_DOUBLE, + Unit: summary.GetMetricDescriptor().GetUnit(), + LabelKeys: summary.GetMetricDescriptor().GetLabelKeys(), + }, + Timeseries: sumTss, + Resource: summary.Resource, + } + metrics = append(metrics, metric) + } + if len(countTss) > 0 { + metric := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: fmt.Sprintf("%s_summary_count", summary.GetMetricDescriptor().GetName()), + Description: summary.GetMetricDescriptor().GetDescription(), + Type: metricspb.MetricDescriptor_CUMULATIVE_INT64, + Unit: "1", + LabelKeys: summary.GetMetricDescriptor().GetLabelKeys(), + }, + Timeseries: countTss, + Resource: summary.Resource, + } + 
metrics = append(metrics, metric) + } + if len(percentileTss) > 0 { + lks := summary.GetMetricDescriptor().GetLabelKeys()[0:] + lks = append(lks, percentileLabelKey) + metric := &metricspb.Metric{ + MetricDescriptor: &metricspb.MetricDescriptor{ + Name: fmt.Sprintf("%s_summary_percentile", summary.GetMetricDescriptor().GetName()), + Description: summary.GetMetricDescriptor().GetDescription(), + Type: metricspb.MetricDescriptor_GAUGE_DOUBLE, + Unit: summary.GetMetricDescriptor().GetUnit(), + LabelKeys: lks, + }, + Timeseries: percentileTss, + Resource: summary.Resource, + } + metrics = append(metrics, metric) + } + } + return metrics +} + +func (se *statsExporter) getResource(rsc *resourcepb.Resource, metric *metricspb.Metric, seenRscs map[*resourcepb.Resource]*monitoredrespb.MonitoredResource) *monitoredrespb.MonitoredResource { + var resource = rsc + if metric.Resource != nil { + resource = metric.Resource + } + mappedRsc, ok := seenRscs[resource] + if !ok { + mappedRsc = se.o.MapResource(resourcepbToResource(resource)) + seenRscs[resource] = mappedRsc + } + return mappedRsc +} + +func resourcepbToResource(rsc *resourcepb.Resource) *resource.Resource { + if rsc == nil { + return globalResource + } + res := &resource.Resource{ + Type: rsc.Type, + Labels: make(map[string]string, len(rsc.Labels)), + } + + for k, v := range rsc.Labels { + res.Labels[k] = v + } + return res +} + +// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest +// but it doesn't invoke any remote API. 
+// protoMetricToTimeSeries converts a metric into Stackdriver Monitoring v3
+// TimeSeries and feeds them to mb; it does not invoke any remote API itself.
+func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, mappedRsc *monitoredrespb.MonitoredResource, metric *metricspb.Metric, mb *metricsBatcher) {
+	if metric == nil || metric.MetricDescriptor == nil {
+		mb.recordDroppedTimeseries(len(metric.GetTimeseries()), errNilMetricOrMetricDescriptor)
+		// Fix: previously execution fell through after recording the drop;
+		// ranging over metric.Timeseries below would then panic on a nil
+		// metric (direct field access, unlike the nil-safe getters).
+		return
+	}
+
+	metricType := se.metricTypeFromProto(metric.GetMetricDescriptor().GetName())
+	metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys()
+	metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
+	labelKeys := make([]string, 0, len(metricLabelKeys))
+	for _, key := range metricLabelKeys {
+		labelKeys = append(labelKeys, sanitize(key.GetKey()))
+	}
+
+	for _, protoTimeSeries := range metric.Timeseries {
+		if len(protoTimeSeries.Points) == 0 {
+			// No points to send just move forward.
+			continue
+		}
+
+		sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind)
+		if err != nil {
+			mb.recordDroppedTimeseries(1, err)
+			continue
+		}
+
+		// Each TimeSeries has labelValues which MUST be correlated
+		// with that from the MetricDescriptor
+		labels, err := labelsPerTimeSeries(se.defaultLabels, labelKeys, protoTimeSeries.GetLabelValues())
+		if err != nil {
+			mb.recordDroppedTimeseries(1, err)
+			continue
+		}
+		mb.addTimeSeries(&monitoringpb.TimeSeries{
+			Metric: &googlemetricpb.Metric{
+				Type:   metricType,
+				Labels: labels,
+			},
+			MetricKind: metricKind,
+			ValueType:  valueType,
+			Resource:   mappedRsc,
+			Points:     sdPoints,
+		})
+	}
+}
+
+// labelsPerTimeSeries merges the exporter's default labels with one time
+// series' label values, keyed by the (sanitized) metric label keys. Returns
+// an error when keys and values are of different lengths, and (nil, nil)
+// when there are no labels at all.
+func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []string, labelValues []*metricspb.LabelValue) (map[string]string, error) {
+	if len(labelKeys) != len(labelValues) {
+		return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
+	}
+
+	if len(defaults)+len(labelKeys) == 0 {
+		// No labels for this metric
+		return nil, nil
+	}
+
+	labels := make(map[string]string)
+	// Fill in the defaults first, irrespective of whether the labelKeys and
+	// labelValues are mismatched.
+	for key, label := range defaults {
+		labels[key] = label.val
+	}
+
+	for i, labelKey := range labelKeys {
+		labelValue := labelValues[i]
+		if !labelValue.GetHasValue() {
+			continue
+		}
+		labels[labelKey] = labelValue.GetValue()
+	}
+
+	return labels, nil
+}
+
+// createMetricDescriptorFromMetricProto creates the metric descriptor
+// remotely at most once per metric name, caching successes in
+// se.protoMetricDescriptors. Built-in metrics are cached without a remote
+// call, and the whole step can be disabled via the SkipCMD option.
+func (se *statsExporter) createMetricDescriptorFromMetricProto(ctx context.Context, metric *metricspb.Metric) error {
+	// Skip create metric descriptor if configured
+	if se.o.SkipCMD {
+		return nil
+	}
+
+	ctx, cancel := newContextWithTimeout(ctx, se.o.Timeout)
+	defer cancel()
+
+	se.protoMu.Lock()
+	defer se.protoMu.Unlock()
+
+	name := metric.GetMetricDescriptor().GetName()
+	if _, created := se.protoMetricDescriptors[name]; created {
+		return nil
+	}
+
+	if builtinMetric(se.metricTypeFromProto(name)) {
+		se.protoMetricDescriptors[name] = true
+		return nil
+	}
+
+	// Otherwise, we encountered a cache-miss and
+	// should create the metric descriptor remotely.
+	inMD, err := se.protoToMonitoringMetricDescriptor(metric, se.defaultLabels)
+	if err != nil {
+		return err
+	}
+
+	if err = se.createMetricDescriptor(ctx, inMD); err != nil {
+		return err
+	}
+
+	se.protoMetricDescriptors[name] = true
+	return nil
+}
+
+// protoTimeSeriesToMonitoringPoints converts one proto TimeSeries' points
+// into Monitoring v3 points.
+func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) ([]*monitoringpb.Point, error) {
+	sptl := make([]*monitoringpb.Point, 0, len(ts.Points))
+	for _, pt := range ts.Points {
+		// If we have a last value aggregation point i.e. MetricDescriptor_GAUGE
+		// StartTime should be nil.
+		startTime := ts.StartTimestamp
+		if metricKind == googlemetricpb.MetricDescriptor_GAUGE {
+			startTime = nil
+		}
+		spt, err := fromProtoPoint(startTime, pt)
+		if err != nil {
+			return nil, err
+		}
+		sptl = append(sptl, spt)
+	}
+	return sptl, nil
+}
+
+// protoToMonitoringMetricDescriptor builds the Monitoring MetricDescriptor
+// for a proto metric, combining the exporter's default labels with the
+// metric's own label keys.
+func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric, additionalLabels map[string]labelValue) (*googlemetricpb.MetricDescriptor, error) {
+	if metric == nil || metric.MetricDescriptor == nil {
+		return nil, errNilMetricOrMetricDescriptor
+	}
+
+	md := metric.GetMetricDescriptor()
+	metricName := md.GetName()
+	unit := md.GetUnit()
+	description := md.GetDescription()
+	metricType := se.metricTypeFromProto(metricName)
+	displayName := se.displayName(metricName)
+	metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
+
+	sdm := &googlemetricpb.MetricDescriptor{
+		Name:        fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType),
+		DisplayName: displayName,
+		Description: description,
+		Unit:        unit,
+		Type:        metricType,
+		MetricKind:  metricKind,
+		ValueType:   valueType,
+		Labels:      labelDescriptorsFromProto(additionalLabels, metric.GetMetricDescriptor().GetLabelKeys()),
+	}
+
+	return sdm, nil
+}
+
+// labelDescriptorsFromProto merges default labels and proto label keys into
+// Monitoring LabelDescriptors (all string-typed).
+func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []*metricspb.LabelKey) []*labelpb.LabelDescriptor {
+	labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(protoLabelKeys))
+
+	// Fill in the defaults first.
+	for key, lbl := range defaults {
+		labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+			Key:         sanitize(key),
+			Description: lbl.desc,
+			ValueType:   labelpb.LabelDescriptor_STRING,
+		})
+	}
+
+	// Now fill in those from the metric.
+	for _, protoKey := range protoLabelKeys {
+		labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+			Key:         sanitize(protoKey.GetKey()),
+			Description: protoKey.GetDescription(),
+			ValueType:   labelpb.LabelDescriptor_STRING, // We only use string tags
+		})
+	}
+	return labelDescriptors
+}
+
+// metricTypeFromProto derives the fully-qualified Stackdriver metric type
+// from a proto metric name, applying the configured prefix and falling back
+// to the default domain when the name carries no known domain.
+func (se *statsExporter) metricTypeFromProto(name string) string {
+	prefix := se.o.MetricPrefix
+	if se.o.GetMetricPrefix != nil {
+		prefix = se.o.GetMetricPrefix(name)
+	}
+	if prefix != "" {
+		name = path.Join(prefix, name)
+	}
+	if !hasDomain(name) {
+		// Still needed because the name may or may not have a "/" at the beginning.
+		name = path.Join(defaultDomain, name)
+	}
+	return name
+}
+
+// hasDomain checks if the metric name already has a domain in it.
+func hasDomain(name string) bool {
+	for _, domain := range domains {
+		if strings.Contains(name, domain) {
+			return true
+		}
+	}
+	return false
+}
+
+// fromProtoPoint converts one proto point (plus optional start time) into a
+// Monitoring point; a nil point maps to (nil, nil).
+func fromProtoPoint(startTime *timestamppb.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) {
+	if pt == nil {
+		return nil, nil
+	}
+
+	mptv, err := protoToMetricPoint(pt.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	return &monitoringpb.Point{
+		Value: mptv,
+		Interval: &monitoringpb.TimeInterval{
+			StartTime: startTime,
+			EndTime:   pt.Timestamp,
+		},
+	}, nil
+}
+
+// protoToMetricPoint converts a proto point value (int64, double or
+// distribution) into a Monitoring TypedValue; unsupported types error.
+func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
+	if value == nil {
+		return nil, nil
+	}
+
+	switch v := value.(type) {
+	default:
+		// All the other types are not yet handled.
+		// TODO: (@odeke-em, @songy23) talk to the Stackdriver team to determine
+		// the use cases for:
+		//
+		//      *TypedValue_BoolValue
+		//      *TypedValue_StringValue
+		//
+		// and then file feature requests on OpenCensus-Specs and then OpenCensus-Proto,
+		// lest we shall error here.
+		//
+		// TODO: Add conversion from SummaryValue when
+		//      https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66
+		//      has been figured out.
+		return nil, fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value)
+
+	case *metricspb.Point_Int64Value:
+		return &monitoringpb.TypedValue{
+			Value: &monitoringpb.TypedValue_Int64Value{
+				Int64Value: v.Int64Value,
+			},
+		}, nil
+
+	case *metricspb.Point_DoubleValue:
+		return &monitoringpb.TypedValue{
+			Value: &monitoringpb.TypedValue_DoubleValue{
+				DoubleValue: v.DoubleValue,
+			},
+		}, nil
+
+	case *metricspb.Point_DistributionValue:
+		dv := v.DistributionValue
+		var mv *monitoringpb.TypedValue_DistributionValue
+		if dv != nil {
+			var mean float64
+			if dv.Count > 0 {
+				mean = float64(dv.Sum) / float64(dv.Count)
+			}
+			mv = &monitoringpb.TypedValue_DistributionValue{
+				DistributionValue: &distributionpb.Distribution{
+					Count:                 dv.Count,
+					Mean:                  mean,
+					SumOfSquaredDeviation: dv.SumOfSquaredDeviation,
+				},
+			}
+
+			insertZeroBound := false
+			if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil {
+				bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_)
+				if ok && bexp != nil && bexp.Explicit != nil {
+					insertZeroBound = shouldInsertZeroBound(bexp.Explicit.Bounds...)
+					mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{
+						Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
+							ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
+								// The first bucket bound should be 0.0 because the Metrics first bucket is
+								// [0, first_bound) but Stackdriver monitoring bucket bounds begin with -infinity
+								// (first bucket is (-infinity, 0))
+								Bounds: addZeroBoundOnCondition(insertZeroBound, bexp.Explicit.Bounds...),
+							},
+						},
+					}
+				}
+			}
+			mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts(dv.Buckets)...)
+
+		}
+		return &monitoringpb.TypedValue{Value: mv}, nil
+	}
+}
+
+// bucketCounts extracts per-bucket counts, treating nil buckets as zero.
+func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 {
+	bucketCounts := make([]int64, len(buckets))
+	for i, bucket := range buckets {
+		if bucket != nil {
+			bucketCounts[i] = bucket.Count
+		}
+	}
+	return bucketCounts
+}
+
+// protoMetricDescriptorTypeToMetricKind maps a proto descriptor type to the
+// Monitoring (kind, value type) pair; unknown types map to UNSPECIFIED.
+func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+	dt := m.GetMetricDescriptor()
+	if dt == nil {
+		return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+	}
+
+	switch dt.Type {
+	case metricspb.MetricDescriptor_CUMULATIVE_INT64:
+		return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+
+	case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE:
+		return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+
+	case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION:
+		return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+
+	case metricspb.MetricDescriptor_GAUGE_DOUBLE:
+		return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+
+	case metricspb.MetricDescriptor_GAUGE_INT64:
+		return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+
+	case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+		return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+
+	default:
+		return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+	}
+}
diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
new file mode 100644
index 0000000000..ee519a4bf3
--- /dev/null
+++ 
b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go @@ -0,0 +1,57 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoredresource + +import ( + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" +) + +// awsIdentityDocument is used to store parsed AWS Identity Document. +type awsIdentityDocument struct { + // accountID is the AWS account number for the VM. + accountID string + + // instanceID is the instance id of the instance. + instanceID string + + // Region is the AWS region for the VM. + region string +} + +// retrieveAWSIdentityDocument attempts to retrieve AWS Identity Document. +// If the environment is AWS EC2 Instance then a valid document is retrieved. +// Relevant attributes from the document are stored in awsIdentityDoc. +// This is only done once. 
+// retrieveAWSIdentityDocument queries the EC2 metadata service; it returns
+// nil when not running on EC2 (session creation fails, metadata service
+// unavailable, or the identity document cannot be fetched).
+func retrieveAWSIdentityDocument() *awsIdentityDocument {
+	awsIdentityDoc := awsIdentityDocument{}
+	// Fix: renamed misspelled local "sesion" to "sess".
+	sess, err := session.NewSession()
+	if err != nil {
+		return nil
+	}
+	c := ec2metadata.New(sess)
+	if !c.Available() {
+		return nil
+	}
+	ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument()
+	if err != nil {
+		return nil
+	}
+	awsIdentityDoc.region = ec2InstanceIdentifyDocument.Region
+	awsIdentityDoc.instanceID = ec2InstanceIdentifyDocument.InstanceID
+	awsIdentityDoc.accountID = ec2InstanceIdentifyDocument.AccountID
+
+	return &awsIdentityDoc
+}
diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
new file mode 100644
index 0000000000..f0d88856b9
--- /dev/null
+++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -0,0 +1,117 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"cloud.google.com/go/compute/metadata"
+	container "cloud.google.com/go/container/apiv1"
+	containerpb "google.golang.org/genproto/googleapis/container/v1"
+)
+
+// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environment.
+type gcpMetadata struct { + + // projectID is the identifier of the GCP project associated with this resource, such as "my-project". + projectID string + + // instanceID is the numeric VM instance identifier assigned by Compute Engine. + instanceID string + + // clusterName is the name for the cluster the container is running in. + clusterName string + + // containerName is the name of the container. + containerName string + + // namespaceID is the identifier for the cluster namespace the container is running in + namespaceID string + + // podID is the identifier for the pod the container is running in. + podID string + + // zone is the Compute Engine zone in which the VM is running. + zone string + + monitoringV2 bool +} + +// retrieveGCPMetadata retrieves value of each Attribute from Metadata Server +// in GKE container and GCE instance environment. +// Some attributes are retrieved from the system environment. +// This is only executed detectOnce. +func retrieveGCPMetadata() *gcpMetadata { + gcpMetadata := gcpMetadata{} + var err error + gcpMetadata.instanceID, err = metadata.InstanceID() + if err != nil { + // Not a GCP environment + return &gcpMetadata + } + + gcpMetadata.projectID, err = metadata.ProjectID() + logError(err) + + gcpMetadata.zone, err = metadata.Zone() + logError(err) + + clusterName, err := metadata.InstanceAttributeValue("cluster-name") + logError(err) + gcpMetadata.clusterName = strings.TrimSpace(clusterName) + + clusterLocation, err := metadata.InstanceAttributeValue("cluster-location") + logError(err) + + // Following attributes are derived from environment variables. They are configured + // via yaml file. 
For details refer to: + // https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application + gcpMetadata.namespaceID = os.Getenv("NAMESPACE") + gcpMetadata.containerName = os.Getenv("CONTAINER_NAME") + gcpMetadata.podID = os.Getenv("HOSTNAME") + + // Monitoring API version can be obtained from cluster info.q + if gcpMetadata.clusterName != "" { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + logError(err) + if c != nil { + req := &containerpb.GetClusterRequest{ + Name: fmt.Sprintf("projects/%s/locations/%s/clusters/%s", gcpMetadata.projectID, strings.TrimSpace(clusterLocation), gcpMetadata.clusterName), + } + resp, err := c.GetCluster(ctx, req) + logError(err) + if resp != nil && resp.GetMonitoringService() == "monitoring.googleapis.com/kubernetes" && + resp.GetLoggingService() == "logging.googleapis.com/kubernetes" { + gcpMetadata.monitoringV2 = true + } + } + } + + return &gcpMetadata +} + +// logError logs error only if the error is present and it is not 'not defined' +func logError(err error) { + if err != nil { + if !strings.Contains(err.Error(), "not defined") { + log.Printf("Error retrieving gcp metadata: %v", err) + } + } +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go new file mode 100644 index 0000000000..86e76002ad --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go @@ -0,0 +1,232 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoredresource + +import ( + "fmt" + "os" + "sync" +) + +// Interface is a type that represent monitor resource that satisfies monitoredresource.Interface +type Interface interface { + + // MonitoredResource returns the resource type and resource labels. + MonitoredResource() (resType string, labels map[string]string) +} + +// GKEContainer represents gke_container type monitored resource. +// For definition refer to +// https://cloud.google.com/monitoring/api/resources#tag_gke_container +type GKEContainer struct { + + // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". + ProjectID string + + // InstanceID is the numeric VM instance identifier assigned by Compute Engine. + InstanceID string + + // ClusterName is the name for the cluster the container is running in. + ClusterName string + + // ContainerName is the name of the container. + ContainerName string + + // NamespaceID is the identifier for the cluster namespace the container is running in + NamespaceID string + + // PodID is the identifier for the pod the container is running in. + PodID string + + // Zone is the Compute Engine zone in which the VM is running. 
+ Zone string + + // LoggingMonitoringV2Enabled is the identifier if user enabled V2 logging and monitoring for GKE + LoggingMonitoringV2Enabled bool +} + +// MonitoredResource returns resource type and resource labels for GKEContainer +func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + "project_id": gke.ProjectID, + "cluster_name": gke.ClusterName, + "container_name": gke.ContainerName, + } + var typ string + if gke.LoggingMonitoringV2Enabled { + typ = "k8s_container" + labels["pod_name"] = gke.PodID + labels["namespace_name"] = gke.NamespaceID + labels["location"] = gke.Zone + } else { + typ = "gke_container" + labels["pod_id"] = gke.PodID + labels["namespace_id"] = gke.NamespaceID + labels["zone"] = gke.Zone + labels["instance_id"] = gke.InstanceID + } + return typ, labels +} + +// GCEInstance represents gce_instance type monitored resource. +// For definition refer to +// https://cloud.google.com/monitoring/api/resources#tag_gce_instance +type GCEInstance struct { + + // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project". + ProjectID string + + // InstanceID is the numeric VM instance identifier assigned by Compute Engine. + InstanceID string + + // Zone is the Compute Engine zone in which the VM is running. + Zone string +} + +// MonitoredResource returns resource type and resource labels for GCEInstance +func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + "project_id": gce.ProjectID, + "instance_id": gce.InstanceID, + "zone": gce.Zone, + } + return "gce_instance", labels +} + +// AWSEC2Instance represents aws_ec2_instance type monitored resource. +// For definition refer to +// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance +type AWSEC2Instance struct { + + // AWSAccount is the AWS account number for the VM. 
+ AWSAccount string + + // InstanceID is the instance id of the instance. + InstanceID string + + // Region is the AWS region for the VM. The format of this field is "aws:{region}", + // where supported values for {region} are listed at + // http://docs.aws.amazon.com/general/latest/gr/rande.html. + Region string +} + +// MonitoredResource returns resource type and resource labels for AWSEC2Instance +func (aws *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + "aws_account": aws.AWSAccount, + "instance_id": aws.InstanceID, + "region": aws.Region, + } + return "aws_ec2_instance", labels +} + +// Autodetect auto detects monitored resources based on +// the environment where the application is running. +// It supports detection of following resource types +// 1. gke_container: +// 2. gce_instance: +// 3. aws_ec2_instance: +// +// Returns MonitoredResInterface which implements getLabels() and getType() +// For resource definition go to https://cloud.google.com/monitoring/api/resources +func Autodetect() Interface { + return func() Interface { + detectOnce.Do(func() { + var awsIdentityDoc *awsIdentityDocument + var gcpMetadata *gcpMetadata + + // First attempts to retrieve AWS Identity Doc and GCP metadata. + // It then determines the resource type + // In GCP and AWS environment both func finishes quickly. However, + // in an environment other than those (e.g local laptop) it + // takes 2 seconds for GCP and 5-6 for AWS. + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + awsIdentityDoc = retrieveAWSIdentityDocument() + }() + go func() { + defer wg.Done() + gcpMetadata = retrieveGCPMetadata() + }() + + wg.Wait() + autoDetected = detectResourceType(awsIdentityDoc, gcpMetadata) + }) + return autoDetected + }() + +} + +// createAWSEC2InstanceMonitoredResource creates a aws_ec2_instance monitored resource +// awsIdentityDoc contains AWS EC2 specific attributes. 
+func createAWSEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *AWSEC2Instance { + awsInstance := AWSEC2Instance{ + AWSAccount: awsIdentityDoc.accountID, + InstanceID: awsIdentityDoc.instanceID, + Region: fmt.Sprintf("aws:%s", awsIdentityDoc.region), + } + return &awsInstance +} + +// createGCEInstanceMonitoredResource creates a gce_instance monitored resource +// gcpMetadata contains GCP (GKE or GCE) specific attributes. +func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance { + gceInstance := GCEInstance{ + ProjectID: gcpMetadata.projectID, + InstanceID: gcpMetadata.instanceID, + Zone: gcpMetadata.zone, + } + return &gceInstance +} + +// createGKEContainerMonitoredResource creates a gke_container monitored resource +// gcpMetadata contains GCP (GKE or GCE) specific attributes. +func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer { + gkeContainer := GKEContainer{ + ProjectID: gcpMetadata.projectID, + InstanceID: gcpMetadata.instanceID, + Zone: gcpMetadata.zone, + ContainerName: gcpMetadata.containerName, + ClusterName: gcpMetadata.clusterName, + NamespaceID: gcpMetadata.namespaceID, + PodID: gcpMetadata.podID, + LoggingMonitoringV2Enabled: gcpMetadata.monitoringV2, + } + return &gkeContainer +} + +// detectOnce is used to make sure GCP and AWS metadata detect function executes only once. +var detectOnce sync.Once + +// autoDetected is the metadata detected after the first execution of Autodetect function. +var autoDetected Interface + +// detectResourceType determines the resource type. +// awsIdentityDoc contains AWS EC2 attributes. nil if it is not AWS EC2 environment +// gcpMetadata contains GCP (GKE or GCE) specific attributes. 
+func detectResourceType(awsIdentityDoc *awsIdentityDocument, gcpMetadata *gcpMetadata) Interface { + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && + gcpMetadata != nil && gcpMetadata.instanceID != "" { + return createGKEContainerMonitoredResource(gcpMetadata) + } else if gcpMetadata != nil && gcpMetadata.instanceID != "" { + return createGCEInstanceMonitoredResource(gcpMetadata) + } else if awsIdentityDoc != nil { + return createAWSEC2InstanceMonitoredResource(awsIdentityDoc) + } + return nil +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go new file mode 100644 index 0000000000..782011cb6e --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go @@ -0,0 +1,143 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver" + +import ( + "fmt" + + "go.opencensus.io/resource" + "go.opencensus.io/resource/resourcekeys" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// Resource labels that are generally internal to the exporter. +// Consider exposing these labels and a type identifier in the future to allow +// for customization. 
+const ( + stackdriverLocation = "contrib.opencensus.io/exporter/stackdriver/location" + stackdriverProjectID = "contrib.opencensus.io/exporter/stackdriver/project_id" + stackdriverGenericTaskNamespace = "contrib.opencensus.io/exporter/stackdriver/generic_task/namespace" + stackdriverGenericTaskJob = "contrib.opencensus.io/exporter/stackdriver/generic_task/job" + stackdriverGenericTaskID = "contrib.opencensus.io/exporter/stackdriver/generic_task/task_id" +) + +// Mappings for the well-known OpenCensus resources to applicable Stackdriver resources. +var k8sContainerMap = map[string]string{ + "project_id": stackdriverProjectID, + "location": resourcekeys.CloudKeyZone, + "cluster_name": resourcekeys.K8SKeyClusterName, + "namespace_name": resourcekeys.K8SKeyNamespaceName, + "pod_name": resourcekeys.K8SKeyPodName, + "container_name": resourcekeys.ContainerKeyName, +} + +var k8sPodMap = map[string]string{ + "project_id": stackdriverProjectID, + "location": resourcekeys.CloudKeyZone, + "cluster_name": resourcekeys.K8SKeyClusterName, + "namespace_name": resourcekeys.K8SKeyNamespaceName, + "pod_name": resourcekeys.K8SKeyPodName, +} + +var k8sNodeMap = map[string]string{ + "project_id": stackdriverProjectID, + "location": resourcekeys.CloudKeyZone, + "cluster_name": resourcekeys.K8SKeyClusterName, + "node_name": resourcekeys.HostKeyName, +} + +var gcpResourceMap = map[string]string{ + "project_id": stackdriverProjectID, + "instance_id": resourcekeys.HostKeyID, + "zone": resourcekeys.CloudKeyZone, +} + +var awsResourceMap = map[string]string{ + "project_id": stackdriverProjectID, + "instance_id": resourcekeys.HostKeyID, + "region": resourcekeys.CloudKeyRegion, + "aws_account": resourcekeys.CloudKeyAccountID, +} + +// Generic task resource. 
+var genericResourceMap = map[string]string{ + "project_id": stackdriverProjectID, + "location": stackdriverLocation, + "namespace": stackdriverGenericTaskNamespace, + "job": stackdriverGenericTaskJob, + "task_id": stackdriverGenericTaskID, +} + +// returns transformed label map and true if all labels in match are found +// in input except optional project_id. It returns false if at least one label +// other than project_id is missing. +func transformResource(match, input map[string]string) (map[string]string, bool) { + output := make(map[string]string, len(input)) + for dst, src := range match { + v, ok := input[src] + if ok { + output[dst] = v + } else if dst != "project_id" { + return nil, true + } + } + return output, false +} + +func defaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResource { + match := genericResourceMap + result := &monitoredrespb.MonitoredResource{ + Type: "global", + } + if res == nil || res.Labels == nil { + return result + } + + switch { + case res.Type == resourcekeys.ContainerType: + result.Type = "k8s_container" + match = k8sContainerMap + case res.Type == resourcekeys.K8SType: + result.Type = "k8s_pod" + match = k8sPodMap + case res.Type == resourcekeys.HostType && res.Labels[resourcekeys.K8SKeyClusterName] != "": + result.Type = "k8s_node" + match = k8sNodeMap + case res.Labels[resourcekeys.CloudKeyProvider] == resourcekeys.CloudProviderGCP: + result.Type = "gce_instance" + match = gcpResourceMap + case res.Labels[resourcekeys.CloudKeyProvider] == resourcekeys.CloudProviderAWS: + result.Type = "aws_ec2_instance" + match = awsResourceMap + } + + var missing bool + result.Labels, missing = transformResource(match, res.Labels) + if missing { + result.Type = "global" + // if project id specified then transform it. 
+ if v, ok := res.Labels[stackdriverProjectID]; ok { + result.Labels = make(map[string]string, 1) + result.Labels["project_id"] = v + } + return result + } + if result.Type == "aws_ec2_instance" { + if v, ok := result.Labels["region"]; ok { + result.Labels["region"] = fmt.Sprintf("aws:%s", v) + } + } + return result +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go new file mode 100644 index 0000000000..184bb1d435 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. 
+func sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go new file mode 100644 index 0000000000..fafd06c282 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go @@ -0,0 +1,480 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdriver contains the OpenCensus exporters for +// Stackdriver Monitoring and Stackdriver Tracing. +// +// This exporter can be used to send metrics to Stackdriver Monitoring and traces +// to Stackdriver trace. +// +// The package uses Application Default Credentials to authenticate by default. +// See: https://developers.google.com/identity/protocols/application-default-credentials +// +// Alternatively, pass the authentication options in both the MonitoringClientOptions +// and the TraceClientOptions fields of Options. 
+// +// Stackdriver Monitoring +// +// This exporter support exporting OpenCensus views to Stackdriver Monitoring. +// Each registered view becomes a metric in Stackdriver Monitoring, with the +// tags becoming labels. +// +// The aggregation function determines the metric kind: LastValue aggregations +// generate Gauge metrics and all other aggregations generate Cumulative metrics. +// +// In order to be able to push your stats to Stackdriver Monitoring, you must: +// +// 1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en +// 2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing +// 3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard +// +// These steps enable the API but don't require that your app is hosted on Google Cloud Platform. +// +// Stackdriver Trace +// +// This exporter supports exporting Trace Spans to Stackdriver Trace. It also +// supports the Google "Cloud Trace" propagation format header. +package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver" + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "path" + "strings" + "time" + + metadataapi "cloud.google.com/go/compute/metadata" + traceapi "cloud.google.com/go/trace/apiv2" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/resource" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + + commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + "go.opencensus.io/metric/metricdata" +) + +// Options contains options for configuring the exporter. 
+type Options struct { + // ProjectID is the identifier of the Stackdriver + // project the user is uploading the stats data to. + // If not set, this will default to your "Application Default Credentials". + // For details see: https://developers.google.com/accounts/docs/application-default-credentials. + // + // It will be used in the project_id label of a Stackdriver monitored + // resource if the resource does not inherently belong to a specific + // project, e.g. on-premise resource like k8s_container or generic_task. + ProjectID string + + // Location is the identifier of the GCP or AWS cloud region/zone in which + // the data for a resource is stored. + // If not set, it will default to the location provided by the metadata server. + // + // It will be used in the location label of a Stackdriver monitored resource + // if the resource does not inherently belong to a specific project, e.g. + // on-premise resource like k8s_container or generic_task. + Location string + + // OnError is the hook to be called when there is + // an error uploading the stats or tracing data. + // If no custom hook is set, errors are logged. + // Optional. + OnError func(err error) + + // MonitoringClientOptions are additional options to be passed + // to the underlying Stackdriver Monitoring API client. + // Optional. + MonitoringClientOptions []option.ClientOption + + // TraceClientOptions are additional options to be passed + // to the underlying Stackdriver Trace API client. + // Optional. + TraceClientOptions []option.ClientOption + + // BundleDelayThreshold determines the max amount of time + // the exporter can wait before uploading view data or trace spans to + // the backend. + // Optional. + BundleDelayThreshold time.Duration + + // BundleCountThreshold determines how many view data events or trace spans + // can be buffered before batch uploading them to the backend. + // Optional. 
+ BundleCountThreshold int + + // TraceSpansBufferMaxBytes is the maximum size (in bytes) of spans that + // will be buffered in memory before being dropped. + // + // If unset, a default of 8MB will be used. + TraceSpansBufferMaxBytes int + + // Resource sets the MonitoredResource against which all views will be + // recorded by this exporter. + // + // All Stackdriver metrics created by this exporter are custom metrics, + // so only a limited number of MonitoredResource types are supported, see: + // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource + // + // An important consideration when setting the Resource here is that + // Stackdriver Monitoring only allows a single writer per + // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series + // A TimeSeries is uniquely defined by the metric type name + // (constructed from the view name and the MetricPrefix), the Resource field, + // and the set of label key/value pairs (in OpenCensus terminology: tag). + // + // If no custom Resource is set, a default MonitoredResource + // with type global and no resource labels will be used. If you explicitly + // set this field, you may also want to set custom DefaultMonitoringLabels. + // + // Deprecated: Use MonitoredResource instead. + Resource *monitoredrespb.MonitoredResource + + // MonitoredResource sets the MonitoredResource against which all views will be + // recorded by this exporter. 
+ // + // All Stackdriver metrics created by this exporter are custom metrics, + // so only a limited number of MonitoredResource types are supported, see: + // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource + // + // An important consideration when setting the MonitoredResource here is that + // Stackdriver Monitoring only allows a single writer per + // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series + // A TimeSeries is uniquely defined by the metric type name + // (constructed from the view name and the MetricPrefix), the MonitoredResource field, + // and the set of label key/value pairs (in OpenCensus terminology: tag). + // + // If no custom MonitoredResource is set AND if Resource is also not set then + // a default MonitoredResource with type global and no resource labels will be used. + // If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels. + // + // This field replaces Resource field. If this is set then it will override the + // Resource field. + // Optional, but encouraged. + MonitoredResource monitoredresource.Interface + + // ResourceDetector provides a hook to discover arbitrary resource information. + // + // The translation function provided in MapResource must be able to conver the + // the resource information to a Stackdriver monitored resource. + // + // If this field is unset, resource type and tags will automatically be discovered through + // the OC_RESOURCE_TYPE and OC_RESOURCE_LABELS environment variables. + ResourceDetector resource.Detector + + // MapResource converts a OpenCensus resource to a Stackdriver monitored resource. + // + // If this field is unset, defaultMapResource will be used which encodes a set of default + // conversions from auto-detected resources to well-known Stackdriver monitored resources. 
+ MapResource func(*resource.Resource) *monitoredrespb.MonitoredResource + + // MetricPrefix overrides the prefix of a Stackdriver metric names. + // Optional. If unset defaults to "custom.googleapis.com/opencensus/". + // If GetMetricPrefix is non-nil, this option is ignored. + MetricPrefix string + + // GetMetricDisplayName allows customizing the display name for the metric + // associated with the given view. By default it will be: + // MetricPrefix + view.Name + GetMetricDisplayName func(view *view.View) string + + // GetMetricType allows customizing the metric type for the given view. + // By default, it will be: + // "custom.googleapis.com/opencensus/" + view.Name + // + // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor + // Depreacted. Use GetMetricPrefix instead. + GetMetricType func(view *view.View) string + + // GetMetricPrefix allows customizing the metric prefix for the given metric name. + // If it is not set, MetricPrefix is used. If MetricPrefix is not set, it defaults to: + // "custom.googleapis.com/opencensus/" + // + // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor + GetMetricPrefix func(name string) string + + // DefaultTraceAttributes will be appended to every span that is exported to + // Stackdriver Trace. + DefaultTraceAttributes map[string]interface{} + + // DefaultMonitoringLabels are labels added to every metric created by this + // exporter in Stackdriver Monitoring. + // + // If unset, this defaults to a single label with key "opencensus_task" and + // value "go-@". This default ensures that the set of labels + // together with the default Resource (global) are unique to this + // process, as required by Stackdriver Monitoring. + // + // If you set DefaultMonitoringLabels, make sure that the Resource field + // together with these labels is unique to the + // current process. 
This is to ensure that there is only a single writer to + // each TimeSeries in Stackdriver. + // + // Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the + // default "opencensus_task" label. You should only do this if you know that + // the Resource you set uniquely identifies this Go process. + DefaultMonitoringLabels *Labels + + // Context allows you to provide a custom context for API calls. + // + // This context will be used several times: first, to create Stackdriver + // trace and metric clients, and then every time a new batch of traces or + // stats needs to be uploaded. + // + // Do not set a timeout on this context. Instead, set the Timeout option. + // + // If unset, context.Background() will be used. + Context context.Context + + // SkipCMD enforces to skip all the CreateMetricDescriptor calls. + // These calls are important in order to configure the unit of the metrics, + // but in some cases all the exported metrics are builtin (unit is configured) + // or the unit is not important. + SkipCMD bool + + // Timeout for all API calls. If not set, defaults to 5 seconds. + Timeout time.Duration + + // ReportingInterval sets the interval between reporting metrics. + // If it is set to zero then default value is used. + ReportingInterval time.Duration + + // NumberOfWorkers sets the number of go rountines that send requests + // to Stackdriver Monitoring. This is only used for Proto metrics export + // for now. The minimum number of workers is 1. + NumberOfWorkers int + + // ResourceByDescriptor may be provided to supply monitored resource dynamically + // based on the metric Descriptor. Most users will not need to set this, + // but should instead set ResourceDetector. + // + // The MonitoredResource and ResourceDetector fields are ignored if this + // field is set to a non-nil value. 
+ // + // The ResourceByDescriptor is called to derive monitored resources from + // metric.Descriptor and the label map associated with the time-series. + // If any label is used for the derived resource then it will be removed + // from the label map. The remaining labels in the map are returned to + // be used with the time-series. + // + // If the func set to this field does not return valid resource even for one + // time-series then it will result into an error for the entire CreateTimeSeries request + // which may contain more than one time-series. + ResourceByDescriptor func(*metricdata.Descriptor, map[string]string) (map[string]string, monitoredresource.Interface) +} + +const defaultTimeout = 5 * time.Second + +var defaultDomain = path.Join("custom.googleapis.com", "opencensus") + +// Exporter is a stats and trace exporter that uploads data to Stackdriver. +// +// You can create a single Exporter and register it as both a trace exporter +// (to export to Stackdriver Trace) and a stats exporter (to integrate with +// Stackdriver Monitoring). +type Exporter struct { + traceExporter *traceExporter + statsExporter *statsExporter +} + +// NewExporter creates a new Exporter that implements both stats.Exporter and +// trace.Exporter. +func NewExporter(o Options) (*Exporter, error) { + if o.ProjectID == "" { + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + creds, err := google.FindDefaultCredentials(ctx, traceapi.DefaultAuthScopes()...) + if err != nil { + return nil, fmt.Errorf("stackdriver: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("stackdriver: no project found with application default credentials") + } + o.ProjectID = creds.ProjectID + } + if o.Location == "" { + if metadataapi.OnGCE() { + zone, err := metadataapi.Zone() + if err != nil { + // This error should be logged with a warning level. 
+ err = fmt.Errorf("setting Stackdriver default location failed: %s", err) + if o.OnError != nil { + o.OnError(err) + } else { + log.Print(err) + } + } else { + o.Location = zone + } + } + } + + if o.MonitoredResource != nil { + o.Resource = convertMonitoredResourceToPB(o.MonitoredResource) + } + if o.MapResource == nil { + o.MapResource = defaultMapResource + } + if o.ResourceDetector != nil { + // For backwards-compatibility we still respect the deprecated resource field. + if o.Resource != nil { + return nil, errors.New("stackdriver: ResourceDetector must not be used in combination with deprecated resource fields") + } + res, err := o.ResourceDetector(o.Context) + if err != nil { + return nil, fmt.Errorf("stackdriver: detect resource: %s", err) + } + // Populate internal resource labels for defaulting project_id, location, and + // generic resource labels of applicable monitored resources. + res.Labels[stackdriverProjectID] = o.ProjectID + res.Labels[stackdriverLocation] = o.Location + res.Labels[stackdriverGenericTaskNamespace] = "default" + res.Labels[stackdriverGenericTaskJob] = path.Base(os.Args[0]) + res.Labels[stackdriverGenericTaskID] = getTaskValue() + + o.Resource = o.MapResource(res) + } + if o.MetricPrefix != "" && !strings.HasSuffix(o.MetricPrefix, "/") { + o.MetricPrefix = o.MetricPrefix + "/" + } + + se, err := newStatsExporter(o) + if err != nil { + return nil, err + } + te, err := newTraceExporter(o) + if err != nil { + return nil, err + } + return &Exporter{ + statsExporter: se, + traceExporter: te, + }, nil +} + +// ExportView exports to the Stackdriver Monitoring if view data +// has one or more rows. +// Deprecated: use ExportMetrics and StartMetricsExporter instead. +func (e *Exporter) ExportView(vd *view.Data) { + e.statsExporter.ExportView(vd) +} + +// ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously, +// without de-duping or adding proto metrics to the bundler. 
+func (e *Exporter) ExportMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) error { + _, err := e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics) + return err +} + +// PushMetricsProto simliar with ExportMetricsProto but returns the number of dropped timeseries. +func (e *Exporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) { + return e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics) +} + +// ExportMetrics exports OpenCensus Metrics to Stackdriver Monitoring +func (e *Exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + return e.statsExporter.ExportMetrics(ctx, metrics) +} + +// StartMetricsExporter starts exporter by creating an interval reader that reads metrics +// from all registered producers at set interval and exports them. +// Use StopMetricsExporter to stop exporting metrics. +// Previously, it required registering exporter to export stats collected by opencensus. +// exporter := stackdriver.NewExporter(stackdriver.Option{}) +// view.RegisterExporter(exporter) +// Now, it requires to call StartMetricsExporter() to export stats and metrics collected by opencensus. +// exporter := stackdriver.NewExporter(stackdriver.Option{}) +// exporter.StartMetricsExporter() +// defer exporter.StopMetricsExporter() +// +// Both approach should not be used simultaenously. Otherwise it may result into unknown behavior. +// Previous approach continues to work as before but will not report newly define metrics such +// as gauges. +func (e *Exporter) StartMetricsExporter() error { + return e.statsExporter.startMetricsReader() +} + +// StopMetricsExporter stops exporter from exporting metrics. +func (e *Exporter) StopMetricsExporter() { + e.statsExporter.stopMetricsReader() +} + +// ExportSpan exports a SpanData to Stackdriver Trace. 
+func (e *Exporter) ExportSpan(sd *trace.SpanData) { + if len(e.traceExporter.o.DefaultTraceAttributes) > 0 { + sd = e.sdWithDefaultTraceAttributes(sd) + } + e.traceExporter.ExportSpan(sd) +} + +func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData { + newSD := *sd + newSD.Attributes = make(map[string]interface{}) + for k, v := range e.traceExporter.o.DefaultTraceAttributes { + newSD.Attributes[k] = v + } + for k, v := range sd.Attributes { + newSD.Attributes[k] = v + } + return &newSD +} + +// Flush waits for exported data to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose recent stats or spans. +func (e *Exporter) Flush() { + e.statsExporter.Flush() + e.traceExporter.Flush() +} + +func (o Options) handleError(err error) { + if o.OnError != nil { + o.OnError(err) + return + } + log.Printf("Failed to export to Stackdriver: %v", err) +} + +func newContextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, func()) { + if ctx == nil { + ctx = context.Background() + } + if timeout <= 0 { + timeout = defaultTimeout + } + return context.WithTimeout(ctx, timeout) +} + +// convertMonitoredResourceToPB converts MonitoredResource data in to +// protocol buffer. 
+func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource { + mrpb := new(monitoredrespb.MonitoredResource) + var labels map[string]string + mrpb.Type, labels = mr.MonitoredResource() + mrpb.Labels = make(map[string]string) + for k, v := range labels { + mrpb.Labels[k] = v + } + return mrpb +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go new file mode 100644 index 0000000000..e0a02ca9a3 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go @@ -0,0 +1,628 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stackdriver + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + "sync" + "time" + + opencensus "go.opencensus.io" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + monitoring "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/ptypes/timestamp" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + "google.golang.org/genproto/googleapis/api/metric" + googlemetricpb "google.golang.org/genproto/googleapis/api/metric" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +const ( + maxTimeSeriesPerUpload = 200 + opencensusTaskKey = "opencensus_task" + opencensusTaskDescription = "Opencensus task identifier" + defaultDisplayNamePrefix = "OpenCensus" + version = "0.10.0" +) + +var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version) + +// statsExporter exports stats to the Stackdriver Monitoring. 
+type statsExporter struct { + o Options + + viewDataBundler *bundler.Bundler + metricsBundler *bundler.Bundler + + protoMu sync.Mutex + protoMetricDescriptors map[string]bool // Metric descriptors that were already created remotely + + metricMu sync.Mutex + metricDescriptors map[string]bool // Metric descriptors that were already created remotely + + c *monitoring.MetricClient + defaultLabels map[string]labelValue + ir *metricexport.IntervalReader + + initReaderOnce sync.Once +} + +var ( + errBlankProjectID = errors.New("expecting a non-blank ProjectID") +) + +// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring. +// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent +// invocations of NewExporter with the same ProjectID will return an error. +func newStatsExporter(o Options) (*statsExporter, error) { + if strings.TrimSpace(o.ProjectID) == "" { + return nil, errBlankProjectID + } + + opts := append(o.MonitoringClientOptions, option.WithUserAgent(userAgent)) + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + client, err := monitoring.NewMetricClient(ctx, opts...) + if err != nil { + return nil, err + } + e := &statsExporter{ + c: client, + o: o, + protoMetricDescriptors: make(map[string]bool), + metricDescriptors: make(map[string]bool), + } + + var defaultLablesNotSanitized map[string]labelValue + if o.DefaultMonitoringLabels != nil { + defaultLablesNotSanitized = o.DefaultMonitoringLabels.m + } else { + defaultLablesNotSanitized = map[string]labelValue{ + opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription}, + } + } + + e.defaultLabels = make(map[string]labelValue) + // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched. 
+ for key, label := range defaultLablesNotSanitized { + e.defaultLabels[sanitize(key)] = label + } + + e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { + vds := bundle.([]*view.Data) + e.handleUpload(vds...) + }) + e.metricsBundler = bundler.NewBundler((*metricdata.Metric)(nil), func(bundle interface{}) { + metrics := bundle.([]*metricdata.Metric) + e.handleMetricsUpload(metrics) + }) + if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 { + e.viewDataBundler.DelayThreshold = delayThreshold + e.metricsBundler.DelayThreshold = delayThreshold + } + if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 { + e.viewDataBundler.BundleCountThreshold = countThreshold + e.metricsBundler.BundleCountThreshold = countThreshold + } + return e, nil +} + +func (e *statsExporter) startMetricsReader() error { + e.initReaderOnce.Do(func() { + e.ir, _ = metricexport.NewIntervalReader(metricexport.NewReader(), e) + }) + e.ir.ReportingInterval = e.o.ReportingInterval + return e.ir.Start() +} + +func (e *statsExporter) stopMetricsReader() { + if e.ir != nil { + e.ir.Stop() + } +} + +func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) { + resource := e.o.Resource + if resource == nil { + resource = &monitoredrespb.MonitoredResource{ + Type: "global", + } + } + return tags, resource +} + +// ExportView exports to the Stackdriver Monitoring if view data +// has one or more rows. +func (e *statsExporter) ExportView(vd *view.Data) { + if len(vd.Rows) == 0 { + return + } + err := e.viewDataBundler.Add(vd, 1) + switch err { + case nil: + return + case bundler.ErrOverflow: + e.o.handleError(errors.New("failed to upload: buffer full")) + default: + e.o.handleError(err) + } +} + +// getTaskValue returns a task label value in the format of +// "go-@". 
+func getTaskValue() string { + hostname, err := os.Hostname() + if err != nil { + hostname = "localhost" + } + return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname +} + +// handleUpload handles uploading a slice +// of Data, as well as error handling. +func (e *statsExporter) handleUpload(vds ...*view.Data) { + if err := e.uploadStats(vds); err != nil { + e.o.handleError(err) + } +} + +// Flush waits for exported view data and metrics to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose data that hasn't yet been exported. +func (e *statsExporter) Flush() { + e.viewDataBundler.Flush() + e.metricsBundler.Flush() +} + +func (e *statsExporter) uploadStats(vds []*view.Data) error { + ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout) + defer cancel() + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadStats", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + + for _, vd := range vds { + if err := e.createMetricDescriptorFromView(ctx, vd.View); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + return err + } + } + for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { + if err := createTimeSeries(ctx, e.c, req); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + // TODO(jbd): Don't fail fast here, batch errors? 
+ return err + } + } + return nil +} + +func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { + var reqs []*monitoringpb.CreateTimeSeriesRequest + + var allTimeSeries []*monitoringpb.TimeSeries + for _, vd := range vds { + for _, row := range vd.Rows { + tags, resource := e.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...)) + ts := &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: e.metricType(vd.View), + Labels: newLabels(e.defaultLabels, tags), + }, + Resource: resource, + Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, + } + allTimeSeries = append(allTimeSeries, ts) + } + } + + var timeSeries []*monitoringpb.TimeSeries + for _, ts := range allTimeSeries { + timeSeries = append(timeSeries, ts) + if len(timeSeries) == limit { + ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) + reqs = append(reqs, ctsreql...) + timeSeries = timeSeries[:0] + } + } + + if len(timeSeries) > 0 { + ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries) + reqs = append(reqs, ctsreql...) + } + return reqs +} + +func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) { + m := v.Measure + agg := v.Aggregation + viewName := v.Name + + metricType := e.metricType(v) + var valueType metricpb.MetricDescriptor_ValueType + unit := m.Unit() + // Default metric Kind + metricKind := metricpb.MetricDescriptor_CUMULATIVE + + switch agg.Type { + case view.AggTypeCount: + valueType = metricpb.MetricDescriptor_INT64 + // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", + // because this view does not apply to the recorded values. 
+ unit = stats.UnitDimensionless + case view.AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + case view.AggTypeDistribution: + valueType = metricpb.MetricDescriptor_DISTRIBUTION + case view.AggTypeLastValue: + metricKind = metricpb.MetricDescriptor_GAUGE + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + default: + return nil, fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) + } + + var displayName string + if e.o.GetMetricDisplayName == nil { + displayName = e.displayName(viewName) + } else { + displayName = e.o.GetMetricDisplayName(v) + } + + res := &metricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), + DisplayName: displayName, + Description: v.Description, + Unit: unit, + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys), + } + return res, nil +} + +// createMetricDescriptorFromView creates a MetricDescriptor for the given view data in Stackdriver Monitoring. +// An error will be returned if there is already a metric descriptor created with the same name +// but it has a different aggregation or keys. 
+func (e *statsExporter) createMetricDescriptorFromView(ctx context.Context, v *view.View) error { + // Skip create metric descriptor if configured + if e.o.SkipCMD { + return nil + } + + e.metricMu.Lock() + defer e.metricMu.Unlock() + + viewName := v.Name + + if _, created := e.metricDescriptors[viewName]; created { + return nil + } + + if builtinMetric(e.metricType(v)) { + e.metricDescriptors[viewName] = true + return nil + } + + inMD, err := e.viewToMetricDescriptor(ctx, v) + if err != nil { + return err + } + + if err = e.createMetricDescriptor(ctx, inMD); err != nil { + return err + } + + // Now cache the metric descriptor + e.metricDescriptors[viewName] = true + return nil +} + +func (e *statsExporter) displayName(suffix string) string { + return path.Join(defaultDisplayNamePrefix, suffix) +} + +func (e *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) { + if len(ts) == 0 { + return nil + } + + // Since there are scenarios in which Metrics with the same Type + // can be bunched in the same TimeSeries, we have to ensure that + // we create a unique CreateTimeSeriesRequest with entirely unique Metrics + // per TimeSeries, lest we'll encounter: + // + // err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written: + // Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered. + // Only one point can be written per TimeSeries per request.: timeSeries[2] + // + // This scenario happens when we are using the OpenCensus Agent in which multiple metrics + // are streamed by various client applications. 
+ // See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73 + uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts)) + nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts)) + seenMetrics := make(map[string]struct{}) + + for _, tti := range ts { + key := metricSignature(tti.Metric) + if _, alreadySeen := seenMetrics[key]; !alreadySeen { + uniqueTimeSeries = append(uniqueTimeSeries, tti) + seenMetrics[key] = struct{}{} + } else { + nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti) + } + } + + // UniqueTimeSeries can be bunched up together + // While for each nonUniqueTimeSeries, we have + // to make a unique CreateTimeSeriesRequest. + ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + TimeSeries: uniqueTimeSeries, + }) + + // Now recursively also combine the non-unique TimeSeries + // that were singly added to nonUniqueTimeSeries. + // The reason is that we need optimal combinations + // for optimal combinations because: + // * "a/b/c" + // * "a/b/c" + // * "x/y/z" + // * "a/b/c" + // * "x/y/z" + // * "p/y/z" + // * "d/y/z" + // + // should produce: + // CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"] + // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"] + // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"] + nonUniqueRequests := e.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries) + ctsreql = append(ctsreql, nonUniqueRequests...) 
+ + return ctsreql +} + +// metricSignature creates a unique signature consisting of a +// metric's type and its lexicographically sorted label values +// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/120 +func metricSignature(metric *googlemetricpb.Metric) string { + labels := metric.GetLabels() + labelValues := make([]string, 0, len(labels)) + + for _, labelValue := range labels { + labelValues = append(labelValues, labelValue) + } + sort.Strings(labelValues) + return fmt.Sprintf("%s:%s", metric.GetType(), strings.Join(labelValues, ",")) +} + +func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + switch v.Aggregation.Type { + case view.AggTypeLastValue: + return newGaugePoint(v, row, end) + default: + return newCumulativePoint(v, row, start, end) + } +} + +func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: newTypedValue(v, row), + } +} + +func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point { + gaugeTime := ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + } + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: gaugeTime, + }, + Value: newTypedValue(v, row), + } +} + +func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { + switch v := r.Data.(type) { + case *view.CountData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v.Value, + }} + case *view.SumData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case 
*stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + case *view.DistributionData: + insertZeroBound := shouldInsertZeroBound(vd.Aggregation.Buckets...) + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: v.Count, + Mean: v.Mean, + SumOfSquaredDeviation: v.SumOfSquaredDev, + // TODO(songya): uncomment this once Stackdriver supports min/max. + // Range: &distributionpb.Distribution_Range{ + // Min: v.Min, + // Max: v.Max, + // }, + BucketOptions: &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + Bounds: addZeroBoundOnCondition(insertZeroBound, vd.Aggregation.Buckets...), + }, + }, + }, + BucketCounts: addZeroBucketCountOnCondition(insertZeroBound, v.CountPerBucket...), + }, + }} + case *view.LastValueData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + } + return nil +} + +func shouldInsertZeroBound(bounds ...float64) bool { + if len(bounds) > 0 && bounds[0] != 0.0 { + return true + } + return false +} + +func addZeroBucketCountOnCondition(insert bool, counts ...int64) []int64 { + if insert { + return append([]int64{0}, counts...) + } + return counts +} + +func addZeroBoundOnCondition(insert bool, bounds ...float64) []float64 { + if insert { + return append([]float64{0.0}, bounds...) 
+ } + return bounds +} + +func (e *statsExporter) metricType(v *view.View) string { + if formatter := e.o.GetMetricType; formatter != nil { + return formatter(v) + } + return path.Join("custom.googleapis.com", "opencensus", v.Name) +} + +func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string { + labels := make(map[string]string) + for k, lbl := range defaults { + labels[sanitize(k)] = lbl.val + } + for _, tag := range tags { + labels[sanitize(tag.Key.Name())] = tag.Value + } + return labels +} + +func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults)) + for key, lbl := range defaults { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key), + Description: lbl.desc, + ValueType: labelpb.LabelDescriptor_STRING, + }) + } + for _, key := range keys { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key.Name()), + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + }) + } + return labelDescriptors +} + +func (e *statsExporter) createMetricDescriptor(ctx context.Context, md *metric.MetricDescriptor) error { + ctx, cancel := newContextWithTimeout(ctx, e.o.Timeout) + defer cancel() + cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + MetricDescriptor: md, + } + _, err := createMetricDescriptor(ctx, e.c, cmrdesc) + return err +} + +var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.CreateMetricDescriptor(ctx, mdr) +} + +var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error { + return c.CreateTimeSeries(ctx, ts) +} + +var knownExternalMetricPrefixes = []string{ + 
"custom.googleapis.com/", + "external.googleapis.com/", +} + +// builtinMetric returns true if a MetricType is a heuristically known +// built-in Stackdriver metric +func builtinMetric(metricType string) bool { + for _, knownExternalMetric := range knownExternalMetricPrefixes { + if strings.HasPrefix(metricType, knownExternalMetric) { + return false + } + } + return true +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go new file mode 100644 index 0000000000..ee6535eefc --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go @@ -0,0 +1,178 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + tracingclient "cloud.google.com/go/trace/apiv2" + "github.com/golang/protobuf/proto" + "go.opencensus.io/trace" + "google.golang.org/api/support/bundler" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +// traceExporter is an implementation of trace.Exporter that uploads spans to +// Stackdriver. +// +type traceExporter struct { + o Options + projectID string + bundler *bundler.Bundler + // uploadFn defaults to uploadSpans; it can be replaced for tests. 
+ uploadFn func(spans []*tracepb.Span) + overflowLogger + client *tracingclient.Client +} + +var _ trace.Exporter = (*traceExporter)(nil) + +func newTraceExporter(o Options) (*traceExporter, error) { + ctx := o.Context + if ctx == nil { + ctx = context.Background() + } + client, err := tracingclient.NewClient(ctx, o.TraceClientOptions...) + if err != nil { + return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) + } + return newTraceExporterWithClient(o, client), nil +} + +const defaultBufferedByteLimit = 8 * 1024 * 1024 + +func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { + e := &traceExporter{ + projectID: o.ProjectID, + client: c, + o: o, + } + b := bundler.NewBundler((*tracepb.Span)(nil), func(bundle interface{}) { + e.uploadFn(bundle.([]*tracepb.Span)) + }) + if o.BundleDelayThreshold > 0 { + b.DelayThreshold = o.BundleDelayThreshold + } else { + b.DelayThreshold = 2 * time.Second + } + if o.BundleCountThreshold > 0 { + b.BundleCountThreshold = o.BundleCountThreshold + } else { + b.BundleCountThreshold = 50 + } + // The measured "bytes" are not really bytes, see exportReceiver. + b.BundleByteThreshold = b.BundleCountThreshold * 200 + b.BundleByteLimit = b.BundleCountThreshold * 1000 + if o.TraceSpansBufferMaxBytes > 0 { + b.BufferedByteLimit = o.TraceSpansBufferMaxBytes + } else { + b.BufferedByteLimit = defaultBufferedByteLimit + } + + e.bundler = b + e.uploadFn = e.uploadSpans + return e +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *traceExporter) ExportSpan(s *trace.SpanData) { + protoSpan := protoFromSpanData(s, e.projectID, e.o.Resource) + protoSize := proto.Size(protoSpan) + err := e.bundler.Add(protoSpan, protoSize) + switch err { + case nil: + return + case bundler.ErrOversizedItem: + case bundler.ErrOverflow: + e.overflowLogger.log() + default: + e.o.handleError(err) + } +} + +// Flush waits for exported trace spans to be uploaded. 
+// +// This is useful if your program is ending and you do not want to lose recent +// spans. +func (e *traceExporter) Flush() { + e.bundler.Flush() +} + +// uploadSpans uploads a set of spans to Stackdriver. +func (e *traceExporter) uploadSpans(spans []*tracepb.Span) { + req := tracepb.BatchWriteSpansRequest{ + Name: "projects/" + e.projectID, + Spans: spans, + } + // Create a never-sampled span to prevent traces associated with exporter. + ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout) + defer cancel() + ctx, span := trace.StartSpan( + ctx, + "contrib.go.opencensus.io/exporter/stackdriver.uploadSpans", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + + err := e.client.BatchWriteSpans(ctx, &req) + if err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + e.o.handleError(err) + } +} + +// overflowLogger ensures that at most one overflow error log message is +// written every 5 seconds. 
+type overflowLogger struct { + mu sync.Mutex + pause bool + accum int +} + +func (o *overflowLogger) delay() { + o.pause = true + time.AfterFunc(5*time.Second, func() { + o.mu.Lock() + defer o.mu.Unlock() + switch { + case o.accum == 0: + o.pause = false + case o.accum == 1: + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.accum = 0 + o.delay() + default: + log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) + o.accum = 0 + o.delay() + } + }) +} + +func (o *overflowLogger) log() { + o.mu.Lock() + defer o.mu.Unlock() + if !o.pause { + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.delay() + } else { + o.accum++ + } +} diff --git a/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go new file mode 100644 index 0000000000..422a980268 --- /dev/null +++ b/test/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go @@ -0,0 +1,291 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stackdriver + +import ( + "fmt" + "math" + "strconv" + "time" + "unicode/utf8" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 + maxAttributeStringValue = 256 + agentLabel = "g.co/agent" + + labelHTTPHost = `/http/host` + labelHTTPMethod = `/http/method` + labelHTTPStatusCode = `/http/status_code` + labelHTTPPath = `/http/path` + labelHTTPUserAgent = `/http/user_agent` +) + +// proto returns a protocol buffer representation of a SpanData. +func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource) *tracepb.Span { + if s == nil { + return nil + } + + traceIDString := s.SpanContext.TraceID.String() + spanIDString := s.SpanContext.SpanID.String() + + name := s.Name + switch s.SpanKind { + case trace.SpanKindClient: + name = "Sent." + name + case trace.SpanKindServer: + name = "Recv." 
+ name + } + + sp := &tracepb.Span{ + Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, + SpanId: spanIDString, + DisplayName: trunc(name, 128), + StartTime: timestampProto(s.StartTime), + EndTime: timestampProto(s.EndTime), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, + } + if p := s.ParentSpanID; p != (trace.SpanID{}) { + sp.ParentSpanId = p.String() + } + if s.Status.Code != 0 || s.Status.Message != "" { + sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} + } + + var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int + copyAttributes(&sp.Attributes, s.Attributes) + + // Copy MonitoredResources as span Attributes + sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr) + + as := s.Annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} + copyAttributes(&annotation.Attributes, a.Attributes) + event := &tracepb.Span_TimeEvent{ + Time: timestampProto(a.Time), + Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, + } + annotations++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) + } + + if sp.Attributes == nil { + sp.Attributes = &tracepb.Span_Attributes{ + AttributeMap: make(map[string]*tracepb.AttributeValue), + } + } + + // Only set the agent label if it is not already set. That enables the + // OpenCensus agent/collector to set the agent label based on the library that + // sent the span to the agent. 
+ if _, hasAgent := sp.Attributes.AttributeMap[agentLabel]; !hasAgent { + sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: trunc(userAgent, maxAttributeStringValue), + }, + } + } + + es := s.MessageEvents + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ + Time: timestampProto(e.Time), + Value: &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: e.MessageID, + UncompressedSizeBytes: e.UncompressedByteSize, + CompressedSizeBytes: e.CompressedByteSize, + }, + }, + }) + } + + if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + } + + if len(s.Links) > 0 { + sp.Links = &tracepb.Span_Links{} + sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) + for _, l := range s.Links { + link := &tracepb.Span_Link{ + TraceId: l.TraceID.String(), + SpanId: l.SpanID.String(), + Type: tracepb.Span_Link_Type(l.Type), + } + copyAttributes(&link.Attributes, l.Attributes) + sp.Links.Link = append(sp.Links.Link, link) + } + } + return sp +} + +// timestampProto creates a timestamp proto for a time.Time. +func timestampProto(t time.Time) *timestamppb.Timestamp { + return ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// copyMonitoredResourceAttributes copies proto monitoredResource to proto map field (Span_Attributes) +// it creates the map if it is nil. 
+func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes { + if mr == nil { + return out + } + if out == nil { + out = &tracepb.Span_Attributes{} + } + if out.AttributeMap == nil { + out.AttributeMap = make(map[string]*tracepb.AttributeValue) + } + for k, v := range mr.Labels { + av := attributeValue(v) + out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av + } + return out +} + +// copyAttributes copies a map of attributes to a proto map field. +// It creates the map if it is nil. +func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { + if len(in) == 0 { + return + } + if *out == nil { + *out = &tracepb.Span_Attributes{} + } + if (*out).AttributeMap == nil { + (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) + } + var dropped int32 + for key, value := range in { + av := attributeValue(value) + if av == nil { + continue + } + switch key { + case ochttp.PathAttribute: + (*out).AttributeMap[labelHTTPPath] = av + case ochttp.HostAttribute: + (*out).AttributeMap[labelHTTPHost] = av + case ochttp.MethodAttribute: + (*out).AttributeMap[labelHTTPMethod] = av + case ochttp.UserAgentAttribute: + (*out).AttributeMap[labelHTTPUserAgent] = av + case ochttp.StatusCodeAttribute: + (*out).AttributeMap[labelHTTPStatusCode] = av + default: + if len(key) > 128 { + dropped++ + continue + } + (*out).AttributeMap[key] = av + } + } + (*out).DroppedAttributesCount = dropped +} + +func attributeValue(v interface{}) *tracepb.AttributeValue { + switch value := v.(type) { + case bool: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, + } + case int64: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: value}, + } + case float64: + // TODO: set double value if Stackdriver Trace support it in the future. 
+ return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: trunc(strconv.FormatFloat(value, 'f', -1, 64), + maxAttributeStringValue)}, + } + case string: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, + } + } + return nil +} + +// trunc returns a TruncatableString truncated to the given limit. +func trunc(s string, limit int) *tracepb.TruncatableString { + if len(s) > limit { + b := []byte(s[:limit]) + for { + r, size := utf8.DecodeLastRune(b) + if r == utf8.RuneError && size == 1 { + b = b[:len(b)-1] + } else { + break + } + } + return &tracepb.TruncatableString{ + Value: string(b), + TruncatedByteCount: clip32(len(s) - len(b)), + } + } + return &tracepb.TruncatableString{ + Value: s, + TruncatedByteCount: 0, + } +} + +// clip32 clips an int to the range of an int32. +func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/test/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/test/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000000..899129ecc4 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 0000000000..99849c0e19 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. 
+// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. 
+func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. 
+// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000000..9cf7eaf400 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. 
+type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. 
+func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
+func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. 
We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000000..1a3d106d5c --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 0000000000..142a7a01c5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000000..a4eb6a7f43 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000000..710eb432f8 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
+func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + 
buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000000..645df2450f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + for i := 0; i < v.Type().NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue // ignore unexported fields + } + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + continue // ignore unset fields + } + + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(ft.Name + ": ") + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + stringValue(fv, indent+2, buf) + } + + buf.WriteString(",\n") + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) 
+ + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/test/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000000..03334d6920 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,97 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + PartitionID string + Endpoint string + SigningRegion string + SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overridden based on metadata the + // service has. + SigningNameDerived bool +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. 
+type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. 
+func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/test/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 0000000000..9f6af19dd4 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+ MaxThrottleDelay time.Duration +} + +const ( + // DefaultRetryerMaxNumRetries sets maximum number of retries + DefaultRetryerMaxNumRetries = 3 + + // DefaultRetryerMinRetryDelay sets minimum retry delay + DefaultRetryerMinRetryDelay = 30 * time.Millisecond + + // DefaultRetryerMinThrottleDelay sets minimum delay when throttled + DefaultRetryerMinThrottleDelay = 500 * time.Millisecond + + // DefaultRetryerMaxRetryDelay sets maximum retry delay + DefaultRetryerMaxRetryDelay = 300 * time.Second + + // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled + DefaultRetryerMaxThrottleDelay = 300 * time.Second +) + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. +func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +// setRetryerDefaults sets the default values of the retryer if not set +func (d *DefaultRetryer) setRetryerDefaults() { + if d.MinRetryDelay == 0 { + d.MinRetryDelay = DefaultRetryerMinRetryDelay + } + if d.MaxRetryDelay == 0 { + d.MaxRetryDelay = DefaultRetryerMaxRetryDelay + } + if d.MinThrottleDelay == 0 { + d.MinThrottleDelay = DefaultRetryerMinThrottleDelay + } + if d.MaxThrottleDelay == 0 { + d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay + } +} + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + + // if number of max retries is zero, no retries will be performed. 
+ if d.NumMaxRetries == 0 { + return 0 + } + + // Sets default value for retryer members + d.setRetryerDefaults() + + // minDelay is the minimum retryer delay + minDelay := d.MinRetryDelay + + var initialDelay time.Duration + + isThrottle := r.IsErrorThrottle() + if isThrottle { + if delay, ok := getRetryAfterDelay(r); ok { + initialDelay = delay + } + minDelay = d.MinThrottleDelay + } + + retryCount := r.RetryCount + + // maxDelay the maximum retryer delay + maxDelay := d.MaxRetryDelay + + if isThrottle { + maxDelay = d.MaxThrottleDelay + } + + var delay time.Duration + + // Logic to cap the retry count based on the minDelay provided + actualRetryCount := int(math.Log2(float64(minDelay))) + 1 + if actualRetryCount < 63-retryCount { + delay = time.Duration(1< maxDelay { + delay = getJitterDelay(maxDelay / 2) + } + } else { + delay = getJitterDelay(maxDelay / 2) + } + return delay + initialDelay +} + +// getJitterDelay returns a jittered delay for retry +func getJitterDelay(duration time.Duration) time.Duration { + return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + + // ShouldRetry returns false if number of max retries is 0. 
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/test/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 0000000000..8958c32d4e --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,194 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. 
+ Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/test/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 0000000000..0c48f72e08 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,14 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/test/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 0000000000..881d575f01 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. 
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/config.go b/test/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 0000000000..2def23fa1d --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,586 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. 
+ Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. 
+ // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. 
See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. 
This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). 
+// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. 
+func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. 
+func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if 
other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 0000000000..2866f9a7fb --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. 
+ Value(key interface{}) interface{} +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 0000000000..3718b26e10 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 0000000000..2f9446333a --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package aws + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
+func BackgroundContext() Context { + return context.BackgroundCtx +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 0000000000..9c29f29af1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/test/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 0000000000..304fd15612 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. 
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/test/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000000..4e076c1837 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. 
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int8 returns a pointer to the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Value returns the value of the int8 pointer passed in or +// 0 if the pointer is nil. 
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. 
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. 
+func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. +func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 
0000000000..aa902d7083 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,230 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. 
Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. 
+ // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. 
+ r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. 
+ if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000000..7d50b1557c --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. 
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 0000000000..ab69c7a6f3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. 
+var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000000..3ad1e798df --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. 
If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. 
+func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 0000000000..5852b26487 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 0000000000..388b215418 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. 
+// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 0000000000..8152a864ad --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. 
Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 0000000000..4356edb3d5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 0000000000..c75d7bba03 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,310 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. 
+// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. 
+var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. 
+func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. 
+// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds atomic.Value + sf singleflight.Group + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + c.creds.Store(Value{}) + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. 
+ resCh := c.sf.DoChan("", c.singleRetrieve) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve() (interface{}, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + creds, err := c.provider.Retrieve() + if err == nil { + c.creds.Store(creds) + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.creds.Store(Value{}) +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + return c.isExpired(c.creds.Load()) +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
+func (c *Credentials) ExpiresAt() (time.Time, error) { + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), + nil) + } + if c.creds.Load().(Value) == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 0000000000..43d4ed386a --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,180 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. 
If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. 
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. 
+// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 0000000000..1a7af53a4d --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,203 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. 
The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. 
+ AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, 
+ r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 0000000000..54c5cf7333 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. 
+func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 0000000000..e624836002 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. 
+ + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. 
+ + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong 
version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. 
+ ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. +func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. 
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return 
output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 0000000000..e155149581 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. 
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. 
+// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. 
+ // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000000..531139e397 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,55 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. 
Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 0000000000..9f37f44bcf --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,321 @@ +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. 
+ sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with a MFA token you can either specify a MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will be not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfing the TokenProvider function signature. 
+ +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkrand" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. 
+type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. 
+ Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. 
+ // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // MaxJitterFrac reduces the effective Duration of each credential requested + // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must + // have a value between 0 and 1. Any other value may lead to expected behavior. + // With a MaxJitterFrac value of 0, default) will no jitter will be used. + // + // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the + // AssumeRole call will be made with an arbitrary Duration between 27m and + // 30m. + // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. 
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 0000000000..b20b633948 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,100 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + + client stsiface.STSAPI + ExpiryWindow time.Duration + + tokenFilePath string + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path. 
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFilePath: path, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + b, err := ioutil.ReadFile(p.tokenFilePath) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + }) + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. 
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 0000000000..25a66d1dda --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. 
It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) +// +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. +// r := csm.Get() +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +package csm diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 0000000000..4b19e2800e --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,89 @@ +package csm + +import ( + "fmt" + "strings" + "sync" +) + +var ( + lock sync.Mutex +) + +const ( + // DefaultPort is used when no port is specified. 
+ DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" +) + +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// r, err := csm.Start("clientID", "127.0.0.1:31000") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 0000000000..5bacc791a1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string 
`json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + 
m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 0000000000..82a3e345e9 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 0000000000..54a99280ce --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException 
+} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 0000000000..835bcd49cb --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + 
m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + 
rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but if +// called concurently with Continue can lead to unexpected state. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurently with Pause can lead to unexpected state. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// InjectHandlers will will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). 
+// +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) + + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue return 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 0000000000..23bb639e01 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,207 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. 
+package defaults + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. 
+func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: CredProviders(cfg, handlers), + }) +} + +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. 
This allows that default chain to be
fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 0000000000..ca0ee1dcc7 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. 
+// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/test/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 0000000000..4fcb616184 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. 
+// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 0000000000..12897eef62 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,199 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. 
+func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. 
+func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + 
awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument() + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID 
string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 0000000000..b8b2940d74 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,228 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// A EC2Metadata is an EC2 Metadata service Client. 
+type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. 
+ Timeout: 1 * time.Second, + } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. 
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error 
= awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 0000000000..663372a915 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. 
+type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 0000000000..343a2106f8 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,216 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) 
+ + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRegionalS3(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. 
+ if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, 
got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000000..295bb4de47 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,6435 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). 
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: 
"acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", 
+ }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "fips": endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + 
}, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: 
credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + 
}, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, 
+ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"config": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": 
endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + 
"eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": 
endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "eu-north-1": endpoint{}, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: 
[]string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + 
"eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: 
"aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + 
"eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + 
"eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "me-south-1": endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "portal.sso": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, 
+ "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: 
"s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + 
"ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, 
+ "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + 
"eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "me-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, 
+ }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, 
+ }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + 
Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: 
"iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + 
Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "streams.dynamodb": 
service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + 
}, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: 
"codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: 
endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, 
+ }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": 
service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + }, + }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, 
+ "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + 
+ Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: 
endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + 
Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + 
"us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 0000000000..ca8fc828e1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. 
+ AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. 
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. 
+ ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. 
+ WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 0000000000..84316b92c0 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. 
+// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 
+// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 0000000000..ca956e5f12 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,564 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. 
+ // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unknown and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. 
+// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. 
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. 
This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. 
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id, dnsSuffix string + p *partition +} + +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. 
+// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned may look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := make(map[string]Region, len(p.p.Regions)) + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := make(map[string]Service, len(p.p.Services)) + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. 
+func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. 
+func (s Service) Endpoints() map[string]Endpoint { + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The endpoint partition + PartitionID string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. 
+type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. 
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 0000000000..df75e899ad --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 0000000000..eb2ac83c99 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,341 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) 
(ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) 
EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { + region = "aws-global" + } + } + + e, hasEndpoint := s.endpointForRegion(region) + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string 
`json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. 
+ SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + 
e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000000..0fdfcc56e0 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. 
+ DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + 
+func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. 
+ {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . 
-}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/test/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000000..fa06f7a8f8 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/test/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 0000000000..91a6f277a7 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. 
+// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/test/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000000..6ed15b2ecc --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. 
+ LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) 
+} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 0000000000..d9b37f4d32 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,18 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000000..e819ab6c0e --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,343 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. 
+type Handlers struct { + Validate HandlerList + Build HandlerList + BuildStream HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns a copy of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.BuildStream.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. 
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) 
+ return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. 
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. 
+func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. 
+func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000000..79f79602b0 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000000..9370fa50c3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. 
+func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000000..d597c6ead5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,698 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
+ ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. 
+ ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. 
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + if retryer == nil { + retryer = noOpRetryer{} + } + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. 
The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// the were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always returns a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// a in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. 
+func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. 
+ r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. 
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. 
+func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. 
+ // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
+ r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. 
IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 0000000000..e36e468b7c --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. 
+// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 0000000000..de1292f45a --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 0000000000..a7365cd1e4 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 0000000000..307fa0705b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. 
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 0000000000..64784e16f3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. 
+ EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. 
+// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 0000000000..752ae47f84 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,309 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. +type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. 
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } + cfg.Retryer = retryer + return cfg + +} + +// noOpRetryer is a internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. 
+var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its 
Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. 
+func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 0000000000..09a44eb987 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. 
+func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. 
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 0000000000..8630683f31 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. 
The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. 
+ Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. 
+func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/test/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 0000000000..4601f883cc --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. 
+type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. 
+type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. 
+// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. 
+type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 0000000000..ea9ebb6f6a --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 0000000000..fec39dfc12 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 0000000000..1c5a5391e6 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 0000000000..cc64e24f1d --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,259 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. 
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. +var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. 
+ creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. + creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch 
sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. 
+func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 0000000000..7ec66e7e58 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,245 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). 
If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. 
Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. 
+ +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. 
All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. 
+ + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. 
+*/ +package session diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 0000000000..c1e0e9c954 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,345 @@ +package session + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. 
+ // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. 
If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled *bool + CSMPort string + CSMHost string + CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. 
+ // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() (envConfig, error) { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. 
See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. 
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 
100644 index 0000000000..0ff4996051 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,734 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. 
+// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg, envErr := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. 
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. 
+func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. 
By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. 
+ AssumeRoleDuration time.Duration + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. 
+// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. 
Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. 
+ var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. 
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, 
HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. + t = getCABundleTransport() + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + 
mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. 
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. 
+ opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) 
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/test/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000000..a8ed880760 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,547 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // 
DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. 
+ // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. 
The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() + } else { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + } + profiles[profile] = struct{}{} + + if err := cfg.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg + } + + return nil +} + +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, 
section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } 
+ } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. 
+func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. 
+func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 0000000000..07ea799fbd --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,81 @@ +package v4 + +import ( + "github.com/aws/aws-sdk-go/internal/strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic 
rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefixFold(value, pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 0000000000..6aa2ed241b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. 
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 0000000000..f35fc860b3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 0000000000..fed5c859ca --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 0000000000..02cbd97e23 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return 
&StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 0000000000..bd082e9d1f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 0000000000..d71f7b3f4f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,846 @@ +// Package v4 implements signing for AWS V4 
signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. 
The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. +// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + awsV4Request = "aws4_request" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. 
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. 
+var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. 
Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. 
+// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. 
This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. 
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. 
To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. 
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + 
ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if 
ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := 
ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func hmacSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func hashSHA256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. 
+ _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. 
+ spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/types.go b/test/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000000..d542ef01bc --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,241 @@ +package aws + +import ( + "io" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. 
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. 
+func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. 
+type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. 
+func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/url.go b/test/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 0000000000..6192b2455b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/test/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 0000000000..0210d2720e --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. 
+// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/aws/version.go b/test/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 0000000000..261815c0ad --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.29.4" diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/test/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 0000000000..876dcb3fde --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 0000000000..e83a99886b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. 
+// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 0000000000..0895d53cbe --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 0000000000..0b76999ba1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 0000000000..25ce0fe134 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. 
+// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 0000000000..04345a54c2 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 0000000000..91ba2a59dd --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 0000000000..8d462f77e2 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 0000000000..3b0ca7afe3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. 
+func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 0000000000..582c024ad1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 0000000000..cf9fad81e7 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,356 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an 
LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. 
+ stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
+func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 0000000000..24df543d38 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err 
= strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) + } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err 
= getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 0000000000..e52ac399f1 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, 
NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 0000000000..a45c0bc566 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == 
exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 0000000000..8a84c7cbe0 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 
1, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 0000000000..4572870193 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 0000000000..7f01cf7c70 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. 
+type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. +func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 0000000000..f82095ba25 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git 
a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 0000000000..da7a4049cf --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 0000000000..18f3fe8931 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 0000000000..305999d29b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if 
!(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. 
+func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 0000000000..94841c3244 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. 
+func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 0000000000..99915f7f77 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. 
+func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 0000000000..7ffb4ae06f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. +func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 0000000000..6c443988bb --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 0000000000..5aa9137e0f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 
+1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 0000000000..e5f005613b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 0000000000..44898eed0f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 0000000000..810ec7f08b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. 
+const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 0000000000..0c9802d877 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 0000000000..f4651da2da --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub 
for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 0000000000..b1d93a33d4 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 0000000000..38ea61afea --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 0000000000..7da8a49ce5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 0000000000..ebcbc2b40a --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/test/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 0000000000..d008ae27cb --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..14ad0c5891 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. 
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 0000000000..d7d42db0a6 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. +var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. 
+ continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 0000000000..915b0fcafd --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
+func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 0000000000..53831dff98 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. 
+func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 0000000000..864fb6704b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. 
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation 
on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 0000000000..5e9499699b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,282 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. 
The value to unmarshal the json document into must be a pointer to the +// type. +func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. +func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + 
} + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use 
escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 0000000000..776d110184 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. 
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. +func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 0000000000..0ea0647a57 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. 
This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. 
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "PUT"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 0000000000..9d521dcb95 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. +func (p RequireHTTPMinProtocol) Handler(r *request.Request) { + if r.Error != nil || r.HTTPResponse == nil { + return + } + + if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } + + if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor { + r.Error = newMinHTTPProtoError(p.Major, p.Minor, r) + } +} + +// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint +// did not match the required HTTP major and minor protocol version. 
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 0000000000..d40346a779 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000000..75866d0121 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, 
r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000000..9231e95d16 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 0000000000..831b0110c5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,69 @@ +package query + +import ( + "encoding/xml" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +type xmlErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlResponseError struct { + xmlErrorResponse +} + +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} + +// UnmarshalError unmarshals an error response for an AWS Query service. 
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 0000000000..1301b149d3 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,310 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. 
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. 
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag 
reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } 
+} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000000..4366de2e1e --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000000..92f8b4d9a4 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,257 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var 
UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; 
n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + 
return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 0000000000..05d4ff5192 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,84 @@ +package protocol + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +// Output time is intended to not contain decimals +const ( + // RFC 7231#section-7.1.1.1 timetamp format. 
e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. 
+func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 0000000000..f614ef898b --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. 
+type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 0000000000..cc857f136c --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling errors API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. 
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 0000000000..cf981fe951 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,306 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. 
+func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. 
All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. 
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 0000000000..c1a511851f --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, 
localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000000..7108d38009 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,291 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. 
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. +func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file 
mode 100644 index 0000000000..42f71648ee --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 0000000000..7f60d4aa18 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,3115 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. 
You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. 
You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. 
For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
+// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. 
You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. 
The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. 
You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
+// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options.
+// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. 
Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service API operations. +// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. 
For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. 
The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. 
+// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked.
+// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. 
+// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. +// +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the operation +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. 
+// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. 
+// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCallerIdentityRequest method. 
+// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetFederationTokenRequest method. 
+// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. 
In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies can't exceed 2,048 characters. 
+// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. 
For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. 
For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. +// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. 
If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Session Duration +// +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// Permissions +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. 
+// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. 
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. 
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. 
Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. 
This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. 
+ // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + 
if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. 
+type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. 
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. 
Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. 
For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. 
+ // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. 
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. 
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. 
To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. 
You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. 
Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. 
+ // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. 
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. 
+ // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. 
The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plain text that you use for both inline and managed session policies + // can't exceed 2,048 characters. 
The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. 
This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. 
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. 
If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. 
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
+ Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. 
For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000000..d5307fcaa0 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 0000000000..fcb720dcac --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. 
For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. 
For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// in the IAM User Guide. +// +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. 
Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 0000000000..a233f542ef --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,82 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". 
+ // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 0000000000..d34a685533 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. +// +// STS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type STS struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. 
+ ServiceID = "STS" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the STS client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// mySession := session.Must(session.NewSession()) +// +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. 
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/test/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/test/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go new file mode 100644 index 0000000000..e2e1d6efe5 --- /dev/null +++ b/test/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -0,0 +1,96 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package stsiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sts" +) + +// STSAPI provides an interface to enable mocking the +// sts.STS service client's API operation, +// paginators, and waiters. This make unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } +// +// func main() { +// sess := session.New() +// svc := sts.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + 
DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/test/vendor/github.com/beorn7/perks/LICENSE b/test/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000000..339177be66 --- /dev/null +++ b/test/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, 
+distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/github.com/beorn7/perks/quantile/stream.go b/test/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000000..d7d14f8eb6 --- /dev/null +++ b/test/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). 
+// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. 
+func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. 
+ var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/test/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS new file mode 100644 index 0000000000..e068e731ea --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS @@ -0,0 +1 @@ +Google Inc. 
\ No newline at end of file diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/test/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 0000000000..a6f0febe25 --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 + LibraryInfo_WEB_JS LibraryInfo_Language = 10 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", + 10: "WEB_JS", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, + "WEB_JS": 10, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node that produces the span or tracing data. +// Note, this is not the metadata about the Node or service that is described by associated spans. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. + Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. 
+ LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. + Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. 
Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on 
OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. + CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" 
+} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", 
fileDescriptor_126c72ed8a252c84) +} + +var fileDescriptor_126c72ed8a252c84 = []byte{ + // 618 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x6e, 0xda, 0x4e, + 0x14, 0xc7, 0x7f, 0xc6, 0x24, 0x81, 0xe7, 0x5f, 0x13, 0x77, 0x94, 0x05, 0x4a, 0x17, 0xa5, 0x74, + 0x93, 0x2e, 0xb0, 0x9b, 0x44, 0xaa, 0xaa, 0x4a, 0x5d, 0x18, 0xe2, 0x26, 0x44, 0x11, 0x58, 0x26, + 0xa1, 0x4a, 0x37, 0x96, 0x21, 0x83, 0x33, 0x2a, 0x9e, 0x41, 0xe3, 0x31, 0x2a, 0x27, 0xe8, 0x09, + 0xda, 0x03, 0xf4, 0x50, 0x3d, 0x44, 0x4f, 0x51, 0xcd, 0x8c, 0x01, 0xab, 0x59, 0x90, 0xdd, 0xfb, + 0xf3, 0xfd, 0x7e, 0x9e, 0xf5, 0xe6, 0xc9, 0xd0, 0x66, 0x73, 0x4c, 0x27, 0x98, 0x66, 0x79, 0xe6, + 0xce, 0x39, 0x13, 0xcc, 0x8d, 0x13, 0x4c, 0x85, 0x3b, 0x61, 0x69, 0xca, 0xa8, 0xbb, 0x38, 0x29, + 0x22, 0x47, 0x35, 0x51, 0x73, 0x23, 0xd7, 0x15, 0x47, 0xc9, 0x9d, 0x42, 0xb4, 0x38, 0x39, 0x7a, + 0x99, 0x30, 0x96, 0xcc, 0xb0, 0x86, 0x8d, 0xf3, 0xa9, 0x2b, 0x48, 0x8a, 0x33, 0x11, 0xa7, 0x73, + 0x6d, 0x68, 0xfd, 0x34, 0xa1, 0xda, 0x67, 0xf7, 0x18, 0x0d, 0x01, 0xc8, 0x3d, 0xa6, 0x82, 0x4c, + 0x09, 0xe6, 0x0d, 0xa3, 0x69, 0x1c, 0x5b, 0xa7, 0x67, 0xce, 0xb6, 0x01, 0x4e, 0xc0, 0xd9, 0x04, + 0x67, 0x59, 0x6f, 0x6d, 0x0d, 0x4b, 0x18, 0x14, 0xc0, 0xff, 0x33, 0x32, 0xe6, 0x31, 0x5f, 0x46, + 0x84, 0x4e, 0x59, 0xa3, 0xa2, 0xb0, 0xed, 0xed, 0xd8, 0x6b, 0xed, 0xea, 0xd1, 0x29, 0x0b, 0xad, + 0xd9, 0x26, 0x91, 0xc4, 0x0c, 0xf3, 0x05, 0x99, 0x60, 0x4d, 0x34, 0x9f, 0x4a, 0x1c, 0x6a, 0x97, + 0x26, 0x66, 0x9b, 0x04, 0x8d, 0x00, 0x62, 0x21, 0x38, 0x19, 0xe7, 0x02, 0x67, 0x8d, 0x6a, 0xd3, + 0x3c, 0xb6, 0x4e, 0xdf, 0x6d, 0xe7, 0xc9, 0xa5, 0x39, 0xde, 0xda, 0xe8, 0x53, 0xc1, 0x97, 0x61, + 0x89, 0x74, 0xf4, 0x11, 0x0e, 0xfe, 0x69, 0x23, 0x1b, 0xcc, 0xaf, 0x78, 0xa9, 0x96, 0x5b, 0x0f, + 0x65, 0x88, 0x0e, 0x61, 0x67, 0x11, 0xcf, 0x72, 0xac, 0x36, 0x53, 0x0f, 0x75, 0xf2, 0xa1, 0xf2, + 0xde, 0x68, 0x7d, 0x37, 0xe0, 0xf9, 0xa3, 0xe5, 0xa2, 0x17, 0x50, 0x7f, 0x60, 0x99, 0x88, 0x68, + 
0x9c, 0xe2, 0x82, 0x53, 0x93, 0x85, 0x7e, 0x9c, 0x62, 0x89, 0x9f, 0x93, 0x7b, 0x85, 0x7a, 0x16, + 0xca, 0x10, 0x75, 0xe1, 0x20, 0x13, 0x31, 0x17, 0xd1, 0xfa, 0xd9, 0x8b, 0x85, 0x1d, 0x39, 0xfa, + 0x30, 0x9c, 0xd5, 0x61, 0x38, 0x37, 0x2b, 0x45, 0xb8, 0xaf, 0x2c, 0xeb, 0xbc, 0xf5, 0xbb, 0x02, + 0x56, 0xe9, 0x3d, 0x50, 0x08, 0xb5, 0x59, 0x4c, 0x93, 0x3c, 0x4e, 0xf4, 0x27, 0xec, 0x3f, 0x65, + 0x5d, 0x25, 0x80, 0x73, 0x5d, 0xb8, 0xc3, 0x35, 0x07, 0xbd, 0x01, 0x1b, 0x7f, 0x9b, 0x33, 0x2e, + 0x30, 0x8f, 0x16, 0x98, 0x67, 0x84, 0xd1, 0x62, 0x25, 0x07, 0xab, 0xfa, 0x48, 0x97, 0xd1, 0x5b, + 0x38, 0x9c, 0x30, 0x8e, 0xa3, 0xd5, 0x61, 0xad, 0xe4, 0xa6, 0x92, 0x23, 0xd9, 0x2b, 0x86, 0x15, + 0x8e, 0xd6, 0x0f, 0x03, 0x6a, 0xab, 0x99, 0xa8, 0x01, 0x87, 0xd7, 0x5e, 0xff, 0xe2, 0xd6, 0xbb, + 0xf0, 0xa3, 0xdb, 0xfe, 0x30, 0xf0, 0xbb, 0xbd, 0x4f, 0x3d, 0xff, 0xdc, 0xfe, 0x0f, 0xed, 0x81, + 0xd9, 0x0d, 0x02, 0xdb, 0x40, 0x16, 0xec, 0x75, 0xa3, 0xe1, 0xa5, 0x17, 0x06, 0x76, 0x05, 0x01, + 0xec, 0xfa, 0xa1, 0x74, 0xd8, 0xa6, 0x6c, 0x5c, 0x0c, 0x22, 0x95, 0x54, 0x51, 0x0d, 0xaa, 0x57, + 0xde, 0xc8, 0xb3, 0x77, 0x64, 0xb9, 0x3f, 0x38, 0xf7, 0xa3, 0xab, 0xa1, 0xbd, 0x2b, 0x29, 0xc1, + 0x65, 0x60, 0xef, 0x49, 0x63, 0x70, 0x77, 0x73, 0x39, 0xe8, 0xdb, 0x35, 0xa9, 0x0d, 0x6f, 0x3b, + 0x77, 0x76, 0x5d, 0x56, 0x3f, 0xfb, 0x1d, 0x29, 0x85, 0xd6, 0x2b, 0xb0, 0x4a, 0x57, 0x89, 0x10, + 0x54, 0x4b, 0xcf, 0xaa, 0xe2, 0xce, 0x2f, 0x03, 0x5e, 0x13, 0xb6, 0x75, 0xbd, 0x1d, 0xab, 0xab, + 0xc2, 0x40, 0x36, 0x03, 0xe3, 0x4b, 0x2f, 0x21, 0xe2, 0x21, 0x1f, 0x4b, 0x81, 0xab, 0x7d, 0x6d, + 0x42, 0x33, 0xc1, 0xf3, 0x14, 0x53, 0x11, 0x0b, 0xc2, 0xa8, 0xbb, 0x41, 0xb6, 0xf5, 0x9f, 0x26, + 0xc1, 0xb4, 0x9d, 0x3c, 0xfa, 0xe1, 0xfc, 0xa9, 0x34, 0x07, 0x73, 0x4c, 0xbb, 0x7a, 0xb8, 0xe2, + 0x3b, 0x9e, 0x1a, 0xae, 0x27, 0x3a, 0xa3, 0x93, 0xf1, 0xae, 0x02, 0x9c, 0xfd, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x53, 0x74, 0x5e, 0xbe, 0x04, 0x00, 0x00, +} diff --git 
a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go new file mode 100644 index 0000000000..5f222b473e --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportMetricsServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Metrics from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of metrics that belong to the last received Node. 
+ Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // The resource for the metrics in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known + // at all or when all sent metrics have an explicit resource set. + Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{0} +} + +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { + if m != nil { + return m.Metrics + 
} + return nil +} + +func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{1} +} + +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) +} + +var fileDescriptor_47e253a956287d04 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x4a, 0xc3, 0x40, + 0x14, 0x86, 0x9d, 0x56, 0xaa, 0x4c, 0xc1, 0x45, 0xdc, 0x94, 0x2a, 0xd2, 0x56, 0x91, 0x8a, 0x64, + 0x62, 0xea, 0x42, 0x10, 0x54, 0xac, 0xb8, 0x11, 0xd4, 0x12, 0xc1, 0x85, 0x1b, 0x69, 0xd3, 0x47, + 0xcc, 0x22, 0x33, 0x71, 0x66, 0x12, 0xbc, 0x85, 0x77, 0x70, 0xef, 0x8d, 0x3c, 0x81, 0xa7, 0x90, + 0xe4, 0x4d, 0x5a, 0x4a, 0x8c, 0x05, 0x77, 0x8f, 0xe4, 0xff, 0xfe, 0xf7, 0xff, 0x33, 0x43, 0x4f, + 0x44, 0x0c, 0xdc, 0x07, 0xae, 0x12, 0xe5, 0xc4, 0x52, 0x68, 0xe1, 0x8c, 0x03, 0xe0, 0xda, 0x89, + 0x40, 0xcb, 0xd0, 0x57, 0x4e, 0xea, 0x16, 0xe3, 0xb3, 0x02, 0x99, 0x86, 0x3e, 0xb0, 0x5c, 0x66, + 0x75, 0xe7, 0x20, 0x7e, 0x61, 0x39, 0xc8, 0x8c, 0x9a, 0xa5, 0x6e, 0xdb, 0xae, 0xf0, 0xf6, 0x45, + 0x14, 0x09, 0x9e, 0x59, 0xe3, 0x84, 0x7c, 0xfb, 0xa0, 0x24, 0x2f, 0x87, 0x30, 0xd2, 0xc3, 0x92, + 0x54, 0x82, 0x12, 0x89, 0xf4, 0x21, 0xd3, 0x16, 0x33, 0x8a, 0x7b, 0x5f, 0x84, 0x6e, 0x5d, 0xbf, + 0xc5, 0x42, 0xea, 0x5b, 0x34, 0x79, 0xc0, 0x22, 0x1e, 0xbc, 0x26, 0xa0, 0xb4, 0x75, 0x4a, 0x57, + 0xb9, 0x98, 0x42, 0x8b, 0x74, 0x48, 0xbf, 0x39, 0xd8, 0x67, 0x15, 0xc5, 0x4c, 0xd6, 0xd4, 0x65, + 0x77, 0x62, 0x0a, 0x5e, 0xce, 0x58, 0x67, 0x74, 0xcd, 0x24, 0x6b, 0xd5, 0x3a, 0xf5, 0x7e, 0x73, + 0xb0, 0x5b, 0xc6, 0xe7, 0x27, 0xc2, 0x30, 0x80, 0x57, 0x30, 0xd6, 0x90, 0xae, 0x17, 0x61, 0x5b, + 0xf5, 0xaa, 0xf5, 0xb3, 0x3a, 0xa9, 0xcb, 0x3c, 0x33, 0x7b, 0x33, 0xae, 0xb7, 0x43, 0xb7, 0x7f, + 0x6f, 0xa7, 0x62, 0xc1, 0x15, 0x0c, 0x3e, 0x08, 0xdd, 0x58, 0xfc, 0x65, 0xbd, 0x13, 0xda, 0x40, + 0xc6, 0x3a, 0x67, 0x4b, 0xef, 0x91, 0xfd, 0x71, 0x78, 0xed, 0x8b, 0x7f, 0xf3, 0x18, 0xaf, 0xb7, + 0xd2, 0x27, 0x47, 0x64, 0xf8, 0x49, 0xe8, 0x5e, 0x28, 0x96, 0x7b, 0x0d, 0x37, 0x17, 0x6d, 0x46, + 0x99, 0x6a, 0x44, 0x9e, 0x6e, 0x82, 0x50, 0xbf, 0x24, 0x93, 0xec, 0x92, 0x1c, 0x34, 0xb0, 0x43, + 0xae, 0xb4, 0x4c, 0x22, 0xe0, 0x7a, 0xac, 0x43, 0xc1, 0x9d, 0xb9, 0xb7, 0x8d, 0x4f, 0x26, 0x00, + 0x6e, 0x07, 0xe5, 0xf7, 0xfe, 0x5d, 0xeb, 0xde, 0xc7, 0xc0, 0xaf, 
0x30, 0x46, 0xbe, 0x80, 0x5d, + 0xe6, 0x31, 0xcc, 0x6a, 0xf6, 0xe8, 0x4e, 0x1a, 0xb9, 0xc5, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x19, 0x28, 0xa4, 0x50, 0x3f, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &metricsServiceExportClient{stream} + return x, nil +} + +type MetricsService_ExportClient interface { + Send(*ExportMetricsServiceRequest) error + Recv() (*ExportMetricsServiceResponse, error) + grpc.ClientStream +} + +type metricsServiceExportClient struct { + grpc.ClientStream +} + +func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { + m := new(ExportMetricsServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(MetricsService_ExportServer) error +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(srv MetricsService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) +} + +type MetricsService_ExportServer interface { + Send(*ExportMetricsServiceResponse) error + Recv() (*ExportMetricsServiceRequest, error) + grpc.ServerStream +} + +type metricsServiceExportServer struct { + grpc.ServerStream +} + +func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { + m := new(ExportMetricsServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Export", + Handler: _MetricsService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 0000000000..158c160891 --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,150 @@ +// Code generated 
by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq ExportMetricsServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", 
err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. 
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "")) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseStream +) diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..a0a3504ddd --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -0,0 +1,457 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: opencensus/proto/agent/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CurrentLibraryConfig struct { + // This is required only in the first message on the stream or if the + // previous sent CurrentLibraryConfig message has a different Node (e.g. + // when the same RPC is used to configure multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Current configuration. 
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } +func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*CurrentLibraryConfig) ProtoMessage() {} +func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{0} +} + +func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) +} +func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) +} +func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) +} +func (m *CurrentLibraryConfig) XXX_Size() int { + return xxx_messageInfo_CurrentLibraryConfig.Size(m) +} +func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo + +func (m *CurrentLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type UpdatedLibraryConfig struct { + // This field is ignored when the RPC is used to configure only one Application. + // This is required only in the first message on the stream or if the + // previous sent UpdatedLibraryConfig message has a different Node. + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Requested updated configuration. 
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } +func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*UpdatedLibraryConfig) ProtoMessage() {} +func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{1} +} + +func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) +} +func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) +} +func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) +} +func (m *UpdatedLibraryConfig) XXX_Size() int { + return xxx_messageInfo_UpdatedLibraryConfig.Size(m) +} +func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo + +func (m *UpdatedLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type ExportTraceServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportTraceServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Spans from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of Spans that belong to the last received Node. 
+ Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The resource for the spans in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known. + Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{2} +} + +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { + if m != nil { + return 
m.Resource + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{3} +} + +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") + proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5) +} + +var fileDescriptor_7027f99caf7ac6a5 = []byte{ + // 442 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xcf, 0xaa, 0xd4, 0x30, + 0x14, 0xc6, 0x4d, 0xaf, 0x16, 0xc9, 0x75, 0x63, 0x71, 0x51, 0x8b, 0x30, 0x97, 0x82, 0x32, 0xa0, + 0x4d, 0xed, 0x5c, 0xee, 0xe6, 0x0a, 0x82, 0x33, 0x08, 0x2e, 0x44, 0x2f, 0x1d, 0x75, 0xe1, 0x66, + 0xe8, 0xb4, 0xc7, 0xda, 0xc5, 0x24, 0x31, 0x49, 0x8b, 0x82, 0x7b, 0xf7, 0x2e, 0x7c, 0x03, 0x5f, + 0xc8, 0xc7, 0xf0, 0x29, 0xa4, 0x39, 0x9d, 0x3f, 0x3a, 0x53, 0x0b, 0xba, 0xb9, 0xbb, 0x43, 0xf3, + 0xfd, 0xbe, 0xf3, 0x25, 0x39, 0x29, 0x3d, 0x15, 0x12, 0x78, 0x0e, 0x5c, 0xd7, 0x3a, 0x96, 0x4a, + 0x18, 0x11, 0x67, 0x25, 0x70, 0x13, 0x1b, 0x95, 0xe5, 0x10, 0x37, 0x09, 0x16, 0x0b, 0x0d, 0xaa, + 0xa9, 0x72, 0x60, 0x56, 0xe2, 0x8d, 0xb6, 0x10, 0x7e, 0x61, 0x16, 0x62, 0x56, 0xcb, 0x9a, 0x24, + 0x88, 0x7a, 0x5c, 0x73, 0xb1, 0x5a, 0x09, 0xde, 0xda, 0x62, 0x85, 0x74, 0x70, 0x7f, 0x4f, 0xae, + 0x40, 0x8b, 0x5a, 0x61, 0x82, 0x75, 0xdd, 0x89, 0xef, 0xee, 0x89, 0x7f, 0xcf, 0xda, 0xc9, 0x1e, + 0x0c, 0xc8, 0x16, 0xb9, 0xe0, 0xef, 0xaa, 0x12, 0xd5, 0xe1, 0x57, 0x42, 0x6f, 0xcd, 0x6a, 0xa5, + 0x80, 0x9b, 0xe7, 0xd5, 0x52, 0x65, 0xea, 0xd3, 0xcc, 0x2e, 0x7b, 0xe7, 0xf4, 0x2a, 0x17, 0x05, + 0xf8, 0xe4, 0x84, 0x8c, 0x8f, 0x27, 0xf7, 0x58, 0xcf, 0xce, 0xbb, 0xed, 0x34, 0x09, 0x7b, 0x21, + 0x0a, 0x48, 0x2d, 0xe3, 0x3d, 0xa6, 0x2e, 0x36, 0xf1, 0x9d, 0x3e, 0x7a, 0x7d, 0x62, 0xec, 0x55, + 0x5b, 0x60, 0xcf, 0xb4, 0xa3, 0x6c, 0xa8, 0xd7, 0xb2, 0xc8, 0x0c, 0x14, 0x97, 0x27, 0xd4, 0x0f, + 0x42, 0x6f, 0x3f, 0xfd, 0x28, 0x85, 0x32, 0x76, 0x75, 0x8e, 0x83, 0x91, 0xc2, 0x87, 0x1a, 0xb4, + 0xf9, 0xaf, 0x64, 0x67, 0xf4, 0x9a, 0x96, 0x19, 0xd7, 0xbe, 0x73, 0x72, 0x34, 0x3e, 0x9e, 0x8c, + 0xfe, 0x12, 0x6c, 0x2e, 0x33, 0x9e, 0xa2, 0xda, 0x9b, 0xd2, 0xeb, 0xeb, 0x09, 0xf1, 0x8f, 0xfa, + 0xda, 0x6e, 0x66, 0xa8, 0x49, 0x58, 0xda, 0xd5, 0xe9, 0x86, 0x0b, 0xef, 0xd0, 0xe0, 0xd0, 0x9e, + 0xb4, 0x14, 0x5c, 0xc3, 0xe4, 0x9b, 0x43, 0x6f, 0xec, 0x2e, 0x78, 0x9f, 0xa9, 0xdb, 0xdd, 0xc4, + 0x19, 0x1b, 0x78, 
0x0a, 0xec, 0xd0, 0x54, 0x05, 0xc3, 0xd8, 0xa1, 0x7b, 0x0f, 0xaf, 0x8c, 0xc9, + 0x43, 0xe2, 0x7d, 0x21, 0xd4, 0xc5, 0xb4, 0xde, 0xf9, 0xa0, 0x4f, 0xef, 0x55, 0x05, 0x8f, 0xfe, + 0x89, 0xc5, 0x23, 0xc1, 0x24, 0xd3, 0xef, 0x84, 0x86, 0x95, 0x18, 0xf2, 0x99, 0xde, 0xdc, 0xb5, + 0xb8, 0x68, 0x15, 0x17, 0xe4, 0xed, 0xb3, 0xb2, 0x32, 0xef, 0xeb, 0x65, 0x3b, 0x0a, 0x31, 0xc2, + 0x51, 0xc5, 0xb5, 0x51, 0xf5, 0x0a, 0xb8, 0xc9, 0x4c, 0x25, 0x78, 0xbc, 0xf5, 0x8d, 0xf0, 0x05, + 0x97, 0xc0, 0xa3, 0xf2, 0xcf, 0x3f, 0xd4, 0x4f, 0x67, 0xf4, 0x52, 0x02, 0x9f, 0x61, 0x00, 0x6b, + 0xcf, 0x9e, 0xd8, 0x00, 0xb6, 0x2d, 0x7b, 0x93, 0x2c, 0x5d, 0x8b, 0x9f, 0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0x65, 0x76, 0xd7, 0xb9, 0xed, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. 
+ Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(TraceService_ExportServer) error +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Config(srv TraceService_ConfigServer) error { + return status.Errorf(codes.Unimplemented, "method Config not implemented") +} +func (*UnimplementedTraceServiceServer) Export(srv TraceService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + 
ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..334331b0dd --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq ExportTraceServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// 
automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseStream +) diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go new file mode 100644 index 0000000000..466b234285 --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -0,0 +1,1127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: opencensus/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The kind of metric. It describes how the data is reported. +// +// A gauge is an instantaneous measurement of a value. +// +// A cumulative measurement is a value accumulated over a time interval. In +// a time series, cumulative measurements should have the same start time, +// increasing values and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. +type MetricDescriptor_Type int32 + +const ( + // Do not use this default value. + MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 + // Integer gauge. The value can go both up and down. + MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 + // Floating point gauge. The value can go both up and down. + MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 + // Distribution gauge measurement. The count and sum can go both up and + // down. Recorded values are always >= 0. + // Used in scenarios like a snapshot of time the current items in a queue + // have spent there. + MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 + // Integer cumulative measurement. 
The value cannot decrease, if resets + // then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 + // Floating point cumulative measurement. The value cannot decrease, if + // resets then the start_time should also be reset. Recorded values are + // always >= 0. + MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 + // Distribution cumulative measurement. The count and sum cannot decrease, + // if resets then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 + // Some frameworks implemented Histograms as a summary of observations + // (usually things like request durations and response sizes). While it + // also provides a total count of observations and a sum of all observed + // values, it calculates configurable percentiles over a sliding time + // window. This is not recommended, since it cannot be aggregated. + MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 +) + +var MetricDescriptor_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "GAUGE_INT64", + 2: "GAUGE_DOUBLE", + 3: "GAUGE_DISTRIBUTION", + 4: "CUMULATIVE_INT64", + 5: "CUMULATIVE_DOUBLE", + 6: "CUMULATIVE_DISTRIBUTION", + 7: "SUMMARY", +} + +var MetricDescriptor_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "GAUGE_INT64": 1, + "GAUGE_DOUBLE": 2, + "GAUGE_DISTRIBUTION": 3, + "CUMULATIVE_INT64": 4, + "CUMULATIVE_DOUBLE": 5, + "CUMULATIVE_DISTRIBUTION": 6, + "SUMMARY": 7, +} + +func (x MetricDescriptor_Type) String() string { + return proto.EnumName(MetricDescriptor_Type_name, int32(x)) +} + +func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1, 0} +} + +// Defines a Metric which has one or more timeseries. +type Metric struct { + // The descriptor of the Metric. + // TODO(issue #152): consider only sending the name of descriptor for + // optimization. 
+ MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + // One or more timeseries for a single metric, where each timeseries has + // one or more points. + Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + // The resource for the metric. If unset, it may be set to a default value + // provided for a sequence of messages in an RPC stream. + Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetMetricDescriptor() *MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +func (m *Metric) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *Metric) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +// Defines a metric type and its schema. +type MetricDescriptor struct { + // The metric type, including its DNS name prefix. It must be unique. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` + // The label keys associated with the metric descriptor. + LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) 
GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetType() MetricDescriptor_Type { + if m != nil { + return m.Type + } + return MetricDescriptor_UNSPECIFIED +} + +func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { + if m != nil { + return m.LabelKeys + } + return nil +} + +// Defines a label key associated with a metric descriptor. +type LabelKey struct { + // The key for the label. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A human-readable description of what this label key represents. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelKey) Reset() { *m = LabelKey{} } +func (m *LabelKey) String() string { return proto.CompactTextString(m) } +func (*LabelKey) ProtoMessage() {} +func (*LabelKey) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{2} +} + +func (m *LabelKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelKey.Unmarshal(m, b) +} +func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) +} +func (m *LabelKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelKey.Merge(m, src) +} +func (m *LabelKey) XXX_Size() int { + return xxx_messageInfo_LabelKey.Size(m) +} +func (m *LabelKey) XXX_DiscardUnknown() { + xxx_messageInfo_LabelKey.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelKey proto.InternalMessageInfo + +func (m *LabelKey) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A collection of data points that 
describes the time-varying values +// of a metric. +type TimeSeries struct { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{3} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) 
GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +func (m *TimeSeries) GetLabelValues() []*LabelValue { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +type LabelValue struct { + // The value for the label. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. + HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{4} +} + +func (m *LabelValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelValue.Unmarshal(m, b) +} +func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) +} +func (m *LabelValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValue.Merge(m, src) +} +func (m *LabelValue) XXX_Size() int { + return xxx_messageInfo_LabelValue.Size(m) +} +func (m *LabelValue) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValue.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValue proto.InternalMessageInfo + +func (m *LabelValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LabelValue) GetHasValue() bool { + if m != nil { + return m.HasValue + } + return false +} + +// A timestamped measurement. +type Point struct { + // The moment when this point was recorded. 
Inclusive. + // If not specified, the timestamp will be decided by the backend. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The actual point value. + // + // Types that are valid to be assigned to Value: + // *Point_Int64Value + // *Point_DoubleValue + // *Point_DistributionValue + // *Point_SummaryValue + Value isPoint_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{5} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type isPoint_Value interface { + isPoint_Value() +} + +type Point_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Point_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Point_DistributionValue struct { + DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +type Point_SummaryValue struct { + SummaryValue *SummaryValue 
`protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` +} + +func (*Point_Int64Value) isPoint_Value() {} + +func (*Point_DoubleValue) isPoint_Value() {} + +func (*Point_DistributionValue) isPoint_Value() {} + +func (*Point_SummaryValue) isPoint_Value() {} + +func (m *Point) GetValue() isPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Point) GetInt64Value() int64 { + if x, ok := m.GetValue().(*Point_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *Point) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*Point_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Point) GetDistributionValue() *DistributionValue { + if x, ok := m.GetValue().(*Point_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +func (m *Point) GetSummaryValue() *SummaryValue { + if x, ok := m.GetValue().(*Point_SummaryValue); ok { + return x.SummaryValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Point) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Point_Int64Value)(nil), + (*Point_DoubleValue)(nil), + (*Point_DistributionValue)(nil), + (*Point_SummaryValue)(nil), + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type DistributionValue struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. 
For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. + // TODO(issue #152): consider not required to send bucket options for + // optimization. + BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue) Reset() { *m = DistributionValue{} } +func (m *DistributionValue) String() string { return proto.CompactTextString(m) } +func (*DistributionValue) ProtoMessage() {} +func (*DistributionValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6} +} + +func (m *DistributionValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue.Unmarshal(m, b) +} +func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) +} +func (m *DistributionValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue.Merge(m, src) +} +func (m *DistributionValue) XXX_Size() int { + return 
xxx_messageInfo_DistributionValue.Size(m) +} +func (m *DistributionValue) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue proto.InternalMessageInfo + +func (m *DistributionValue) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described by +// BucketOptions. +// +// If bucket_options has no type, then there is no histogram associated with +// the Distribution. 
+type DistributionValue_BucketOptions struct { + // Types that are valid to be assigned to Type: + // *DistributionValue_BucketOptions_Explicit_ + Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } +func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions) ProtoMessage() {} +func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0} +} + +func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) +} +func (m *DistributionValue_BucketOptions) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) +} +func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo + +type isDistributionValue_BucketOptions_Type interface { + isDistributionValue_BucketOptions_Type() +} + +type DistributionValue_BucketOptions_Explicit_ struct { + Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` +} + +func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} + +func (m *DistributionValue_BucketOptions) GetType() 
isDistributionValue_BucketOptions_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { + if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { + return x.Explicit + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DistributionValue_BucketOptions_Explicit_)(nil), + } +} + +// Specifies a set of buckets with arbitrary upper-bounds. +// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket +// index i are: +// +// [0, bucket_bounds[i]) for i == 0 +// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 +// [bucket_bounds[i], +infinity) for i == N-1 +type DistributionValue_BucketOptions_Explicit struct { + // The values must be strictly increasing and > 0. + Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions_Explicit) Reset() { + *m = DistributionValue_BucketOptions_Explicit{} +} +func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} +func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} +} + +func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m 
*DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +type DistributionValue_Bucket struct { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // If the distribution does not have a histogram, then omit this field. + Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } +func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Bucket) ProtoMessage() {} +func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 1} +} + +func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) +} +func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) +} +func (m 
*DistributionValue_Bucket) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Bucket.Size(m) +} +func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo + +func (m *DistributionValue_Bucket) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// Distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket. +type DistributionValue_Exemplar struct { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. 
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } +func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Exemplar) ProtoMessage() {} +func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 2} +} + +func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) +} +func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) +} +func (m *DistributionValue_Exemplar) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Exemplar.Size(m) +} +func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo + +func (m *DistributionValue_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { + if m != nil { + return m.Attachments + } + return nil +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +type SummaryValue struct { + // The total number of recorded values since start_time. 
Optional since + // some systems don't expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // Values calculated over an arbitrary time window. + Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue) Reset() { *m = SummaryValue{} } +func (m *SummaryValue) String() string { return proto.CompactTextString(m) } +func (*SummaryValue) ProtoMessage() {} +func (*SummaryValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7} +} + +func (m *SummaryValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue.Unmarshal(m, b) +} +func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) +} +func (m *SummaryValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue.Merge(m, src) +} +func (m *SummaryValue) XXX_Size() int { + return xxx_messageInfo_SummaryValue.Size(m) +} +func (m *SummaryValue) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue proto.InternalMessageInfo + +func (m *SummaryValue) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The values in 
this message can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type SummaryValue_Snapshot struct { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. + PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } +func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot) ProtoMessage() {} +func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0} +} + +func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) +} +func (m *SummaryValue_Snapshot) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot.Size(m) +} +func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { + 
xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { + if m != nil { + return m.PercentileValues + } + return nil +} + +// Represents the value at a given percentile of a distribution. +type SummaryValue_Snapshot_ValueAtPercentile struct { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` + // The value at the given percentile of a distribution. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { + *m = SummaryValue_Snapshot_ValueAtPercentile{} +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} +func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { + if m != nil { + return m.Percentile + } + return 0 +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) + proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") + proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") + proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") + proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") + proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") + proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") + proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") + proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") + proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") + proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") + proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") + proto.RegisterMapType((map[string]string)(nil), 
"opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") + proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") + proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") + proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") +} + +func init() { + proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) +} + +var fileDescriptor_0ee3deb72053811a = []byte{ + // 1118 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xd2, 0xf5, 0xa8, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61, + 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x31, 0x24, 0xb1, + 0x35, 0xb6, 0x23, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x0d, 0xf8, + 0x05, 0x78, 0x02, 0xc4, 0x35, 0xb7, 0x88, 0xe7, 0xe0, 0x8a, 0x27, 0xe0, 0x15, 0xb8, 0x41, 0xbc, + 0x01, 0xda, 0x99, 0xd9, 0x8f, 0xc4, 0x60, 0xea, 0x22, 0x71, 0x77, 0xe6, 0xcc, 0x39, 0xbf, 0xfd, + 0x9d, 0xcf, 0x1d, 0x78, 0xe4, 0xf9, 0xc4, 0xb5, 0x88, 0x4b, 0x43, 0x5a, 0xf7, 0x03, 0x8f, 0x79, + 0x75, 0x87, 0xb0, 0xc0, 0xb6, 0x68, 0xfd, 0x66, 0x3f, 0x16, 0x0d, 0x7e, 0x81, 0xb6, 0x52, 0x53, + 0xa1, 0x31, 0xe2, 0xfb, 0x9b, 0xfd, 0xda, 0x3b, 0x97, 0x9e, 0x77, 0x39, 0x25, 0x02, 0x63, 0x1c, + 0x5e, 0xd4, 0x99, 0xed, 0x10, 0xca, 0x4c, 0xc7, 0x17, 0xb6, 0xb5, 0xed, 0xbb, 0x06, 0x5f, 0x07, + 0xa6, 0xef, 0x93, 0x40, 0x62, 0xd5, 0x3e, 0x9a, 0x23, 0x12, 0x10, 0xea, 0x85, 0x81, 0x45, 0x22, + 0x26, 0xb1, 0x2c, 0x8c, 0xf5, 0x3f, 0x14, 0x28, 0x9e, 0xf2, 0x8f, 0xa3, 0x57, 0x50, 0x11, 0x34, + 0x46, 0x13, 0x42, 0xad, 0xc0, 0xf6, 0x99, 0x17, 0x54, 0x95, 0x1d, 0x65, 0x57, 0x6d, 0xec, 0x19, + 0x0b, 0x18, 0x1b, 0xc2, 0xbf, 0x95, 0x38, 0x61, 0xcd, 0xb9, 0xa3, 0x41, 0x47, 0x00, 0x3c, 0x0c, 
+ 0x12, 0xd8, 0x84, 0x56, 0x73, 0x3b, 0xf9, 0x5d, 0xb5, 0xf1, 0xe1, 0x42, 0xd0, 0x81, 0xed, 0x90, + 0x3e, 0x37, 0xc7, 0x19, 0x57, 0xd4, 0x84, 0x52, 0x1c, 0x41, 0x35, 0xcf, 0xb9, 0x7d, 0x30, 0x0f, + 0x93, 0xc4, 0x78, 0xb3, 0x6f, 0x60, 0x29, 0xe3, 0xc4, 0x4f, 0xff, 0x3e, 0x0f, 0xda, 0x5d, 0xce, + 0x08, 0x41, 0xc1, 0x35, 0x1d, 0xc2, 0x03, 0x5e, 0xc7, 0x5c, 0x46, 0x3b, 0xa0, 0xc6, 0xa9, 0xb0, + 0x3d, 0xb7, 0x9a, 0xe3, 0x57, 0x59, 0x55, 0xe4, 0x15, 0xba, 0x36, 0xe3, 0x54, 0xd6, 0x31, 0x97, + 0xd1, 0x4b, 0x28, 0xb0, 0x99, 0x4f, 0xaa, 0x85, 0x1d, 0x65, 0x77, 0xb3, 0xd1, 0x58, 0x2a, 0x75, + 0xc6, 0x60, 0xe6, 0x13, 0xcc, 0xfd, 0x51, 0x0b, 0x60, 0x6a, 0x8e, 0xc9, 0x74, 0x74, 0x4d, 0x66, + 0xb4, 0xba, 0xca, 0x73, 0xf6, 0xfe, 0x42, 0xb4, 0x93, 0xc8, 0xfc, 0x0b, 0x32, 0xc3, 0xeb, 0x53, + 0x29, 0x51, 0xfd, 0x47, 0x05, 0x0a, 0x11, 0x28, 0xba, 0x07, 0xea, 0xf0, 0xac, 0xdf, 0x6b, 0x1f, + 0x76, 0x5e, 0x76, 0xda, 0x2d, 0x6d, 0x25, 0x52, 0x1c, 0x1d, 0x0c, 0x8f, 0xda, 0xa3, 0xce, 0xd9, + 0xe0, 0xc9, 0x63, 0x4d, 0x41, 0x1a, 0x94, 0x85, 0xa2, 0xd5, 0x1d, 0x36, 0x4f, 0xda, 0x5a, 0x0e, + 0x3d, 0x04, 0x24, 0x35, 0x9d, 0xfe, 0x00, 0x77, 0x9a, 0xc3, 0x41, 0xa7, 0x7b, 0xa6, 0xe5, 0xd1, + 0x7d, 0xd0, 0x0e, 0x87, 0xa7, 0xc3, 0x93, 0x83, 0x41, 0xe7, 0x3c, 0xf6, 0x2f, 0xa0, 0x07, 0x50, + 0xc9, 0x68, 0x25, 0xc8, 0x2a, 0xda, 0x82, 0xff, 0x65, 0xd5, 0x59, 0xa4, 0x22, 0x52, 0x61, 0xad, + 0x3f, 0x3c, 0x3d, 0x3d, 0xc0, 0x5f, 0x6a, 0x6b, 0xfa, 0x0b, 0x28, 0xc5, 0x21, 0x20, 0x0d, 0xf2, + 0xd7, 0x64, 0x26, 0xcb, 0x11, 0x89, 0xff, 0x5c, 0x0d, 0xfd, 0x57, 0x05, 0x20, 0xed, 0x1b, 0x74, + 0x08, 0xf7, 0x28, 0x33, 0x03, 0x36, 0x4a, 0x26, 0x48, 0xb6, 0x73, 0xcd, 0x10, 0x23, 0x64, 0xc4, + 0x23, 0xc4, 0xbb, 0x8d, 0x5b, 0xe0, 0x4d, 0xee, 0x92, 0x9c, 0xd1, 0xe7, 0x50, 0x16, 0x55, 0xb8, + 0x31, 0xa7, 0xe1, 0x1b, 0xf6, 0x2e, 0x0f, 0xe2, 0x3c, 0xb2, 0xc7, 0xea, 0x34, 0x91, 0x29, 0x7a, + 0x0e, 0x45, 0xdf, 0xb3, 0x5d, 0x46, 0xab, 0x79, 0x8e, 0xa2, 0x2f, 0x44, 0xe9, 0x45, 0xa6, 0x58, + 0x7a, 0xe8, 0x9f, 0x01, 0xa4, 0xb0, 
0xe8, 0x3e, 0xac, 0x72, 0x3e, 0x32, 0x3f, 0xe2, 0x80, 0xb6, + 0x60, 0xfd, 0xca, 0xa4, 0x82, 0x29, 0xcf, 0x4f, 0x09, 0x97, 0xae, 0x4c, 0xca, 0x5d, 0xf4, 0x9f, + 0x73, 0xb0, 0xca, 0x21, 0xd1, 0x33, 0x58, 0x5f, 0x26, 0x23, 0xa9, 0x31, 0x7a, 0x17, 0x54, 0xdb, + 0x65, 0x4f, 0x1e, 0x67, 0x3e, 0x91, 0x3f, 0x5e, 0xc1, 0xc0, 0x95, 0x82, 0xd9, 0x7b, 0x50, 0x9e, + 0x78, 0xe1, 0x78, 0x4a, 0xa4, 0x4d, 0x34, 0x19, 0xca, 0xf1, 0x0a, 0x56, 0x85, 0x56, 0x18, 0x8d, + 0x00, 0x4d, 0x6c, 0xca, 0x02, 0x7b, 0x1c, 0x46, 0x85, 0x93, 0xa6, 0x05, 0x4e, 0xc5, 0x58, 0x98, + 0x94, 0x56, 0xc6, 0x8d, 0x63, 0x1d, 0xaf, 0xe0, 0xca, 0xe4, 0xae, 0x12, 0xf5, 0x60, 0x83, 0x86, + 0x8e, 0x63, 0x06, 0x33, 0x89, 0xbd, 0xca, 0xb1, 0x1f, 0x2d, 0xc4, 0xee, 0x0b, 0x8f, 0x18, 0xb6, + 0x4c, 0x33, 0xe7, 0xe6, 0x9a, 0xcc, 0xb8, 0xfe, 0x4b, 0x11, 0x2a, 0x73, 0x2c, 0xa2, 0x82, 0x58, + 0x5e, 0xe8, 0x32, 0x9e, 0xcf, 0x3c, 0x16, 0x87, 0xa8, 0x89, 0x69, 0xe8, 0xf0, 0x3c, 0x29, 0x38, + 0x12, 0xd1, 0x53, 0xa8, 0xd2, 0xd0, 0x19, 0x79, 0x17, 0x23, 0xfa, 0x3a, 0x34, 0x03, 0x32, 0x19, + 0x4d, 0xc8, 0x8d, 0x6d, 0xf2, 0x8e, 0xe6, 0xa9, 0xc2, 0x0f, 0x68, 0xe8, 0x74, 0x2f, 0xfa, 0xe2, + 0xb6, 0x15, 0x5f, 0x22, 0x0b, 0x36, 0xc7, 0xa1, 0x75, 0x4d, 0xd8, 0xc8, 0xe3, 0xcd, 0x4e, 0x65, + 0xba, 0x3e, 0x5d, 0x2e, 0x5d, 0x46, 0x93, 0x83, 0x74, 0x05, 0x06, 0xde, 0x18, 0x67, 0x8f, 0xa8, + 0x0b, 0x6b, 0x42, 0x11, 0xef, 0x9b, 0x4f, 0xde, 0x0a, 0x1d, 0xc7, 0x28, 0xb5, 0x1f, 0x14, 0xd8, + 0xb8, 0xf5, 0x45, 0x64, 0x41, 0x89, 0x7c, 0xe3, 0x4f, 0x6d, 0xcb, 0x66, 0xb2, 0xf7, 0xda, 0xff, + 0x26, 0x02, 0xa3, 0x2d, 0xc1, 0x8e, 0x57, 0x70, 0x02, 0x5c, 0xd3, 0xa1, 0x14, 0xeb, 0xd1, 0x43, + 0x28, 0x8e, 0xbd, 0xd0, 0x9d, 0xd0, 0xaa, 0xb2, 0x93, 0xdf, 0x55, 0xb0, 0x3c, 0x35, 0x8b, 0x62, + 0x4d, 0xd7, 0x28, 0x14, 0x05, 0xe2, 0xdf, 0xd4, 0xb0, 0x1f, 0x11, 0x26, 0x8e, 0x3f, 0x35, 0x03, + 0x5e, 0x48, 0xb5, 0xf1, 0x74, 0x49, 0xc2, 0x6d, 0xe9, 0x8e, 0x13, 0xa0, 0xda, 0xb7, 0xb9, 0x88, + 0xa1, 0x38, 0xdc, 0x1e, 0x66, 0x25, 0x1e, 0xe6, 0x5b, 0x53, 0x9a, 0x5b, 
0x66, 0x4a, 0xbf, 0x02, + 0xd5, 0x64, 0xcc, 0xb4, 0xae, 0x1c, 0x92, 0xee, 0x9a, 0xe3, 0xb7, 0x24, 0x6d, 0x1c, 0xa4, 0x50, + 0x6d, 0x97, 0x05, 0x33, 0x9c, 0x05, 0xaf, 0xbd, 0x00, 0xed, 0xae, 0xc1, 0x5f, 0xac, 0xee, 0x24, + 0xc2, 0x5c, 0x66, 0x5d, 0x3d, 0xcf, 0x3d, 0x53, 0xf4, 0xdf, 0xf3, 0x50, 0xce, 0xce, 0x1d, 0xda, + 0xcf, 0x16, 0x41, 0x6d, 0x6c, 0xcd, 0x85, 0xdc, 0x49, 0x76, 0x4d, 0x5c, 0x21, 0x23, 0x9d, 0x32, + 0xb5, 0xf1, 0xff, 0x39, 0x87, 0x56, 0xba, 0x78, 0xc4, 0x0c, 0x9e, 0x41, 0x89, 0xba, 0xa6, 0x4f, + 0xaf, 0x3c, 0x26, 0xdf, 0x10, 0x8d, 0x37, 0xde, 0x0b, 0x46, 0x5f, 0x7a, 0xe2, 0x04, 0xa3, 0xf6, + 0x53, 0x0e, 0x4a, 0xb1, 0xfa, 0xbf, 0xe0, 0xff, 0x1a, 0x2a, 0x3e, 0x09, 0x2c, 0xe2, 0x32, 0x3b, + 0x5e, 0xb3, 0x71, 0x95, 0x5b, 0xcb, 0x07, 0x62, 0xf0, 0xe3, 0x01, 0xeb, 0x25, 0x90, 0x58, 0x4b, + 0xe1, 0xc5, 0x9f, 0xab, 0xd6, 0x81, 0xca, 0x9c, 0x19, 0xda, 0x06, 0x48, 0x0d, 0x65, 0xf3, 0x66, + 0x34, 0xb7, 0xab, 0x1e, 0xf7, 0x75, 0xf3, 0x3b, 0x05, 0xb6, 0x6d, 0x6f, 0x11, 0xcf, 0x66, 0x59, + 0x3c, 0x8b, 0x68, 0x2f, 0xba, 0xe8, 0x29, 0xaf, 0x5a, 0x97, 0x36, 0xbb, 0x0a, 0xc7, 0x86, 0xe5, + 0x39, 0x75, 0xe1, 0xb3, 0x67, 0xbb, 0x94, 0x05, 0x61, 0xd4, 0x74, 0x7c, 0x3d, 0xd6, 0x53, 0xb8, + 0x3d, 0xf1, 0xe6, 0xbd, 0x24, 0xee, 0xde, 0x65, 0xf6, 0x0d, 0xfe, 0x5b, 0x6e, 0xab, 0xeb, 0x13, + 0xf7, 0x50, 0x7c, 0x93, 0x43, 0xcb, 0xe7, 0x17, 0x35, 0xce, 0xf7, 0xc7, 0x45, 0xee, 0xf6, 0xf1, + 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd0, 0xb4, 0x8d, 0xc7, 0x0b, 0x00, 0x00, +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 0000000000..5dba6a2a0a --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) 
GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0x69, 0x06, 0x23, 0x97, 0x7c, 0x66, 0x3e, + 0x5e, 0xbb, 0x9d, 0x78, 0x61, 0x96, 0x07, 0x80, 0xa4, 0x02, 0x18, 0xa3, 0x5c, 0xd3, 0x33, 0x4b, + 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x21, 0xba, 0x74, 0x33, 0xf3, 0x8a, 0x4b, 0x8a, + 0x4a, 0x73, 0x53, 0xf3, 0x4a, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0xf4, 0x11, 0x06, 0xea, 0x42, 0x42, + 0x32, 0x3d, 0x35, 0x4f, 0x37, 0x1d, 0x25, 0x40, 0x5f, 0x31, 0xc9, 0xf8, 0x17, 0xa4, 0xe6, 0x39, + 0x43, 0xac, 0x05, 0x9b, 
0x8d, 0xf0, 0x66, 0x98, 0x61, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xcf, 0x32, 0xff, 0x46, 0x96, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go new file mode 100644 index 0000000000..2f4ab19b5e --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -0,0 +1,1553 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SERVER Span_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. 
+ Span_CLIENT Span_SpanKind = 2 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SERVER", + 2: "CLIENT", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SERVER": 1, + "CLIENT": 2, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown, or known but other + // than parent-child. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. 
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace. And form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next id is 17. +// TODO(bdrutu): Add an example. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. 
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The Tracestate on the span. + Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // When null or empty string received - receiver may use string "name" + // as a replacement. There might be smarted algorithms implemented by + // receiver to fix the empty span name. + // + // This field is required. + Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to the value of end_time field if it was + // set. 
Or to the current time if neither was set. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to start_time value. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // A stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // The included time events. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // The included links. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. Semantically when Status + // wasn't set it is means span ended without errors and assume + // Status.Ok (code = 0). + Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // An optional resource that is associated with this span. If not set, this span + // should be part of a batch that does include the resource information, unless resource + // information is unknown. 
+ Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` + // A highly recommended but not required flag that identifies when a + // trace crosses a process boundary. True when the parent_span belongs + // to the same process as the current span. This flag is most commonly + // used to indicate the need to adjust time as clocks in different + // processes may not be synchronized. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. + ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + 
+func (m *Span) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() *TruncatableString { + if m != nil { + return m.Name + } + return nil +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// This field conveys information about request position in multiple distributed tracing graphs. +// It is a list of Tracestate.Entry with a maximum of 32 members in the list. +// +// See the https://github.com/w3c/distributed-tracing for more details about this field. +type Span_Tracestate struct { + // A list of entries that represent the Tracestate. 
+ Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } +func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate) ProtoMessage() {} +func (*Span_Tracestate) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) +} +func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate.Merge(m, src) +} +func (m *Span_Tracestate) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate.Size(m) +} +func (m *Span_Tracestate) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo + +func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { + if m != nil { + return m.Entries + } + return nil +} + +type Span_Tracestate_Entry struct { + // The key must begin with a lowercase letter, and can only contain + // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes + // '-', asterisks '*', and forward slashes '/'. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value is opaque string up to 256 characters printable ASCII + // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. + // Note that this also excludes tabs, newlines, carriage returns, etc. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } +func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate_Entry) ProtoMessage() {} +func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} +} + +func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) +} +func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) +} +func (m *Span_Tracestate_Entry) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate_Entry.Size(m) +} +func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo + +func (m *Span_Tracestate_Entry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Span_Tracestate_Entry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A set of attributes, each with a key and a value. +type Span_Attributes struct { + // The set of attributes. The value can be a string, an integer, a double + // or the Boolean values `true` or `false`. Note, global attributes like + // server name can be set as tags using resource API. 
Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0, then no attributes were dropped. + DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 1} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) 
GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. + // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type 
Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +// A text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // The type of MessageEvent. Indicates whether the message was sent or + // received. 
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. For example, this field could + // represent a sequence ID for a streaming RPC. It is recommended to be + // unique within a Span. + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. + CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m 
*Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { + if m != nil { + return m.UncompressedSize + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { + if m != nil { + return m.CompressedSize + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key-value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 3} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. 
+type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. + Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + // The Tracestate associated with the link. + Tracestate *Span_Tracestate `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) 
GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 5} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != 
nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +type Status struct { + // The status code. This is optional field. It is safe to assume 0 (OK) + // when not set. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{1} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// The value of an 
Attribute. +type AttributeValue struct { + // The type of the value. + // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + // *AttributeValue_DoubleValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{2} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AttributeValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + 
+func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AttributeValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + (*AttributeValue_DoubleValue)(nil), + } +} + +// The call stack which originated this span. +type StackTrace struct { + // Stack frames in this stack trace. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. 
+ // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. + StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// A single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. 
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears. + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code. 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) 
GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. + DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() 
[]*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// A description of a binary module. +type Module struct { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents. + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{4} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// A string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. 
For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{5} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m 
*TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") + proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") + proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") + proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") + proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") + proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") + proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") + proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), 
"opencensus.proto.trace.v1.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") + proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) +} + +var fileDescriptor_8ea38bbb821bf584 = []byte{ + // 1581 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdb, 0x6e, 0x1b, 0x41, + 0x19, 0xce, 0xfa, 0xec, 0xdf, 0x8e, 0xeb, 0x4c, 0xd3, 0x74, 0x63, 0x0a, 0x0d, 0x6e, 0x0b, 0x29, + 0x25, 0x9b, 0x26, 0x2d, 0x55, 0x8f, 0x2a, 0x71, 0xe2, 0x60, 0x37, 0xa9, 0xeb, 0x8e, 0xdd, 0x88, + 0x83, 0xd0, 0x6a, 0xed, 0x9d, 0x38, 0x4b, 0xec, 0xd9, 0x65, 0x77, 0x36, 0x28, 0x7d, 0x01, 0x84, + 0xe0, 0x86, 0x0b, 0xc4, 0x0b, 0x70, 0xc1, 0xeb, 0x20, 0xee, 0x79, 0x00, 0x24, 0x9e, 0x80, 0x1b, + 0x34, 0x33, 0x7b, 0x72, 0xd2, 0x26, 0xc6, 0xbd, 0xb1, 0xe6, 0xf0, 0x7f, 0xdf, 0x3f, 0xff, 0xcc, + 0x7f, 0x5a, 0xc3, 0x03, 0xdb, 0x21, 0x74, 0x48, 0xa8, 0xe7, 0x7b, 0x9b, 0x8e, 0x6b, 0x33, 0x7b, + 0x93, 0xb9, 0xc6, 0x90, 0x6c, 0x9e, 0x6d, 0xc9, 0x81, 0x26, 0x16, 0xd1, 0x6a, 0x2c, 0x26, 0x57, + 0x34, 0xb9, 0x7b, 0xb6, 0x55, 0x7b, 0x74, 0x89, 0xc1, 0x25, 0x9e, 0xed, 0xbb, 0x92, 0x24, 0x1c, + 0x4b, 0x54, 0xed, 0xee, 0xc8, 0xb6, 0x47, 0x63, 0x22, 0x05, 0x07, 0xfe, 0xf1, 0x26, 0xb3, 0x26, + 0xc4, 0x63, 0xc6, 0xc4, 0x09, 0x04, 0xbe, 0x77, 0x51, 0xe0, 0x77, 0xae, 0xe1, 0x38, 0xc4, 0x0d, + 0xd4, 0xd6, 0xff, 0xbc, 0x02, 0x99, 0x9e, 0x63, 0x50, 0xb4, 0x0a, 0x05, 0x71, 0x04, 0xdd, 0x32, + 0x55, 0x65, 0x4d, 0x59, 0x2f, 0xe3, 0xbc, 0x98, 0xb7, 0x4d, 0x74, 0x1b, 0xf2, 0x9e, 0x63, 0x50, + 0xbe, 0x93, 0x12, 0x3b, 0x39, 0x3e, 0x6d, 0x9b, 0xe8, 0x1d, 0x80, 0x90, 0xf1, 0x98, 0xc1, 0x88, + 0x7a, 0x63, 0x4d, 0x59, 0x2f, 0x6d, 0xff, 0x48, 0xfb, 0xaa, 0x69, 0x1a, 0x57, 0xa4, 0xf5, 0x23, + 0x04, 0x4e, 0xa0, 0xd1, 0x7d, 0xa8, 0x38, 0x86, 0x4b, 0x28, 0xd3, 0x43, 0x5d, 0x69, 0xa1, 
0xab, + 0x2c, 0x57, 0x7b, 0x52, 0xe3, 0x4f, 0x21, 0x43, 0x8d, 0x09, 0x51, 0x33, 0x42, 0xd7, 0x8f, 0xaf, + 0xd0, 0xd5, 0x77, 0x7d, 0x3a, 0x34, 0x98, 0x31, 0x18, 0x93, 0x1e, 0x73, 0x2d, 0x3a, 0xc2, 0x02, + 0x89, 0x5e, 0x43, 0xe6, 0xd4, 0xa2, 0xa6, 0x5a, 0x59, 0x53, 0xd6, 0x2b, 0xdb, 0xeb, 0xd7, 0x9d, + 0x96, 0xff, 0x1c, 0x58, 0xd4, 0xc4, 0x02, 0x85, 0x5e, 0x00, 0x78, 0xcc, 0x70, 0x99, 0xce, 0xef, + 0x59, 0xcd, 0x8a, 0x53, 0xd4, 0x34, 0x79, 0xc7, 0x5a, 0x78, 0xc7, 0x5a, 0x3f, 0x7c, 0x04, 0x5c, + 0x14, 0xd2, 0x7c, 0x8e, 0x7e, 0x02, 0x05, 0x42, 0x4d, 0x09, 0xcc, 0x5d, 0x0b, 0xcc, 0x13, 0x6a, + 0x0a, 0xd8, 0x3b, 0x00, 0x83, 0x31, 0xd7, 0x1a, 0xf8, 0x8c, 0x78, 0x6a, 0x7e, 0xb6, 0x3b, 0xde, + 0x89, 0x10, 0x38, 0x81, 0x46, 0xfb, 0x50, 0xf2, 0x98, 0x31, 0x3c, 0xd5, 0x85, 0xb4, 0x5a, 0x10, + 0x64, 0x0f, 0xae, 0x22, 0xe3, 0xd2, 0xe2, 0xc1, 0x30, 0x78, 0xd1, 0x18, 0x1d, 0x40, 0x89, 0x9b, + 0xa1, 0x93, 0x33, 0x42, 0x99, 0xa7, 0x16, 0x67, 0x7c, 0x78, 0x6b, 0x42, 0x9a, 0x02, 0x81, 0x81, + 0x45, 0x63, 0xf4, 0x0a, 0xb2, 0x63, 0x8b, 0x9e, 0x7a, 0x2a, 0x5c, 0x7f, 0x1c, 0x4e, 0x73, 0xc8, + 0x85, 0xb1, 0xc4, 0xa0, 0x17, 0x90, 0xe3, 0xee, 0xe3, 0x7b, 0x6a, 0x49, 0xa0, 0xbf, 0x7f, 0xb5, + 0x31, 0xcc, 0xf7, 0x70, 0x00, 0x40, 0x0d, 0x28, 0x84, 0xc1, 0xa4, 0x56, 0x05, 0xf8, 0x07, 0x97, + 0xc1, 0x51, 0xb8, 0x9d, 0x6d, 0x69, 0x38, 0x18, 0xe3, 0x08, 0x87, 0x7e, 0x0e, 0xdf, 0xf1, 0x8c, + 0x09, 0xd1, 0x1d, 0xd7, 0x1e, 0x12, 0xcf, 0xd3, 0x0d, 0x4f, 0x4f, 0x38, 0xb1, 0x5a, 0xfe, 0xca, + 0x33, 0x37, 0x6c, 0x7b, 0x7c, 0x64, 0x8c, 0x7d, 0x82, 0x6f, 0x73, 0x78, 0x57, 0xa2, 0x77, 0xbc, + 0x6e, 0xe4, 0xea, 0x68, 0x1f, 0xaa, 0xc3, 0x13, 0x6b, 0x6c, 0xca, 0x68, 0x18, 0xda, 0x3e, 0x65, + 0xea, 0xa2, 0xa0, 0xbb, 0x73, 0x89, 0xee, 0x53, 0x9b, 0xb2, 0x27, 0xdb, 0x92, 0xb0, 0x22, 0x50, + 0x9c, 0x62, 0x97, 0x63, 0x6a, 0x7f, 0x50, 0x00, 0xe2, 0x88, 0x43, 0xef, 0x20, 0x4f, 0x28, 0x73, + 0x2d, 0xe2, 0xa9, 0xca, 0x5a, 0x7a, 0xbd, 0xb4, 0xfd, 0x78, 0xf6, 0x70, 0xd5, 0x9a, 0x94, 0xb9, + 0xe7, 0x38, 0x24, 0xa8, 0x6d, 
0x42, 0x56, 0xac, 0xa0, 0x2a, 0xa4, 0x4f, 0xc9, 0xb9, 0xc8, 0x1a, + 0x45, 0xcc, 0x87, 0x68, 0x19, 0xb2, 0x67, 0xfc, 0x38, 0x22, 0x5f, 0x14, 0xb1, 0x9c, 0xd4, 0xfe, + 0x92, 0x02, 0x88, 0x3d, 0x13, 0x19, 0xb0, 0x18, 0xf9, 0xa6, 0x3e, 0x31, 0x9c, 0xe0, 0x44, 0xaf, + 0x67, 0x77, 0xee, 0x78, 0xf8, 0xde, 0x70, 0xe4, 0xe9, 0xca, 0x46, 0x62, 0x09, 0x3d, 0x07, 0xd5, + 0x74, 0x6d, 0xc7, 0x21, 0xa6, 0x1e, 0x87, 0x41, 0x70, 0x9b, 0xfc, 0x68, 0x59, 0xbc, 0x12, 0xec, + 0xc7, 0xa4, 0xf2, 0xde, 0x7e, 0x03, 0x4b, 0x97, 0xc8, 0xbf, 0x60, 0xe8, 0xdb, 0xa4, 0xa1, 0xa5, + 0xed, 0x87, 0x57, 0x9c, 0x3d, 0xa2, 0x93, 0x0f, 0x25, 0x71, 0x2f, 0x53, 0xcf, 0x95, 0xda, 0xdf, + 0xb2, 0x50, 0x8c, 0x82, 0x03, 0x69, 0x90, 0x11, 0x39, 0x42, 0xb9, 0x36, 0x47, 0x08, 0x39, 0x74, + 0x04, 0x60, 0x50, 0x6a, 0x33, 0x83, 0x59, 0x36, 0x0d, 0xce, 0xf1, 0x74, 0xe6, 0x58, 0xd4, 0x76, + 0x22, 0x6c, 0x6b, 0x01, 0x27, 0x98, 0xd0, 0xaf, 0x61, 0x71, 0x42, 0x3c, 0xcf, 0x18, 0x05, 0x71, + 0x2e, 0xf2, 0x71, 0x69, 0xfb, 0xd9, 0xec, 0xd4, 0xef, 0x25, 0x5c, 0x4c, 0x5a, 0x0b, 0xb8, 0x3c, + 0x49, 0xcc, 0x6b, 0x7f, 0x57, 0x00, 0x62, 0xdd, 0xa8, 0x03, 0x25, 0x93, 0x78, 0x43, 0xd7, 0x72, + 0x84, 0x19, 0xca, 0x1c, 0xf9, 0x3d, 0x49, 0x70, 0x21, 0x6d, 0xa6, 0xbe, 0x25, 0x6d, 0xd6, 0xfe, + 0xab, 0x40, 0x39, 0x69, 0x0b, 0xfa, 0x00, 0x19, 0x76, 0xee, 0xc8, 0x27, 0xaa, 0x6c, 0xbf, 0x9a, + 0xef, 0x46, 0xb4, 0xfe, 0xb9, 0x43, 0xb0, 0x20, 0x42, 0x15, 0x48, 0x05, 0xc5, 0x35, 0x83, 0x53, + 0x96, 0x89, 0x1e, 0xc1, 0x92, 0x4f, 0x87, 0xf6, 0xc4, 0x71, 0x89, 0xe7, 0x11, 0x53, 0xf7, 0xac, + 0xcf, 0x44, 0xdc, 0x7f, 0x06, 0x57, 0x93, 0x1b, 0x3d, 0xeb, 0x33, 0x41, 0x3f, 0x84, 0x1b, 0x17, + 0x45, 0x33, 0x42, 0xb4, 0x32, 0x2d, 0x58, 0x7f, 0x0a, 0x19, 0xae, 0x13, 0x2d, 0x43, 0xb5, 0xff, + 0x8b, 0x6e, 0x53, 0xff, 0xd4, 0xe9, 0x75, 0x9b, 0xbb, 0xed, 0xfd, 0x76, 0x73, 0xaf, 0xba, 0x80, + 0x0a, 0x90, 0xe9, 0x35, 0x3b, 0xfd, 0xaa, 0x82, 0xca, 0x50, 0xc0, 0xcd, 0xdd, 0x66, 0xfb, 0xa8, + 0xb9, 0x57, 0x4d, 0x35, 0xf2, 0x81, 0x8b, 0xd7, 0xfe, 0xc9, 0x53, 
0x49, 0x9c, 0xb7, 0x5b, 0x00, + 0x71, 0x11, 0x08, 0x62, 0xf7, 0xe1, 0xcc, 0x57, 0x81, 0x8b, 0x51, 0x09, 0x40, 0x2f, 0x61, 0x35, + 0x8a, 0xd2, 0xc8, 0x23, 0xa6, 0xc3, 0xf4, 0x76, 0x18, 0xa6, 0xf1, 0xbe, 0x88, 0x53, 0xf4, 0x16, + 0xee, 0x84, 0xd8, 0x29, 0x6f, 0x0d, 0xe1, 0x69, 0x01, 0x0f, 0xf9, 0x93, 0xf7, 0x1f, 0x04, 0xfa, + 0xbf, 0x52, 0x90, 0xe1, 0x25, 0x65, 0xae, 0x06, 0xe8, 0x4d, 0xe0, 0x08, 0x69, 0xe1, 0x08, 0x0f, + 0x67, 0x29, 0x5d, 0xc9, 0x67, 0x9f, 0x76, 0xd2, 0xcc, 0x37, 0xd5, 0xf6, 0xe9, 0x5e, 0x2c, 0xfb, + 0x2d, 0xbd, 0x58, 0xfd, 0xe0, 0x4a, 0x47, 0xb9, 0x05, 0x4b, 0xbb, 0xad, 0xf6, 0xe1, 0x9e, 0x7e, + 0xd8, 0xee, 0x1c, 0x34, 0xf7, 0xf4, 0x5e, 0x77, 0xa7, 0x53, 0x55, 0xd0, 0x0a, 0xa0, 0xee, 0x0e, + 0x6e, 0x76, 0xfa, 0x53, 0xeb, 0xa9, 0xda, 0x6f, 0x21, 0x2b, 0x4a, 0x36, 0x7a, 0x0e, 0x19, 0x5e, + 0xb4, 0x03, 0x57, 0xb9, 0x3f, 0xcb, 0x65, 0x61, 0x81, 0x40, 0x1a, 0xdc, 0x0c, 0x1f, 0x59, 0x94, + 0xfd, 0x29, 0xd7, 0x58, 0x0a, 0xb6, 0x84, 0x12, 0xf1, 0xa6, 0xf5, 0x37, 0x50, 0x08, 0xfb, 0x36, + 0xb4, 0x0a, 0xb7, 0xf8, 0x41, 0xf4, 0x83, 0x76, 0x67, 0xef, 0x82, 0x21, 0x00, 0xb9, 0x5e, 0x13, + 0x1f, 0x35, 0x71, 0x55, 0xe1, 0xe3, 0xdd, 0xc3, 0x36, 0xf7, 0xff, 0x54, 0xfd, 0x19, 0xe4, 0x64, + 0xaf, 0x80, 0x10, 0x64, 0x86, 0xb6, 0x29, 0x03, 0x3d, 0x8b, 0xc5, 0x18, 0xa9, 0x90, 0x0f, 0x3c, + 0x2d, 0xa8, 0x6e, 0xe1, 0xb4, 0xfe, 0x0f, 0x05, 0x2a, 0xd3, 0x59, 0x1e, 0x7d, 0x84, 0xb2, 0x27, + 0xb2, 0x93, 0x2e, 0xcb, 0xc4, 0x1c, 0x79, 0xad, 0xb5, 0x80, 0x4b, 0x92, 0x43, 0x52, 0x7e, 0x17, + 0x8a, 0x16, 0x65, 0x7a, 0x5c, 0x76, 0xd2, 0xad, 0x05, 0x5c, 0xb0, 0x28, 0x93, 0xdb, 0x77, 0x01, + 0x06, 0xb6, 0x3d, 0x0e, 0xf6, 0xb9, 0x63, 0x16, 0x5a, 0x0b, 0xb8, 0x38, 0x08, 0x5b, 0x0e, 0x74, + 0x0f, 0xca, 0xa6, 0xed, 0x0f, 0xc6, 0x24, 0x10, 0xe1, 0x6e, 0xa7, 0x70, 0x25, 0x72, 0x55, 0x08, + 0x45, 0x41, 0x5f, 0xff, 0x63, 0x0e, 0x20, 0xee, 0x02, 0x51, 0x9f, 0xdb, 0xc3, 0x3b, 0xc8, 0x63, + 0xd7, 0x98, 0x88, 0x26, 0x82, 0xdb, 0xb3, 0x35, 0x53, 0x0b, 0x29, 0x87, 0xfb, 0x02, 0x88, 0x65, + 0x23, 
0x2a, 0x27, 0x68, 0x03, 0x6e, 0x26, 0xfa, 0x52, 0xfd, 0xc4, 0xf0, 0x4e, 0xf4, 0x28, 0x1f, + 0x56, 0xe3, 0xc6, 0xb3, 0x65, 0x78, 0x27, 0x6d, 0xb3, 0xf6, 0x9f, 0x74, 0x70, 0x26, 0x01, 0x47, + 0x1f, 0x61, 0xf1, 0xd8, 0xa7, 0x43, 0x9e, 0x14, 0x74, 0xf1, 0x71, 0x30, 0x4f, 0xf1, 0x28, 0x87, + 0x14, 0x1d, 0x4e, 0x39, 0x80, 0x15, 0xdb, 0xb5, 0x46, 0x16, 0x35, 0xc6, 0xfa, 0x34, 0x77, 0x6a, + 0x0e, 0xee, 0xe5, 0x90, 0x6b, 0x3f, 0xa9, 0xa3, 0x0d, 0xc5, 0x63, 0x6b, 0x4c, 0x24, 0x6d, 0x7a, + 0x0e, 0xda, 0x02, 0x87, 0x0b, 0xaa, 0xbb, 0x50, 0x1a, 0x5b, 0x94, 0xe8, 0xd4, 0x9f, 0x0c, 0x88, + 0x2b, 0x5e, 0x34, 0x8d, 0x81, 0x2f, 0x75, 0xc4, 0x0a, 0xba, 0x07, 0x8b, 0x43, 0x7b, 0xec, 0x4f, + 0x68, 0x28, 0x92, 0x15, 0x22, 0x65, 0xb9, 0x18, 0x08, 0x35, 0xa0, 0x34, 0xb6, 0x0d, 0x53, 0x9f, + 0xd8, 0xa6, 0x3f, 0x0e, 0xbf, 0x51, 0xae, 0x6a, 0xa8, 0xdf, 0x0b, 0x41, 0x0c, 0x1c, 0x25, 0xc7, + 0xa8, 0x07, 0x15, 0xd9, 0x1a, 0xeb, 0x67, 0xc4, 0xf5, 0x78, 0x25, 0xcf, 0xcf, 0x61, 0xd9, 0xa2, + 0xe4, 0x38, 0x92, 0x14, 0xb5, 0xdf, 0x2b, 0x50, 0x4a, 0xf8, 0x0e, 0xda, 0x87, 0xac, 0x70, 0xbf, + 0x59, 0x5a, 0xd8, 0x2f, 0x79, 0x1f, 0x96, 0x70, 0xf4, 0x18, 0x96, 0xc3, 0xb4, 0x22, 0xdd, 0x79, + 0x2a, 0xaf, 0xa0, 0x60, 0x4f, 0x2a, 0x95, 0x89, 0xe5, 0xaf, 0x0a, 0xe4, 0x02, 0x4b, 0xf7, 0x20, + 0x17, 0x5c, 0xd4, 0x3c, 0xee, 0x16, 0x60, 0xd1, 0xcf, 0xa0, 0x30, 0xf0, 0x79, 0x9b, 0x1f, 0xb8, + 0xfb, 0xff, 0xcb, 0x93, 0x17, 0xe8, 0xb6, 0x59, 0xff, 0x15, 0x2c, 0x5d, 0xda, 0x8d, 0xdb, 0x70, + 0x25, 0xd1, 0x86, 0x73, 0xb3, 0x99, 0x14, 0x25, 0xa6, 0x3e, 0x38, 0x67, 0x64, 0xda, 0xec, 0x68, + 0xaf, 0x71, 0xce, 0x88, 0x30, 0xbb, 0xf1, 0x27, 0x05, 0xee, 0x58, 0xf6, 0xd7, 0x0f, 0xd6, 0x90, + 0x9f, 0x18, 0x5d, 0xbe, 0xd8, 0x55, 0x7e, 0xd9, 0x18, 0x59, 0xec, 0xc4, 0x1f, 0x68, 0x43, 0x7b, + 0xb2, 0x29, 0xe5, 0x37, 0x2c, 0xea, 0x31, 0xd7, 0x9f, 0x10, 0x2a, 0x8b, 0xf7, 0x66, 0x4c, 0xb5, + 0x21, 0xff, 0xe3, 0x18, 0x11, 0xba, 0x31, 0x8a, 0xff, 0x2c, 0xf9, 0x77, 0x6a, 0xf5, 0x83, 0x43, + 0xe8, 0xae, 0xd4, 0x26, 0x88, 0x65, 0xb1, 
0xd2, 0x8e, 0xb6, 0x06, 0x39, 0x01, 0x79, 0xf2, 0xbf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x56, 0xb6, 0xfd, 0x6c, 0x11, 0x00, 0x00, +} diff --git a/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go new file mode 100644 index 0000000000..025387784f --- /dev/null +++ b/test/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -0,0 +1,359 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). 
+type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ProbabilitySampler + // *TraceConfig_ConstantSampler + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` + // The global default max number of message events per span. 
+ MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{0} +} + +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ProbabilitySampler struct { + ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func 
(*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { + if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { + return x.ProbabilitySampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { + if m != nil { + return m.MaxNumberOfAnnotations + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { + if m != nil { + return m.MaxNumberOfMessageEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ProbabilitySampler)(nil), + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that tries to uniformly sample traces with a given probability. +// The probability of sampling a trace is equal to that of the specified probability. +type ProbabilitySampler struct { + // The desired probability of sampling. Must be within [0.0, 1.0]. 
+ SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } +func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } +func (*ProbabilitySampler) ProtoMessage() {} +func (*ProbabilitySampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{1} +} + +func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) +} +func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) +} +func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbabilitySampler.Merge(m, src) +} +func (m *ProbabilitySampler) XXX_Size() int { + return xxx_messageInfo_ProbabilitySampler.Size(m) +} +func (m *ProbabilitySampler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo + +func (m *ProbabilitySampler) GetSamplingProbability() float64 { + if m != nil { + return m.SamplingProbability + } + return 0 +} + +// Sampler that always makes a constant decision on span sampling. 
+type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2} +} + +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{3} +} + +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") + proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") + proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") + proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) +} + +var 
fileDescriptor_5359209b41ff50c5 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0x97, 0x76, 0x6c, 0xec, 0x9b, 0xb6, 0x05, 0x57, 0x43, 0xa9, 0xb4, 0xc3, 0x94, 0x0b, + 0x13, 0x22, 0x09, 0x1d, 0x07, 0x84, 0x90, 0x90, 0xda, 0x6e, 0x15, 0x87, 0xd2, 0x56, 0xd9, 0x44, + 0x05, 0x97, 0xe0, 0x64, 0x6e, 0xb0, 0x68, 0xec, 0x60, 0x3b, 0xd5, 0x78, 0x0d, 0xce, 0x3c, 0x04, + 0xcf, 0xc5, 0x53, 0xa0, 0x3a, 0x21, 0x49, 0xdb, 0x6d, 0xe2, 0x96, 0xef, 0xfb, 0x7f, 0xbf, 0x9f, + 0xad, 0xd8, 0x86, 0x17, 0x3c, 0x25, 0x2c, 0x22, 0x4c, 0x66, 0xd2, 0x4b, 0x05, 0x57, 0xdc, 0x53, + 0x02, 0x47, 0xc4, 0x5b, 0x74, 0xf2, 0x8f, 0x20, 0xe2, 0x6c, 0x46, 0x63, 0x57, 0x67, 0xa8, 0x5d, + 0x4d, 0xe7, 0x1d, 0x57, 0x0f, 0xb9, 0x8b, 0x8e, 0xfd, 0x6b, 0x1b, 0xf6, 0xaf, 0x97, 0x45, 0x5f, + 0x03, 0xe8, 0x0b, 0xb4, 0x52, 0xc1, 0x43, 0x1c, 0xd2, 0x39, 0x55, 0x3f, 0x02, 0x89, 0x93, 0x74, + 0x4e, 0x84, 0x65, 0x9c, 0x1a, 0x67, 0xfb, 0xe7, 0x8e, 0x7b, 0xaf, 0xc8, 0x9d, 0x54, 0xd4, 0x55, + 0x0e, 0xbd, 0xdf, 0xf2, 0x51, 0xba, 0xd1, 0x45, 0x53, 0x30, 0x23, 0xce, 0xa4, 0xc2, 0x4c, 0x95, + 0xfa, 0x86, 0xd6, 0x3f, 0x7f, 0x40, 0xdf, 0x2f, 0x90, 0xca, 0x7d, 0x14, 0xad, 0xb6, 0xd0, 0x0d, + 0x1c, 0x0b, 0xac, 0x48, 0x30, 0xa7, 0x09, 0x55, 0x94, 0xc5, 0xa5, 0xbd, 0xa9, 0xed, 0xee, 0x03, + 0x76, 0x1f, 0x2b, 0x32, 0x2c, 0xb0, 0x6a, 0x85, 0x96, 0xd8, 0x6c, 0xa3, 0xd7, 0x60, 0x25, 0xf8, + 0x36, 0x60, 0x59, 0x12, 0x12, 0x11, 0xf0, 0x59, 0x80, 0x95, 0x12, 0x34, 0xcc, 0x14, 0x91, 0xd6, + 0xf6, 0xa9, 0x71, 0xd6, 0xf4, 0x8f, 0x13, 0x7c, 0x3b, 0xd2, 0xf1, 0x78, 0xd6, 0x2d, 0x43, 0xf4, + 0x06, 0xda, 0x6b, 0x20, 0x63, 0x5c, 0x61, 0x45, 0x39, 0x93, 0xd6, 0x23, 0x4d, 0x3e, 0xad, 0x93, + 0x55, 0x8a, 0xde, 0xc1, 0xc9, 0x2a, 0x9a, 0x10, 0x29, 0x71, 0x4c, 0x02, 0xb2, 0x20, 0x4c, 0x49, + 0x6b, 0x47, 0xd3, 0x56, 0x8d, 0xfe, 0x90, 0x0f, 0x5c, 0xea, 0x1c, 0x39, 0xd0, 0x5a, 0xe5, 0xe7, + 0x94, 0x7d, 0x93, 0xd6, 0xae, 0xc6, 0xcc, 
0x1a, 0x36, 0x5c, 0xf6, 0x7b, 0x7b, 0xb0, 0x5b, 0xfc, + 0x3a, 0x7b, 0x00, 0x68, 0xf3, 0x60, 0xd1, 0x4b, 0x68, 0xe9, 0x01, 0xca, 0xe2, 0x5a, 0xaa, 0x2f, + 0x89, 0xe1, 0xdf, 0x15, 0xd9, 0xbf, 0x0d, 0x38, 0x5a, 0x3b, 0x42, 0x34, 0x85, 0xc7, 0x37, 0x24, + 0xa2, 0x92, 0x72, 0xa6, 0xd1, 0xc3, 0xf3, 0xb7, 0xff, 0x7f, 0x01, 0xca, 0xfa, 0xa2, 0x50, 0xf8, + 0xa5, 0xcc, 0xbe, 0x00, 0x73, 0x3d, 0x45, 0x87, 0x00, 0xdd, 0xe1, 0xb4, 0xfb, 0xe9, 0x2a, 0x18, + 0x0f, 0x06, 0xe6, 0x16, 0x3a, 0x80, 0xbd, 0x7f, 0xf5, 0xc8, 0x34, 0xd0, 0x13, 0x38, 0x28, 0xca, + 0x49, 0xd7, 0xbf, 0x1c, 0x5d, 0x9b, 0x0d, 0xfb, 0x19, 0xb4, 0xee, 0xb8, 0x16, 0xc8, 0x84, 0xe6, + 0xf7, 0x54, 0xea, 0x0d, 0x37, 0xfd, 0xe5, 0x67, 0xef, 0xa7, 0x01, 0x27, 0x94, 0xdf, 0xbf, 0xf5, + 0x9e, 0x59, 0x7b, 0x60, 0x93, 0x65, 0x34, 0x31, 0x3e, 0xf7, 0x62, 0xaa, 0xbe, 0x66, 0xa1, 0x1b, + 0xf1, 0xc4, 0xcb, 0x29, 0x87, 0x32, 0xa9, 0x44, 0x96, 0x10, 0x96, 0x1f, 0xbb, 0x57, 0x09, 0x9d, + 0xfc, 0x89, 0xc7, 0x84, 0x39, 0x71, 0xf5, 0xd2, 0xff, 0x34, 0xda, 0xe3, 0x94, 0xb0, 0x7e, 0xbe, + 0xa6, 0x16, 0xbb, 0x7a, 0x25, 0xf7, 0x63, 0x27, 0xdc, 0xd1, 0xc8, 0xab, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x0c, 0xfe, 0x32, 0x29, 0x04, 0x00, 0x00, +} diff --git a/test/vendor/github.com/ghodss/yaml/LICENSE b/test/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 0000000000..7805d36de7 --- /dev/null +++ b/test/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included 
in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/ghodss/yaml/fields.go b/test/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 0000000000..5860074026 --- /dev/null +++ b/test/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. 
+func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/test/vendor/github.com/ghodss/yaml/yaml.go b/test/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 0000000000..4fb4054a8b --- /dev/null +++ b/test/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. 
+ err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. 
+ if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. 
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. 
+ + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/test/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/test/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 0000000000..081c86fa8e --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. 
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. 
+ +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +The is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. 
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package name for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. 
+ - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind is explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extension the code that is generated +will be the same as if goprotobuf has generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a seperate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/test/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/test/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..1e91766aee --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: 
"varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: 
"varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: 
"varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + 
Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + 
Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + 
Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + 
Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func 
init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + 
proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 
0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 
0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 
0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 
0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/test/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/test/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 0000000000..390d4e4be6 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, 
E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return 
proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + 
+func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, 
E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func 
HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum 
*google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file 
*google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/test/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/test/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go new file mode 100755 index 0000000000..3893c02d46 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1421 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. 
The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. 
+// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. 
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." 
+ s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types, use CustomType as a passer. CastType currently property is + // not used in json encoding. 
+ if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. 
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. 
+ enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. 
+ if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. 
+ AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. 
+func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + 
} + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + 
sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. 
+ if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. 
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. 
+ // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. 
+ isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. 
+// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. 
+ if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. 
+ for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 0000000000..a85bf1984c --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. 
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..d1307d9223 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type 
FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. 
+ // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) 
GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() 
[]*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + 
xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != 
nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. 
Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + 
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m 
*EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() 
string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. 
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. 
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { 
+ if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + 
return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. 
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return 
Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. 
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. 
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} 
+func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { 
+ if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return 
xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions 
proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + 
return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
+type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. 
Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. 
For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} 
+ +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil 
{ + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return 
xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), 
"google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), 
"google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 
0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 
0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 
0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 
0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 
0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 
0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 
0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 0000000000..165b2110df --- /dev/null +++ 
b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { 
+ s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, 
"XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", 
this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = 
append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this 
*FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if 
this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s 
= append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", 
this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, 
"Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, 
"int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", 
this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + 
return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 0000000000..e0846a357d --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + 
packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc 
*FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < 
len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if 
f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/test/vendor/github.com/gogo/protobuf/types/any.go b/test/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 0000000000..df4787de37 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,140 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. 
+// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. 
+ if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/test/vendor/github.com/gogo/protobuf/types/any.pb.go b/test/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 0000000000..3074a3d8a0 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/any.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... 
+// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. 
However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 
0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, 
that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) 
+ this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + 
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAny(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: 
negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/api.pb.go b/test/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 0000000000..61612e21a8 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2169 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. 
+ Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method 
proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. 
+// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. 
+ Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 
0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, + 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 
0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < 
that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) 
{ + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", 
this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx 
:= len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + 
dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type 
randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) 
+ if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + 
repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized 
= append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if 
innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/doc.go b/test/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 0000000000..ff2810af1e --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/test/vendor/github.com/gogo/protobuf/types/duration.go b/test/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 0000000000..979b8e78a4 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. 
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/test/vendor/github.com/gogo/protobuf/types/duration.pb.go b/test/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 0000000000..32b957c2bd --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,546 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. 
+// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 
0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != 
that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDuration(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + } + return 
iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/test/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 0000000000..90e7670e21 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git 
a/test/vendor/github.com/gogo/protobuf/types/empty.pb.go b/test/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 0000000000..b061be5e4b --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,491 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
+type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 
0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = 
encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEmpty(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/test/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 0000000000..61ef57e2ca --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,767 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. 
+// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. 
If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. 
+// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 
0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if 
!ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= 
sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA 
= encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + 
if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFieldMask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + 
+var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/protosize.go b/test/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 0000000000..3a2d1b7e11 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } +func (m *EnumValue) ProtoSize() (n int) { return m.Size() } +func (m *Option) ProtoSize() (n int) { return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m 
*Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/test/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/test/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 0000000000..9b0752ed50 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,553 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 
0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, 
that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = 
randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v 
uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + 
return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSourceContext(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/struct.pb.go b/test/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 0000000000..f0a2d36ebb --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2300 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct 
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. 
+type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 
0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case 
*Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: + that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + 
return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this 
*ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} 
+func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil 
+ } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } 
+ s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, 
"XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if 
m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + 
} + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = 
NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = 
append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + 
} + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + 
return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + 
} + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = 
&Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStruct(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/timestamp.go b/test/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 0000000000..232ada57ce --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. 
+// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. 
+func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/test/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/test/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 0000000000..63975b8e57 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. 
+// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. 
A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 
0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == 
nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m 
*Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTimestamp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = 
start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/test/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 0000000000..e03fa13158 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/test/vendor/github.com/gogo/protobuf/types/type.pb.go b/test/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 0000000000..a3a4f354e9 --- /dev/null +++ 
b/test/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3396 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. 
Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. 
+ Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. 
+ Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. 
+ Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. 
+ DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) 
GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) 
GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + 
if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return 
"" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 
0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 
0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 0x77, 
0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if 
this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if 
len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != 
that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != 
len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") 
+ s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this 
== nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, 
uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = 
encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } 
+ if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + 
this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r 
randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := 
range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 
1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := 
strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", 
this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := 
m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + 
m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipType(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/test/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 0000000000..5628dffa40 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2756 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. 
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. 
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. 
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. 
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), 
"google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, + 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00, +} + +func (this 
*DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil 
{ + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return 
false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, 
that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s 
:= make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, 
"&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + 
i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + offset -= sovWrappers(v) + base := offset + for v >= 
1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} 
+ +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = 
encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FloatValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoolValue) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWrappers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + 
skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWrappers(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/test/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go new file mode 100644 index 0000000000..d905df3605 --- /dev/null +++ b/test/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go @@ -0,0 +1,300 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := 
pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v 
*uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return 
pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := &BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/test/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/test/vendor/github.com/golang/protobuf/descriptor/descriptor.go new file mode 100644 index 0000000000..ac7e51bfb1 --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -0,0 +1,93 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/golang/protobuf/proto" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(protobuf.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. 
+func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/test/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/test/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 0000000000..e9cc202585 --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1284 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. 
+ // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. 
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. 
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". 
+ f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. 
+ switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. 
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + var err error + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. 
+ if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. 
+ AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. 
+func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + 
+ t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. 
+ var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. 
+ isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. 
+// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. 
+ if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. 
+ for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/test/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/test/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..1ded05bbe7 --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2887 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. 
Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 
13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 1} 
+} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. 
+ FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{0} +} + +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. 
+ // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{1} +} + +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) 
GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2} +} + +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() 
[]*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + 
xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4} +} + +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m 
!= nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{5} +} + +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. 
Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6} +} + +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + 
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m 
*EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{7} +} + +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{8} +} + +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() 
string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{9} +} + +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. 
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. 
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. 
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { 
+ if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + 
return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. 
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return 
Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. 
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. 
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} 
+func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { 
+ if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return 
xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions 
proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18} +} + +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + 
return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
+type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. 
Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19} +} + +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. 
For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" 
+} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20} +} + +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m 
!= nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return 
xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), 
"google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), 
"google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } + +var fileDescriptor_e5baabe45344a177 = []byte{ + // 2589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, + 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, + 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, + 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, + 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, + 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, + 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, + 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, + 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, + 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, + 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, + 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, + 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, + 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, + 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, + 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 
0xa3, 0x9f, 0x1f, 0x79, + 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, + 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, + 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, + 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, + 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, + 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, + 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, + 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, + 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, + 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, + 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, + 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, + 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, + 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, + 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, + 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, + 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, + 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, + 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, + 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, + 0xbf, 0xbd, 
0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, + 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, + 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, + 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, + 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, + 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, + 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, + 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, + 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, + 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, + 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, + 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, + 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, + 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, + 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, + 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, + 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b, + 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, + 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, + 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, + 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 
0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, + 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, + 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, + 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, + 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, + 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, + 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, + 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, + 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, + 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, + 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, + 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, + 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, + 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, + 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, + 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, + 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, + 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, + 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, + 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, + 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 
0xbd, 0xf0, + 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, + 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, + 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, + 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, + 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, + 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, + 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, + 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, + 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, + 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, + 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, + 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, + 0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, + 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, + 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, + 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, + 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, + 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, + 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, + 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, + 0x89, 0x4e, 0xe5, 0xb7, 
0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, + 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, + 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, + 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, + 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, + 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, + 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, + 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, + 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, + 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, + 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, + 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, + 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, + 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, + 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, + 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, + 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, + 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, + 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, + 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, + 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 
0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, + 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, + 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, + 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, + 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, + 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, + 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, + 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, + 0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, + 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, + 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, + 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, + 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, + 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, + 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, + 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, + 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, + 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, + 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, + 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, + 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, + 
0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, + 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, + 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, + 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, + 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, + 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, + 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, + 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, + 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, + 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, + 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, + 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, + 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, + 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, + 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, + 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, + 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, + 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, + 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, + 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, + 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 
0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +} diff --git a/test/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/test/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 0000000000..b4eb03eccf --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package empty + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
+type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} + +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 
0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/test/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/test/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 0000000000..33daa73dd2 --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. 
For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} + +func (*Value) XXX_WellKnownType() string { return "Value" } + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue 
*ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. 
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} + +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 
0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 
0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/test/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/test/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 0000000000..add19a1adb --- /dev/null +++ b/test/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,461 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} + +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. 
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} + +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} + +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. 
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} + +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} + +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. 
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} + +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} + +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. 
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} + +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} + +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", 
fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go index cca3034053..c02d82e5df 100644 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ 
b/test/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -15,7 +15,6 @@ package name import ( - "errors" "fmt" ) @@ -46,5 +45,5 @@ func ParseReference(s string, opts ...Option) (Reference, error) { return d, nil } // TODO: Combine above errors into something more useful? - return nil, errors.New("could not parse reference") + return nil, NewErrBadName("could not parse reference") } diff --git a/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go index f333779884..5eeb8ace97 100644 --- a/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go +++ b/test/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -50,6 +50,7 @@ func (r Repository) Name() string { if regName != "" { return regName + regRepoDelimiter + r.RepositoryStr() } + // TODO: As far as I can tell, this is unreachable. return r.RepositoryStr() } diff --git a/test/vendor/github.com/google/uuid/CONTRIBUTORS b/test/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000000..b4bb97f6bc --- /dev/null +++ b/test/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/test/vendor/github.com/google/uuid/LICENSE b/test/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000000..5dc68268d9 --- /dev/null +++ b/test/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/google/uuid/dce.go b/test/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000000..fa820b9d30 --- /dev/null +++ b/test/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. 
+// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/test/vendor/github.com/google/uuid/doc.go b/test/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000000..5b8a4b9af8 --- /dev/null +++ b/test/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/test/vendor/github.com/google/uuid/hash.go b/test/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000000..b174616315 --- /dev/null +++ b/test/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/test/vendor/github.com/google/uuid/marshal.go b/test/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000000..7f9e0c6c0e --- /dev/null +++ b/test/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/test/vendor/github.com/google/uuid/node.go b/test/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000000..d651a2b061 --- /dev/null +++ b/test/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
+func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/test/vendor/github.com/google/uuid/node_js.go b/test/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000000..24b78edc90 --- /dev/null +++ b/test/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/test/vendor/github.com/google/uuid/node_net.go b/test/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000000..0cbbcddbd6 --- /dev/null +++ b/test/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. 
+// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/test/vendor/github.com/google/uuid/sql.go b/test/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000000..f326b54db3 --- /dev/null +++ b/test/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. 
Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/test/vendor/github.com/google/uuid/time.go b/test/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000000..e6ef06cdc8 --- /dev/null +++ b/test/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/test/vendor/github.com/google/uuid/util.go b/test/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000000..5ea6c73780 --- /dev/null +++ b/test/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/test/vendor/github.com/google/uuid/uuid.go b/test/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000000..524404cc52 --- /dev/null +++ b/test/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. 
+func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/test/vendor/github.com/google/uuid/version1.go b/test/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000000..199a1ac654 --- /dev/null +++ b/test/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/test/vendor/github.com/google/uuid/version4.go b/test/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000000..84af91c9f5 --- /dev/null +++ b/test/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/test/vendor/github.com/googleapis/gax-go/LICENSE b/test/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 0000000000..6d16b6578a --- /dev/null +++ b/test/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/googleapis/gax-go/v2/call_option.go b/test/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 0000000000..b1d53dd19c --- /dev/null +++ b/test/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,161 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+	// Resolve applies the option by modifying cs.
+	Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
+	Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+	s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+	return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. 
+ d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/test/vendor/github.com/googleapis/gax-go/v2/gax.go b/test/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 0000000000..3fd1b0b84b --- /dev/null +++ b/test/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax-go version being used. +const Version = "2.0.4" diff --git a/test/vendor/github.com/googleapis/gax-go/v2/header.go b/test/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 0000000000..139371a0bf --- /dev/null +++ b/test/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/test/vendor/github.com/googleapis/gax-go/v2/invoke.go b/test/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 0000000000..fe31dd004e --- /dev/null +++ b/test/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,99 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strings" + "time" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + if settings.Retry == nil { + return err + } + // Never retry permanent certificate errors. (e.x. if ca-certificates + // are not installed). We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. 
This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere. + if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 0000000000..364516251b --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go new file mode 100644 index 0000000000..61101d7177 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/errors.proto + +package internal + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Error is the generic error returned from unary RPCs. +type Error struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. 
+ Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{0} +} + +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Error) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Error) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{1} +} + +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (m *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(m, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + 
if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } + +var fileDescriptor_9b093362ca6d1e03 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, + 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, + 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, + 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, + 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, + 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, + 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, + 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, + 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, + 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, + 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, + 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, + 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, + 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, +} diff --git 
a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 0000000000..f8083821f3 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,236 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. 
+*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + for _, val := range vals { + key = textproto.CanonicalMIMEHeaderKey(key) + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. 
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } else { + grpclog.Infof("invalid remote addr: %s", addr) + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. 
+type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + } + return +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. 
+// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go new file mode 100644 index 0000000000..2c279344dc --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go @@ -0,0 +1,318 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/golang/protobuf/ptypes/wrappers" +) + +// String just returns the given string. +// It is just for compatibility to other types. +func String(val string) (string, error) { + return val, nil +} + +// StringSlice converts 'val' where individual strings are separated by +// 'sep' into a string slice. +func StringSlice(val, sep string) ([]string, error) { + return strings.Split(val, sep), nil +} + +// Bool converts the given string representation of a boolean value into bool. +func Bool(val string) (bool, error) { + return strconv.ParseBool(val) +} + +// BoolSlice converts 'val' where individual booleans are separated by +// 'sep' into a bool slice. 
+func BoolSlice(val, sep string) ([]bool, error) { + s := strings.Split(val, sep) + values := make([]bool, len(s)) + for i, v := range s { + value, err := Bool(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float64 converts the given string representation into representation of a floating point number into float64. +func Float64(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +// Float64Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float64 slice. +func Float64Slice(val, sep string) ([]float64, error) { + s := strings.Split(val, sep) + values := make([]float64, len(s)) + for i, v := range s { + value, err := Float64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Float32 converts the given string representation of a floating point number into float32. +func Float32(val string) (float32, error) { + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// Float32Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float32 slice. +func Float32Slice(val, sep string) ([]float32, error) { + s := strings.Split(val, sep) + values := make([]float32, len(s)) + for i, v := range s { + value, err := Float32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int64 converts the given string representation of an integer into int64. +func Int64(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +// Int64Slice converts 'val' where individual integers are separated by +// 'sep' into a int64 slice. 
+func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into a int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. +func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. 
+func Uint32Slice(val, sep string) ([]uint32, error) { + s := strings.Split(val, sep) + values := make([]uint32, len(s)) + for i, v := range s { + value, err := Uint32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Bytes converts the given string representation of a byte sequence into a slice of bytes +// A bytes sequence is encoded in URL-safe base64 without padding +func Bytes(val string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + b, err = base64.URLEncoding.DecodeString(val) + if err != nil { + return nil, err + } + } + return b, nil +} + +// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +func BytesSlice(val, sep string) ([][]byte, error) { + s := strings.Split(val, sep) + values := make([][]byte, len(s)) + for i, v := range s { + value, err := Bytes(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. +func Timestamp(val string) (*timestamp.Timestamp, error) { + var r timestamp.Timestamp + err := jsonpb.UnmarshalString(val, &r) + if err != nil { + return nil, err + } + return &r, nil +} + +// Duration converts the given string into a timestamp.Duration. +func Duration(val string) (*duration.Duration, error) { + var r duration.Duration + err := jsonpb.UnmarshalString(val, &r) + if err != nil { + return nil, err + } + return &r, nil +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. 
+func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. +func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +/* + Support fot google.protobuf.wrappers on top of primitive types +*/ + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrappers.StringValue, error) { + return &wrappers.StringValue{Value: val}, nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrappers.FloatValue, error) { + parsedVal, err := Float32(val) + return &wrappers.FloatValue{Value: parsedVal}, err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrappers.DoubleValue, error) { + parsedVal, err := Float64(val) + return &wrappers.DoubleValue{Value: parsedVal}, err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrappers.BoolValue, error) { + parsedVal, err := Bool(val) + return &wrappers.BoolValue{Value: parsedVal}, err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrappers.Int32Value, error) { + parsedVal, err := Int32(val) + return &wrappers.Int32Value{Value: parsedVal}, err +} + +// UInt32Value well-known type 
support as wrapper around uint32 type +func UInt32Value(val string) (*wrappers.UInt32Value, error) { + parsedVal, err := Uint32(val) + return &wrappers.UInt32Value{Value: parsedVal}, err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrappers.Int64Value, error) { + parsedVal, err := Int64(val) + return &wrappers.Int64Value{Value: parsedVal}, err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrappers.UInt64Value, error) { + parsedVal, err := Uint64(val) + return &wrappers.UInt64Value{Value: parsedVal}, err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrappers.BytesValue, error) { + parsedVal, err := Bytes(val) + return &wrappers.BytesValue{Value: parsedVal}, err +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go new file mode 100644 index 0000000000..b6e5ddf7a9 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go new file mode 100644 index 0000000000..0118ca0479 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -0,0 +1,130 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "github.com/grpc-ecosystem/grpc-gateway/internal" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. 
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with the error. + // You can set a custom function to this variable to customize error format. + HTTPError = DefaultHTTPError + // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest + OtherErrorHandler = DefaultOtherErrorHandler +) + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. 
+// +// The response body returned by this function is a JSON object, +// which contains a member whose key is "error" and whose value is err.Error(). +func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + const fallback = `{"error": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + pb := s.Proto() + contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + body := &internal.Error{ + Error: s.Message(), + Message: s.Message(), + Code: int32(s.Code()), + Details: s.Proto().GetDetails(), + } + + buf, merr := marshaler.Marshal(body) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", body, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. +// It simply writes a string representation of the given error into "w". 
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { + http.Error(w, msg, code) +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go new file mode 100644 index 0000000000..341aad5a3e --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go @@ -0,0 +1,82 @@ +package runtime + +import ( + "encoding/json" + "io" + "strings" + + descriptor2 "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/genproto/protobuf/field_mask" +) + +func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { + // TODO - should really gate this with a test that the marshaller has used json names + if md != nil { + for _, f := range md.Field { + if f.JsonName != nil && f.Name != nil && *f.JsonName == name { + var subType *descriptor.DescriptorProto + + // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. + if f.TypeName != nil { + typeSplit := strings.Split(*f.TypeName, ".") + typeName := typeSplit[len(typeSplit)-1] + for _, t := range md.NestedType { + if typeName == *t.Name { + subType = t + } + } + } + return *f.Name, subType + } + } + } + return name, nil +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
+func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, md: md}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + protoName, subMd := translateName(k, item.md) + if subMsg, ok := v.(descriptor2.Message); ok { + _, subMd = descriptor2.ForMessage(subMsg) + } + queue = append(queue, fieldMaskPathItem{path: append(item.path, protoName), node: v, md: subMd}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node + path []string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // descriptor for parent message + md *descriptor.DescriptorProto +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go new file mode 100644 index 0000000000..2af900650d --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -0,0 +1,209 @@ +package runtime + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + + "context" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/grpclog" +) + +var errEmptyResponse = errors.New("empty response") + +// ForwardResponseStream forwards the 
stream from gRPC server to REST client. +func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + + buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler)) + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func 
handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux 
*ServeMux, err error) { + serr := streamError(ctx, mux.streamErrorHandler, err) + if !wroteHeader { + w.WriteHeader(int(serr.HttpCode)) + } + buf, merr := marshaler.Marshal(errorChunk(serr)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if _, werr := w.Write(buf); werr != nil { + grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +// streamChunk returns a chunk in a response stream for the given result. The +// given errHandler is used to render an error chunk if result is nil. +func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message { + if result == nil { + return errorChunk(streamError(ctx, errHandler, errEmptyResponse)) + } + return map[string]proto.Message{"result": result} +} + +// streamError returns the payload for the final message in a response stream +// that represents the given err. +func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { + serr := errHandler(ctx, err) + if serr != nil { + return serr + } + // TODO: log about misbehaving stream error handler? 
+ return DefaultHTTPStreamErrorHandler(ctx, err) +} + +func errorChunk(err *StreamError) map[string]proto.Message { + return map[string]proto.Message{"error": (*internal.StreamError)(err)} +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go new file mode 100644 index 0000000000..f55285b5d6 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go @@ -0,0 +1,43 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler +func SetHTTPBodyMarshaler(serveMux *ServeMux) { + serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{OrigName: true}, + } +} + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType implementation to keep backwards compatability with marshal interface +func (h *HTTPBodyMarshaler) ContentType() string { + return h.ContentTypeFromMessage(nil) +} + +// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns +// its specified content type otherwise fall back to the default Marshaler. +func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType() +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. 
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.Data, nil + } + return h.Marshaler.Marshal(v) +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go new file mode 100644 index 0000000000..f9d3a585a4 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go @@ -0,0 +1,45 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go new file mode 100644 index 0000000000..f0de351b21 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "github.com/golang/protobuf/jsonpb". +// It supports fully functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb jsonpb.Marshaler + +// ContentType always returns "application/json". +func (*JSONPb) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + if _, ok := v.(proto.Message); !ok { + return j.marshalNonProtoField(v) + } + + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + _, err = w.Write(buf) + return err + } + return (*jsonpb.Marshaler)(j).Marshal(w, p) +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshals arbitrary data structure into JSON, +// but it is only capable of marshaling non-message field values of protobuf, +// i.e. 
primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. +func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. 
+type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); 
err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
+func DisallowUnknownFields() { + allowUnknownFields = false +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go new file mode 100644 index 0000000000..f65d1a2676 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType() string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". 
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + _, err = writer.Write(buffer) + if err != nil { + return err + } + + return nil + }) +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go new file mode 100644 index 0000000000..98fe6e88ac --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go @@ -0,0 +1,48 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. + ContentType() string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. +type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. +type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. +func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. 
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record seperator for the stream. + Delimiter() []byte +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go new file mode 100644 index 0000000000..5cc53ae4f6 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "errors" + "net/http" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. 
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. 
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go new file mode 100644 index 0000000000..1da3a58854 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -0,0 +1,303 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when +// a request is received with a URI path that does not match any registered +// service method. +// +// Since gRPC servers return an "Unimplemented" code for requests with an +// unrecognized URI path, this error also has a gRPC "Unimplemented" code. +var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. 
+ handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + streamErrorHandler StreamErrorHandlerFunc + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool + lastMatchWins bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. 
+func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. 
+// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ProtoErrorHandler will be invoked. However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. +func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithLastMatchWins returns a ServeMuxOption that will enable "last +// match wins" behavior, where if multiple path patterns match a +// request path, the last one defined in the .proto file will be used. +func WithLastMatchWins() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.lastMatchWins = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. 
+func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + streamErrorHandler: DefaultHTTPStreamErrorHandler, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if s.lastMatchWins { + s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) + } else { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + } +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. 
+ for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 0000000000..09053695da --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. 
+ assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. +func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + 
grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. 
+func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 0000000000..a3151e2a55 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. 
+func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. +func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. 
+func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 0000000000..ca76324efb --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,106 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a +// a proto struct used to represent error at the end of a stream. +type StreamErrorHandlerFunc func(context.Context, error) *StreamError + +// StreamError is the payload for the final message in a server stream in the event that the server returns an +// error after a response message has already been sent. +type StreamError internal.StreamError + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + pb := s.Proto() + contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via +// default logic. +// +// It extracts the gRPC status from err if possible. The fields of the status are +// used to populate the returned StreamError, and the HTTP status code is derived +// from the gRPC code via HTTPStatusFromCode. 
If the given err does not contain a +// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. +func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return &StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + } +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go new file mode 100644 index 0000000000..80ff21c3a4 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -0,0 +1,391 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") + +// PopulateQueryParameters populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. 
+// It instantiates missing protobuf fields as it goes. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + 
switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. +func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { + props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + for _, op := range props.OneofTypes { + if name == op.Prop.OrigName || name == op.Prop.JSONName { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + } + + for _, p := range props.Prop { + if p.OrigName == name { + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil + } + } + return reflect.Value{}, nil, nil +} + +func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) + } + + key, value := values[0], values[1] + keyType := f.Type().Key() + valueType := f.Type().Elem() + if f.IsNil() { + f.Set(reflect.MakeMap(f.Type())) + } + + keyConv, ok := convFromType[keyType.Kind()] + if !ok { + return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) + } + valueConv, ok := convFromType[valueType.Kind()] + if !ok { + return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) + } + + keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) + if err := keyV[1].Interface(); err != nil { + return err.(error) + } + valueV := 
valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := valueV[1].Interface(); err != nil { + return err.(error) + } + + f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) + + return nil +} + +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { + elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + + conv, ok := convFromType[elemType.Kind()] + if !ok { + return fmt.Errorf("unsupported field type %s", elemType) + } + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + } + return nil +} + +func populateField(f reflect.Value, value string, props *proto.Properties) error { + i := f.Addr().Interface() + + // Handle protobuf well known types + var name string + switch m := i.(type) { + case interface{ XXX_WellKnownType() string }: + name = m.XXX_WellKnownType() + case proto.Message: + const wktPrefix = "google.protobuf." 
+ if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { + name = fullName[len(wktPrefix):] + } + } + switch name { + case "Timestamp": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + f.FieldByName("Seconds").SetInt(int64(t.Unix())) + f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) + return nil + case "Duration": + if value == "null" { + f.FieldByName("Seconds").SetInt(0) + f.FieldByName("Nanos").SetInt(0) + return nil + } + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + f.FieldByName("Seconds").SetInt(s) + f.FieldByName("Nanos").SetInt(ns) + return nil + case "DoubleValue": + fallthrough + case "FloatValue": + float64Val, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetFloat(float64Val) + return nil + case "Int64Value": + fallthrough + case "Int32Value": + int64Val, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetInt(int64Val) + return nil + case "UInt64Value": + fallthrough + case "UInt32Value": + uint64Val, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return fmt.Errorf("bad DoubleValue: %s", value) + } + f.FieldByName("Value").SetUint(uint64Val) + return nil + case "BoolValue": + if value == "true" { + f.FieldByName("Value").SetBool(true) + } else if value == "false" { + f.FieldByName("Value").SetBool(false) + } else { + return fmt.Errorf("bad BoolValue: %s", value) + } + return nil + case "StringValue": + f.FieldByName("Value").SetString(value) + return nil + case "BytesValue": + bytesVal, err := base64.StdEncoding.DecodeString(value) + if err != nil { 
+ return fmt.Errorf("bad BytesValue: %s", value) + } + f.FieldByName("Value").SetBytes(bytesVal) + return nil + case "FieldMask": + p := f.FieldByName("Paths") + for _, v := range strings.Split(value, ",") { + if v != "" { + p.Set(reflect.Append(p, reflect.ValueOf(v))) + } + } + return nil + } + + // Handle Time and Duration stdlib types + switch t := i.(type) { + case *time.Time: + pt, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + *t = pt + return nil + case *time.Duration: + d, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + *t = d + return nil + } + + // is the destination field an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return 
nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 0000000000..cf79a4d588 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. +package utilities diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go new file mode 100644 index 0000000000..dfe7de4864 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go @@ -0,0 +1,22 @@ +package utilities + +// An OpCode is a opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to stack + OpPush + // OpLitPush pushes a component to stack if it matches to the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes it to stack + OpPushM + // OpConcatN pops N items from stack, concatenates them and pushes it back to stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode. 
+ OpEnd +) diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go new file mode 100644 index 0000000000..6dd3854665 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go @@ -0,0 +1,20 @@ +package utilities + +import ( + "bytes" + "io" + "io/ioutil" +) + +// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins +// at the start of the stream +func IOReaderFactory(r io.Reader) (func() io.Reader, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return func() io.Reader { + return bytes.NewReader(b) + }, nil +} diff --git a/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go new file mode 100644 index 0000000000..c2b7b30dd9 --- /dev/null +++ b/test/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go @@ -0,0 +1,177 @@ +package utilities + +import ( + "sort" +) + +// DoubleArray is a Double Array implementation of trie on sequences of strings. +type DoubleArray struct { + // Encoding keeps an encoding from string to int + Encoding map[string]int + // Base is the base array of Double Array + Base []int + // Check is the check array of Double Array + Check []int +} + +// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + var encoded []int + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { 
+ continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + if k < len(sj) { + return true + } + return false +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. +func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/LICENSE b/test/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000000..b03310a91f --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. diff --git a/test/vendor/github.com/jmespath/go-jmespath/api.go b/test/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000000..8e26ffeecf --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/test/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000000..1cd2d239c9 --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/functions.go b/test/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000000..9b7cd89b4b --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType 
= "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + 
name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } 
+ if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), 
nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal 
{ + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = 
item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. 
+ items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/interpreter.go b/test/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000000..13c74604c2 --- /dev/null +++ 
b/test/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". +func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := 
node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) 
+ } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. + rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + 
} + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + } + return nil, 
errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) 
+ elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/lexer.go b/test/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000000..817900c8f5 --- /dev/null +++ 
b/test/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. 
+func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. 
+func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. + t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' 
{ + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. 
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/parser.go b/test/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000000..1240a17552 --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. 
+// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, 
+ }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, 
right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions 
[]ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := 
p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) 
SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/test/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000000..dae79cbdf3 --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/test/vendor/github.com/jmespath/go-jmespath/util.go b/test/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000000..ddc1b7d7d4 --- /dev/null +++ b/test/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. 
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/test/vendor/github.com/knative/pkg/apis/condition_set.go b/test/vendor/github.com/knative/pkg/apis/condition_set.go deleted file mode 100644 index d4c70098e1..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/condition_set.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apis - -import ( - "reflect" - "sort" - "time" - - "fmt" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Conditions is the interface for a Resource that implements the getter and -// setter for accessing a Condition collection. -// +k8s:deepcopy-gen=true -type ConditionsAccessor interface { - GetConditions() Conditions - SetConditions(Conditions) -} - -// ConditionSet is an abstract collection of the possible ConditionType values -// that a particular resource might expose. It also holds the "happy condition" -// for that resource, which we define to be one of Ready or Succeeded depending -// on whether it is a Living or Batch process respectively. -// +k8s:deepcopy-gen=false -type ConditionSet struct { - happy ConditionType - dependents []ConditionType -} - -// ConditionManager allows a resource to operate on its Conditions using higher -// order operations. -type ConditionManager interface { - // IsHappy looks at the happy condition and returns true if that condition is - // set to true. - IsHappy() bool - - // GetCondition finds and returns the Condition that matches the ConditionType - // previously set on Conditions. - GetCondition(t ConditionType) *Condition - - // SetCondition sets or updates the Condition on Conditions for Condition.Type. - // If there is an update, Conditions are stored back sorted. - SetCondition(new Condition) - - // MarkTrue sets the status of t to true, and then marks the happy condition to - // true if all dependents are true. - MarkTrue(t ConditionType) - - // MarkUnknown sets the status of t to Unknown and also sets the happy condition - // to Unknown if no other dependent condition is in an error state. - MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) - - // MarkFalse sets the status of t and the happy condition to False. 
- MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) - - // InitializeConditions updates all Conditions in the ConditionSet to Unknown - // if not set. - InitializeConditions() -} - -// NewLivingConditionSet returns a ConditionSet to hold the conditions for the -// living resource. ConditionReady is used as the happy condition. -// The set of condition types provided are those of the terminal subconditions. -func NewLivingConditionSet(d ...ConditionType) ConditionSet { - return newConditionSet(ConditionReady, d...) -} - -// NewBatchConditionSet returns a ConditionSet to hold the conditions for the -// batch resource. ConditionSucceeded is used as the happy condition. -// The set of condition types provided are those of the terminal subconditions. -func NewBatchConditionSet(d ...ConditionType) ConditionSet { - return newConditionSet(ConditionSucceeded, d...) -} - -// newConditionSet returns a ConditionSet to hold the conditions that are -// important for the caller. The first ConditionType is the overarching status -// for that will be used to signal the resources' status is Ready or Succeeded. -func newConditionSet(happy ConditionType, dependents ...ConditionType) ConditionSet { - var deps []ConditionType - for _, d := range dependents { - // Skip duplicates - if d == happy || contains(deps, d) { - continue - } - deps = append(deps, d) - } - return ConditionSet{ - happy: happy, - dependents: deps, - } -} - -func contains(ct []ConditionType, t ConditionType) bool { - for _, c := range ct { - if c == t { - return true - } - } - return false -} - -// Check that conditionsImpl implements ConditionManager. -var _ ConditionManager = (*conditionsImpl)(nil) - -// conditionsImpl implements the helper methods for evaluating Conditions. 
-// +k8s:deepcopy-gen=false -type conditionsImpl struct { - ConditionSet - accessor ConditionsAccessor -} - -// Manage creates a ConditionManager from a accessor object using the original -// ConditionSet as a reference. Status must be or point to a struct. -func (r ConditionSet) Manage(status ConditionsAccessor) ConditionManager { - return conditionsImpl{ - accessor: status, - ConditionSet: r, - } -} - -// IsHappy looks at the happy condition and returns true if that condition is -// set to true. -func (r conditionsImpl) IsHappy() bool { - if c := r.GetCondition(r.happy); c == nil || !c.IsTrue() { - return false - } - return true -} - -// GetCondition finds and returns the Condition that matches the ConditionType -// previously set on Conditions. -func (r conditionsImpl) GetCondition(t ConditionType) *Condition { - if r.accessor == nil { - return nil - } - - for _, c := range r.accessor.GetConditions() { - if c.Type == t { - return &c - } - } - return nil -} - -// SetCondition sets or updates the Condition on Conditions for Condition.Type. -// If there is an update, Conditions are stored back sorted. -func (r conditionsImpl) SetCondition(new Condition) { - if r.accessor == nil { - return - } - t := new.Type - var conditions Conditions - for _, c := range r.accessor.GetConditions() { - if c.Type != t { - conditions = append(conditions, c) - } else { - // If we'd only update the LastTransitionTime, then return. - new.LastTransitionTime = c.LastTransitionTime - if reflect.DeepEqual(&new, &c) { - return - } - } - } - new.LastTransitionTime = VolatileTime{Inner: metav1.NewTime(time.Now())} - conditions = append(conditions, new) - // Sorted for convenience of the consumer, i.e. kubectl. 
- sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type }) - r.accessor.SetConditions(conditions) -} - -func (r conditionsImpl) isTerminal(t ConditionType) bool { - for _, cond := range r.dependents { - if cond == t { - return true - } - } - - if t == r.happy { - return true - } - - return false -} - -func (r conditionsImpl) severity(t ConditionType) ConditionSeverity { - if r.isTerminal(t) { - return ConditionSeverityError - } - return ConditionSeverityInfo -} - -// MarkTrue sets the status of t to true, and then marks the happy condition to -// true if all other dependents are also true. -func (r conditionsImpl) MarkTrue(t ConditionType) { - // set the specified condition - r.SetCondition(Condition{ - Type: t, - Status: corev1.ConditionTrue, - Severity: r.severity(t), - }) - - // check the dependents. - for _, cond := range r.dependents { - c := r.GetCondition(cond) - // Failed or Unknown conditions trump true conditions - if !c.IsTrue() { - return - } - } - - // set the happy condition - r.SetCondition(Condition{ - Type: r.happy, - Status: corev1.ConditionTrue, - Severity: r.severity(r.happy), - }) -} - -// MarkUnknown sets the status of t to Unknown and also sets the happy condition -// to Unknown if no other dependent condition is in an error state. -func (r conditionsImpl) MarkUnknown(t ConditionType, reason, messageFormat string, messageA ...interface{}) { - // set the specified condition - r.SetCondition(Condition{ - Type: t, - Status: corev1.ConditionUnknown, - Reason: reason, - Message: fmt.Sprintf(messageFormat, messageA...), - Severity: r.severity(t), - }) - - // check the dependents. - isDependent := false - for _, cond := range r.dependents { - c := r.GetCondition(cond) - // Failed conditions trump Unknown conditions - if c.IsFalse() { - // Double check that the happy condition is also false. - happy := r.GetCondition(r.happy) - if !happy.IsFalse() { - r.MarkFalse(r.happy, reason, messageFormat, messageA...) 
- } - return - } - if cond == t { - isDependent = true - } - } - - if isDependent { - // set the happy condition, if it is one of our dependent subconditions. - r.SetCondition(Condition{ - Type: r.happy, - Status: corev1.ConditionUnknown, - Reason: reason, - Message: fmt.Sprintf(messageFormat, messageA...), - Severity: r.severity(r.happy), - }) - } -} - -// MarkFalse sets the status of t and the happy condition to False. -func (r conditionsImpl) MarkFalse(t ConditionType, reason, messageFormat string, messageA ...interface{}) { - types := []ConditionType{t} - for _, cond := range r.dependents { - if cond == t { - types = append(types, r.happy) - } - } - - for _, t := range types { - r.SetCondition(Condition{ - Type: t, - Status: corev1.ConditionFalse, - Reason: reason, - Message: fmt.Sprintf(messageFormat, messageA...), - Severity: r.severity(t), - }) - } -} - -// InitializeConditions updates all Conditions in the ConditionSet to Unknown -// if not set. -func (r conditionsImpl) InitializeConditions() { - happy := r.GetCondition(r.happy) - if happy == nil { - happy = &Condition{ - Type: r.happy, - Status: corev1.ConditionUnknown, - Severity: ConditionSeverityError, - } - r.SetCondition(*happy) - } - // If the happy state is true, it implies that all of the terminal - // subconditions must be true, so initialize any unset conditions to - // true if our happy condition is true, otherwise unknown. - status := corev1.ConditionUnknown - if happy.Status == corev1.ConditionTrue { - status = corev1.ConditionTrue - } - for _, t := range r.dependents { - r.initializeTerminalCondition(t, status) - } -} - -// initializeTerminalCondition initializes a Condition to the given status if unset. 
-func (r conditionsImpl) initializeTerminalCondition(t ConditionType, status corev1.ConditionStatus) *Condition { - if c := r.GetCondition(t); c != nil { - return c - } - c := Condition{ - Type: t, - Status: status, - Severity: ConditionSeverityError, - } - r.SetCondition(c) - return &c -} diff --git a/test/vendor/github.com/knative/pkg/apis/condition_types.go b/test/vendor/github.com/knative/pkg/apis/condition_types.go deleted file mode 100644 index 8f5603c0fa..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/condition_types.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - corev1 "k8s.io/api/core/v1" -) - -// Conditions is the schema for the conditions portion of the payload -type Conditions []Condition - -// ConditionType is a camel-cased condition type. -type ConditionType string - -const ( - // ConditionReady specifies that the resource is ready. - // For long-running resources. - ConditionReady ConditionType = "Ready" - // ConditionSucceeded specifies that the resource has finished. - // For resource which run to completion. - ConditionSucceeded ConditionType = "Succeeded" -) - -// ConditionSeverity expresses the severity of a Condition Type failing. -type ConditionSeverity string - -const ( - // ConditionSeverityError specifies that a failure of a condition type - // should be viewed as an error. 
As "Error" is the default for conditions - // we use the empty string (coupled with omitempty) to avoid confusion in - // the case where the condition is in state "True" (aka nothing is wrong). - ConditionSeverityError ConditionSeverity = "" - // ConditionSeverityWarning specifies that a failure of a condition type - // should be viewed as a warning, but that things could still work. - ConditionSeverityWarning ConditionSeverity = "Warning" - // ConditionSeverityInfo specifies that a failure of a condition type - // should be viewed as purely informational, and that things could still work. - ConditionSeverityInfo ConditionSeverity = "Info" -) - -// Conditions defines a readiness condition for a Knative resource. -// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties -// +k8s:deepcopy-gen=true -type Condition struct { - // Type of condition. - // +required - Type ConditionType `json:"type" description:"type of status condition"` - - // Status of the condition, one of True, False, Unknown. - // +required - Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` - - // Severity with which to treat failures of this type of condition. - // When this is not specified, it defaults to Error. - // +optional - Severity ConditionSeverity `json:"severity,omitempty" description:"how to interpret failures of this condition, one of Error, Warning, Info"` - - // LastTransitionTime is the last time the condition transitioned from one status to another. - // We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic - // differences (all other things held constant). - // +optional - LastTransitionTime VolatileTime `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` - - // The reason for the condition's last transition. 
- // +optional - Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` - - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` -} - -// IsTrue is true if the condition is True -func (c *Condition) IsTrue() bool { - if c == nil { - return false - } - return c.Status == corev1.ConditionTrue -} - -// IsFalse is true if the condition is False -func (c *Condition) IsFalse() bool { - if c == nil { - return false - } - return c.Status == corev1.ConditionFalse -} - -// IsUnknown is true if the condition is Unknown -func (c *Condition) IsUnknown() bool { - if c == nil { - return true - } - return c.Status == corev1.ConditionUnknown -} diff --git a/test/vendor/github.com/knative/pkg/apis/contexts.go b/test/vendor/github.com/knative/pkg/apis/contexts.go deleted file mode 100644 index 287761e16c..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/contexts.go +++ /dev/null @@ -1,182 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "context" - - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being created. 
-type inCreateKey struct{} - -// WithinCreate is used to note that the webhook is calling within -// the context of a Create operation. -func WithinCreate(ctx context.Context) context.Context { - return context.WithValue(ctx, inCreateKey{}, struct{}{}) -} - -// IsInCreate checks whether the context is a Create. -func IsInCreate(ctx context.Context) bool { - return ctx.Value(inCreateKey{}) != nil -} - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being updated. -type inUpdateKey struct{} - -type updatePayload struct { - base interface{} - subresource string -} - -// WithinUpdate is used to note that the webhook is calling within -// the context of a Update operation. -func WithinUpdate(ctx context.Context, base interface{}) context.Context { - return context.WithValue(ctx, inUpdateKey{}, &updatePayload{ - base: base, - }) -} - -// WithinSubResourceUpdate is used to note that the webhook is calling within -// the context of a Update operation on a subresource. -func WithinSubResourceUpdate(ctx context.Context, base interface{}, sr string) context.Context { - return context.WithValue(ctx, inUpdateKey{}, &updatePayload{ - base: base, - subresource: sr, - }) -} - -// IsInUpdate checks whether the context is an Update. -func IsInUpdate(ctx context.Context) bool { - return ctx.Value(inUpdateKey{}) != nil -} - -// IsInStatusUpdate checks whether the context is an Update. -func IsInStatusUpdate(ctx context.Context) bool { - value := ctx.Value(inUpdateKey{}) - if value == nil { - return false - } - up := value.(*updatePayload) - return up.subresource == "status" -} - -// GetBaseline returns the baseline of the update, or nil when we -// are not within an update context. 
-func GetBaseline(ctx context.Context) interface{} { - value := ctx.Value(inUpdateKey{}) - if value == nil { - return nil - } - return value.(*updatePayload).base -} - -// This is attached to contexts passed to webhook interfaces when -// the receiver being validated is being created. -type userInfoKey struct{} - -// WithUserInfo is used to note that the webhook is calling within -// the context of a Create operation. -func WithUserInfo(ctx context.Context, ui *authenticationv1.UserInfo) context.Context { - return context.WithValue(ctx, userInfoKey{}, ui) -} - -// GetUserInfo accesses the UserInfo attached to the webhook context. -func GetUserInfo(ctx context.Context) *authenticationv1.UserInfo { - if ui, ok := ctx.Value(userInfoKey{}).(*authenticationv1.UserInfo); ok { - return ui - } - return nil -} - -// This is attached to contexts as they are passed down through a resource -// being validated or defaulted to signal the ObjectMeta of the enclosing -// resource. -type parentMetaKey struct{} - -// WithinParent attaches the ObjectMeta of the resource enclosing the -// nested resources we are validating. This is intended for use with -// interfaces like apis.Defaultable and apis.Validatable. -func WithinParent(ctx context.Context, om metav1.ObjectMeta) context.Context { - return context.WithValue(ctx, parentMetaKey{}, om) -} - -// ParentMeta accesses the ObjectMeta of the enclosing parent resource -// from the context. See WithinParent for how to attach the parent's -// ObjectMeta to the context. -func ParentMeta(ctx context.Context) metav1.ObjectMeta { - if om, ok := ctx.Value(parentMetaKey{}).(metav1.ObjectMeta); ok { - return om - } - return metav1.ObjectMeta{} -} - -// This is attached to contexts as they are passed down through a resource -// being validated or defaulted to signal that we are within a Spec. -type inSpec struct{} - -// WithinSpec notes on the context that further validation or defaulting -// is within the context of a Spec. 
This is intended for use with -// interfaces like apis.Defaultable and apis.Validatable. -func WithinSpec(ctx context.Context) context.Context { - return context.WithValue(ctx, inSpec{}, struct{}{}) -} - -// IsInSpec returns whether the context of validation or defaulting is -// the Spec of the parent resource. -func IsInSpec(ctx context.Context) bool { - return ctx.Value(inSpec{}) != nil -} - -// This is attached to contexts as they are passed down through a resource -// being validated or defaulted to signal that we are within a Status. -type inStatus struct{} - -// WithinStatus notes on the context that further validation or defaulting -// is within the context of a Status. This is intended for use with -// interfaces like apis.Defaultable and apis.Validatable. -func WithinStatus(ctx context.Context) context.Context { - return context.WithValue(ctx, inStatus{}, struct{}{}) -} - -// IsInStatus returns whether the context of validation or defaulting is -// the Status of the parent resource. -func IsInStatus(ctx context.Context) bool { - return ctx.Value(inStatus{}) != nil -} - -// This is attached to contexts as they are passed down through a resource -// being validated to direct them to disallow deprecated fields. -type disallowDeprecated struct{} - -// DisallowDeprecated notes on the context that further validation -// should disallow the used of deprecated fields. This may be used -// to ensure that new paths through resources to a common type don't -// allow the mistakes of old versions to be introduced. -func DisallowDeprecated(ctx context.Context) context.Context { - return context.WithValue(ctx, disallowDeprecated{}, struct{}{}) -} - -// IsDeprecatedAllowed checks the context to see whether deprecated fields -// are allowed. 
-func IsDeprecatedAllowed(ctx context.Context) bool { - return ctx.Value(disallowDeprecated{}) == nil -} diff --git a/test/vendor/github.com/knative/pkg/apis/deprecated.go b/test/vendor/github.com/knative/pkg/apis/deprecated.go deleted file mode 100644 index c73f5be7c9..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/deprecated.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "context" - "reflect" - "strings" -) - -const ( - deprecatedPrefix = "Deprecated" -) - -// CheckDeprecated checks whether the provided named deprecated fields -// are set in a context where deprecation is disallowed. -// This is a shallow check. -func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError { - return CheckDeprecatedUpdate(ctx, obj, nil) -} - -// CheckDeprecated checks whether the provided named deprecated fields -// are set in a context where deprecation is disallowed. -// This is a json shallow check. We will recursively check inlined structs. 
-func CheckDeprecatedUpdate(ctx context.Context, obj interface{}, original interface{}) *FieldError { - if IsDeprecatedAllowed(ctx) { - return nil - } - - var errs *FieldError - objFields, objInlined := getPrefixedNamedFieldValues(deprecatedPrefix, obj) - - if nonZero(reflect.ValueOf(original)) { - originalFields, originalInlined := getPrefixedNamedFieldValues(deprecatedPrefix, original) - - // We only have to walk obj Fields because the assumption is that obj - // and original are of the same type. - for name, value := range objFields { - if nonZero(value) { - if differ(originalFields[name], value) { - // Not allowed to update the value. - errs = errs.Also(ErrDisallowedUpdateDeprecatedFields(name)) - } - } - } - // Look for deprecated inlined updates. - if len(objInlined) > 0 { - for name, value := range objInlined { - errs = errs.Also(CheckDeprecatedUpdate(ctx, value, originalInlined[name])) - } - } - } else { - for name, value := range objFields { - if nonZero(value) { - // Not allowed to set the value. - errs = errs.Also(ErrDisallowedFields(name)) - } - } - // Look for deprecated inlined creates. - if len(objInlined) > 0 { - for _, value := range objInlined { - errs = errs.Also(CheckDeprecated(ctx, value)) - } - } - } - return errs -} - -func getPrefixedNamedFieldValues(prefix string, obj interface{}) (map[string]reflect.Value, map[string]interface{}) { - fields := make(map[string]reflect.Value, 0) - inlined := make(map[string]interface{}, 0) - - objValue := reflect.Indirect(reflect.ValueOf(obj)) - - // If res is not valid or a struct, don't even try to use it. - if !objValue.IsValid() || objValue.Kind() != reflect.Struct { - return fields, inlined - } - - for i := 0; i < objValue.NumField(); i++ { - tf := objValue.Type().Field(i) - if v := objValue.Field(i); v.IsValid() { - jTag := tf.Tag.Get("json") - if strings.HasPrefix(tf.Name, prefix) { - name := strings.Split(jTag, ",")[0] - if name == "" { - // Default to field name in go struct if no json name. 
- name = tf.Name - } - fields[name] = v - } else if jTag == ",inline" { - inlined[tf.Name] = getInterface(v) - } - } - } - return fields, inlined -} - -// getInterface returns the interface value of the reflected object. -func getInterface(a reflect.Value) interface{} { - switch a.Kind() { - case reflect.Ptr: - if a.IsNil() { - return nil - } - return a.Elem().Interface() - - case reflect.Map, reflect.Slice, reflect.Array: - return a.Elem().Interface() - - // This is a nil interface{} type. - case reflect.Invalid: - return nil - - default: - return a.Interface() - } -} - -// nonZero returns true if a is nil or reflect.Zero. -func nonZero(a reflect.Value) bool { - switch a.Kind() { - case reflect.Ptr: - if a.IsNil() { - return false - } - return nonZero(a.Elem()) - - case reflect.Map, reflect.Slice, reflect.Array: - if a.IsNil() { - return false - } - return true - - // This is a nil interface{} type. - case reflect.Invalid: - return false - - default: - if reflect.DeepEqual(a.Interface(), reflect.Zero(a.Type()).Interface()) { - return false - } - return true - } -} - -// differ returns true if a != b -func differ(a, b reflect.Value) bool { - if a.Kind() != b.Kind() { - return true - } - - switch a.Kind() { - case reflect.Ptr: - if a.IsNil() || b.IsNil() { - return a.IsNil() != b.IsNil() - } - return differ(a.Elem(), b.Elem()) - - default: - if reflect.DeepEqual(a.Interface(), b.Interface()) { - return false - } - return true - } -} diff --git a/test/vendor/github.com/knative/pkg/apis/field_error.go b/test/vendor/github.com/knative/pkg/apis/field_error.go deleted file mode 100644 index 8b56be976e..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/field_error.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Copyright 2017 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "fmt" - "sort" - "strings" - - "github.com/knative/pkg/kmp" -) - -// CurrentField is a constant to supply as a fieldPath for when there is -// a problem with the current field itself. -const CurrentField = "" - -// FieldError is used to propagate the context of errors pertaining to -// specific fields in a manner suitable for use in a recursive walk, so -// that errors contain the appropriate field context. -// FieldError methods are non-mutating. -// +k8s:deepcopy-gen=true -type FieldError struct { - Message string - Paths []string - // Details contains an optional longer payload. - // +optional - Details string - errors []FieldError -} - -// FieldError implements error -var _ error = (*FieldError)(nil) - -// ViaField is used to propagate a validation error along a field access. -// For example, if a type recursively validates its "spec" via: -// if err := foo.Spec.Validate(); err != nil { -// // Augment any field paths with the context that they were accessed -// // via "spec". -// return err.ViaField("spec") -// } -func (fe *FieldError) ViaField(prefix ...string) *FieldError { - if fe == nil { - return nil - } - // Copy over message and details, paths will be updated and errors come - // along using .Also(). - newErr := &FieldError{ - Message: fe.Message, - Details: fe.Details, - } - - // Prepend the Prefix to existing errors. 
- newPaths := make([]string, 0, len(fe.Paths)) - for _, oldPath := range fe.Paths { - newPaths = append(newPaths, flatten(append(prefix, oldPath))) - } - newErr.Paths = newPaths - for _, e := range fe.errors { - newErr = newErr.Also(e.ViaField(prefix...)) - } - return newErr -} - -// ViaIndex is used to attach an index to the next ViaField provided. -// For example, if a type recursively validates a parameter that has a collection: -// for i, c := range spec.Collection { -// if err := doValidation(c); err != nil { -// return err.ViaIndex(i).ViaField("collection") -// } -// } -func (fe *FieldError) ViaIndex(index int) *FieldError { - return fe.ViaField(asIndex(index)) -} - -// ViaFieldIndex is the short way to chain: err.ViaIndex(bar).ViaField(foo) -func (fe *FieldError) ViaFieldIndex(field string, index int) *FieldError { - return fe.ViaIndex(index).ViaField(field) -} - -// ViaKey is used to attach a key to the next ViaField provided. -// For example, if a type recursively validates a parameter that has a collection: -// for k, v := range spec.Bag { -// if err := doValidation(v); err != nil { -// return err.ViaKey(k).ViaField("bag") -// } -// } -func (fe *FieldError) ViaKey(key string) *FieldError { - return fe.ViaField(asKey(key)) -} - -// ViaFieldKey is the short way to chain: err.ViaKey(bar).ViaField(foo) -func (fe *FieldError) ViaFieldKey(field string, key string) *FieldError { - return fe.ViaKey(key).ViaField(field) -} - -// Also collects errors, returns a new collection of existing errors and new errors. 
-func (fe *FieldError) Also(errs ...*FieldError) *FieldError { - var newErr *FieldError - // collect the current objects errors, if it has any - if !fe.isEmpty() { - newErr = fe.DeepCopy() - } else { - newErr = &FieldError{} - } - // and then collect the passed in errors - for _, e := range errs { - if !e.isEmpty() { - newErr.errors = append(newErr.errors, *e) - } - } - if newErr.isEmpty() { - return nil - } - return newErr -} - -func (fe *FieldError) isEmpty() bool { - if fe == nil { - return true - } - return fe.Message == "" && fe.Details == "" && len(fe.errors) == 0 && len(fe.Paths) == 0 -} - -// normalized returns a flattened copy of all the errors. -func (fe *FieldError) normalized() []*FieldError { - // In case we call normalized on a nil object, return just an empty - // list. This can happen when .Error() is called on a nil object. - if fe == nil { - return []*FieldError(nil) - } - - // Allocate errors with at least as many objects as we'll get on the first pass. - errors := make([]*FieldError, 0, len(fe.errors)+1) - // If this FieldError is a leaf, add it. - if fe.Message != "" { - errors = append(errors, &FieldError{ - Message: fe.Message, - Paths: fe.Paths, - Details: fe.Details, - }) - } - // And then collect all other errors recursively. - for _, e := range fe.errors { - errors = append(errors, e.normalized()...) - } - return errors -} - -// Error implements error -func (fe *FieldError) Error() string { - // Get the list of errors as a flat merged list. 
- normedErrors := merge(fe.normalized()) - errs := make([]string, 0, len(normedErrors)) - for _, e := range normedErrors { - if e.Details == "" { - errs = append(errs, fmt.Sprintf("%v: %v", e.Message, strings.Join(e.Paths, ", "))) - } else { - errs = append(errs, fmt.Sprintf("%v: %v\n%v", e.Message, strings.Join(e.Paths, ", "), e.Details)) - } - } - return strings.Join(errs, "\n") -} - -// Helpers --- - -func asIndex(index int) string { - return fmt.Sprintf("[%d]", index) -} - -func isIndex(part string) bool { - return strings.HasPrefix(part, "[") && strings.HasSuffix(part, "]") -} - -func asKey(key string) string { - return fmt.Sprintf("[%s]", key) -} - -// flatten takes in a array of path components and looks for chances to flatten -// objects that have index prefixes, examples: -// err([0]).ViaField(bar).ViaField(foo) -> foo.bar.[0] converts to foo.bar[0] -// err(bar).ViaIndex(0).ViaField(foo) -> foo.[0].bar converts to foo[0].bar -// err(bar).ViaField(foo).ViaIndex(0) -> [0].foo.bar converts to [0].foo.bar -// err(bar).ViaIndex(0).ViaIndex(1).ViaField(foo) -> foo.[1].[0].bar converts to foo[1][0].bar -func flatten(path []string) string { - var newPath []string - for _, part := range path { - for _, p := range strings.Split(part, ".") { - if p == CurrentField { - continue - } else if len(newPath) > 0 && isIndex(p) { - newPath[len(newPath)-1] += p - } else { - newPath = append(newPath, p) - } - } - } - return strings.Join(newPath, ".") -} - -// mergePaths takes in two string slices and returns the combination of them -// without any duplicate entries. -func mergePaths(a, b []string) []string { - newPaths := make([]string, 0, len(a)+len(b)) - newPaths = append(newPaths, a...) - for _, bi := range b { - if !containsString(newPaths, bi) { - newPaths = append(newPaths, bi) - } - } - return newPaths -} - -// containsString takes in a string slice and looks for the provided string -// within the slice. 
-func containsString(slice []string, s string) bool { - for _, item := range slice { - if item == s { - return true - } - } - return false -} - -// merge takes in a flat list of FieldErrors and returns back a merged list of -// FieldErrors. FieldErrors have their Paths combined (and de-duped) if their -// Message and Details are the same. Merge will not inspect FieldError.errors. -// Merge will also sort the .Path slice, and the errors slice before returning. -func merge(errs []*FieldError) []*FieldError { - // make a map big enough for all the errors. - m := make(map[string]*FieldError, len(errs)) - - // Convert errs to a map where the key is -
and the value - // is the error. If an error already exists in the map with the same key, - // then the paths will be merged. - for _, e := range errs { - k := key(e) - if v, ok := m[k]; ok { - // Found a match, merge the keys. - v.Paths = mergePaths(v.Paths, e.Paths) - } else { - // Does not exist in the map, save the error. - m[k] = e - } - } - - // Take the map made previously and flatten it back out again. - newErrs := make([]*FieldError, 0, len(m)) - for _, v := range m { - // While we have access to the merged paths, sort them too. - sort.Slice(v.Paths, func(i, j int) bool { return v.Paths[i] < v.Paths[j] }) - newErrs = append(newErrs, v) - } - - // Sort the flattened map. - sort.Slice(newErrs, func(i, j int) bool { - if newErrs[i].Message == newErrs[j].Message { - return newErrs[i].Details < newErrs[j].Details - } - return newErrs[i].Message < newErrs[j].Message - }) - - // return back the merged list of sorted errors. - return newErrs -} - -// key returns the key using the fields .Message and .Details. -func key(err *FieldError) string { - return fmt.Sprintf("%s-%s", err.Message, err.Details) -} - -// Public helpers --- - -// ErrMissingField is a variadic helper method for constructing a FieldError for -// a set of missing fields. -func ErrMissingField(fieldPaths ...string) *FieldError { - return &FieldError{ - Message: "missing field(s)", - Paths: fieldPaths, - } -} - -// ErrDisallowedFields is a variadic helper method for constructing a FieldError -// for a set of disallowed fields. -func ErrDisallowedFields(fieldPaths ...string) *FieldError { - return &FieldError{ - Message: "must not set the field(s)", - Paths: fieldPaths, - } -} - -// ErrDisallowedUpdateDeprecatedFields is a variadic helper method for -// constructing a FieldError for updating of deprecated fields. 
-func ErrDisallowedUpdateDeprecatedFields(fieldPaths ...string) *FieldError { - return &FieldError{ - Message: "must not update deprecated field(s)", - Paths: fieldPaths, - } -} - -// ErrInvalidArrayValue constructs a FieldError for a repetetive `field` -// at `index` that has received an invalid string value. -func ErrInvalidArrayValue(value interface{}, field string, index int) *FieldError { - return ErrInvalidValue(value, CurrentField).ViaFieldIndex(field, index) -} - -// ErrInvalidValue constructs a FieldError for a field that has received an -// invalid string value. -func ErrInvalidValue(value interface{}, fieldPath string) *FieldError { - return &FieldError{ - Message: fmt.Sprintf("invalid value: %v", value), - Paths: []string{fieldPath}, - } -} - -// ErrMissingOneOf is a variadic helper method for constructing a FieldError for -// not having at least one field in a mutually exclusive field group. -func ErrMissingOneOf(fieldPaths ...string) *FieldError { - return &FieldError{ - Message: "expected exactly one, got neither", - Paths: fieldPaths, - } -} - -// ErrMultipleOneOf is a variadic helper method for constructing a FieldError -// for having more than one field set in a mutually exclusive field group. -func ErrMultipleOneOf(fieldPaths ...string) *FieldError { - return &FieldError{ - Message: "expected exactly one, got both", - Paths: fieldPaths, - } -} - -// ErrInvalidKeyName is a variadic helper method for constructing a FieldError -// that specifies a key name that is invalid. -func ErrInvalidKeyName(key, fieldPath string, details ...string) *FieldError { - return &FieldError{ - Message: fmt.Sprintf("invalid key name %q", key), - Paths: []string{fieldPath}, - Details: strings.Join(details, ", "), - } -} - -// ErrOutOfBoundsValue constructs a FieldError for a field that has received an -// out of bound value. 
-func ErrOutOfBoundsValue(value, lower, upper interface{}, fieldPath string) *FieldError { - return &FieldError{ - Message: fmt.Sprintf("expected %v <= %v <= %v", lower, value, upper), - Paths: []string{fieldPath}, - } -} - -// CheckDisallowedFields compares the request object against a masked request object. Fields -// that are set in the request object that are unset in the mask are reported back as disallowed fields. If -// there is an error comparing the two objects FieldError of "Internal Error" is returned. -func CheckDisallowedFields(request, maskedRequest interface{}) *FieldError { - if disallowed, err := kmp.CompareSetFields(request, maskedRequest); err != nil { - return &FieldError{ - Message: fmt.Sprintf("Internal Error"), - Paths: []string{CurrentField}, - } - } else if len(disallowed) > 0 { - return ErrDisallowedFields(disallowed...) - } - return nil -} diff --git a/test/vendor/github.com/knative/pkg/apis/interfaces.go b/test/vendor/github.com/knative/pkg/apis/interfaces.go deleted file mode 100644 index 601d083dd6..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/interfaces.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" -) - -// Defaultable defines an interface for setting the defaults for the -// uninitialized fields of this instance. 
-type Defaultable interface { - SetDefaults(context.Context) -} - -// Validatable indicates that a particular type may have its fields validated. -type Validatable interface { - // Validate checks the validity of this types fields. - Validate(context.Context) *FieldError -} - -// Convertible indicates that a particular type supports conversions to/from -// "higher" versions of the same type. -type Convertible interface { - // ConvertUp up-converts the receiver into `to`. - ConvertUp(ctx context.Context, to Convertible) error - - // ConvertDown down-converts from `from` into the receiver. - ConvertDown(ctx context.Context, from Convertible) error -} - -// Immutable indicates that a particular type has fields that should -// not change after creation. -// DEPRECATED: Use WithinUpdate / GetBaseline from within Validatable instead. -type Immutable interface { - // CheckImmutableFields checks that the current instance's immutable - // fields haven't changed from the provided original. - CheckImmutableFields(ctx context.Context, original Immutable) *FieldError -} - -// Listable indicates that a particular type can be returned via the returned -// list type by the API server. -type Listable interface { - runtime.Object - - GetListType() runtime.Object -} - -// Annotatable indicates that a particular type applies various annotations. -// DEPRECATED: Use WithUserInfo / GetUserInfo from within SetDefaults instead. -// The webhook functionality for this has been turned down, which is why this -// interface is empty. 
-type Annotatable interface{} diff --git a/test/vendor/github.com/knative/pkg/apis/kind2resource.go b/test/vendor/github.com/knative/pkg/apis/kind2resource.go deleted file mode 100644 index 37ffe08034..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/kind2resource.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// KindToResource converts a GroupVersionKind to a GroupVersionResource -// through the world's simplest (worst) pluralizer. -func KindToResource(gvk schema.GroupVersionKind) schema.GroupVersionResource { - return schema.GroupVersionResource{ - Group: gvk.Group, - Version: gvk.Version, - Resource: pluralizeKind(gvk.Kind), - } -} - -// Takes a kind and pluralizes it. This is super terrible, but I am -// not aware of a generic way to do this. 
-// I am not alone in thinking this and I haven't found a better solution: -// This seems relevant: -// https://github.com/kubernetes/kubernetes/issues/18622 -func pluralizeKind(kind string) string { - ret := strings.ToLower(kind) - if strings.HasSuffix(ret, "s") { - return fmt.Sprintf("%ses", ret) - } - return fmt.Sprintf("%ss", ret) -} diff --git a/test/vendor/github.com/knative/pkg/apis/metadata_validation.go b/test/vendor/github.com/knative/pkg/apis/metadata_validation.go deleted file mode 100644 index d65fa7dd21..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/metadata_validation.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ValidateObjectMetadata validates that `metadata` stanza of the -// resources is correct. 
-func ValidateObjectMetadata(meta metav1.Object) *FieldError { - name := meta.GetName() - generateName := meta.GetGenerateName() - - if generateName != "" { - msgs := validation.NameIsDNS1035Label(generateName, true) - - if len(msgs) > 0 { - return &FieldError{ - Message: fmt.Sprintf("not a DNS 1035 label prefix: %v", msgs), - Paths: []string{"generateName"}, - } - } - } - - if name != "" { - msgs := validation.NameIsDNS1035Label(name, false) - - if len(msgs) > 0 { - return &FieldError{ - Message: fmt.Sprintf("not a DNS 1035 label: %v", msgs), - Paths: []string{"name"}, - } - } - } - - if generateName == "" && name == "" { - return &FieldError{ - Message: "name or generateName is required", - Paths: []string{"name"}, - } - } - - return nil -} diff --git a/test/vendor/github.com/knative/pkg/apis/url.go b/test/vendor/github.com/knative/pkg/apis/url.go deleted file mode 100644 index c0402016f1..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/url.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apis - -import ( - "encoding/json" - "fmt" - "net/url" -) - -// URL is an alias of url.URL. -// It has custom json marshal methods that enable it to be used in K8s CRDs -// such that the CRD resource will have the URL but operator code can can work with url.URL struct -type URL url.URL - -// ParseURL attempts to parse the given string as a URL. 
-func ParseURL(u string) (*URL, error) { - if u == "" { - return nil, nil - } - pu, err := url.Parse(u) - if err != nil { - return nil, err - } - return (*URL)(pu), nil -} - -// MarshalJSON implements a custom json marshal method used when this type is -// marshaled using json.Marshal. -// json.Marshaler impl -func (u URL) MarshalJSON() ([]byte, error) { - b := fmt.Sprintf("%q", u.String()) - return []byte(b), nil -} - -// UnmarshalJSON implements the json unmarshal method used when this type is -// unmarsheled using json.Unmarshal. -// json.Unmarshaler impl -func (u *URL) UnmarshalJSON(b []byte) error { - var ref string - if err := json.Unmarshal(b, &ref); err != nil { - return err - } - r, err := ParseURL(ref) - if err != nil { - return err - } - *u = *r - return nil -} - -// String returns the full string representation of the URL. -func (u *URL) String() string { - if u == nil { - return "" - } - uu := url.URL(*u) - return uu.String() -} diff --git a/test/vendor/github.com/knative/pkg/apis/volatile_time.go b/test/vendor/github.com/knative/pkg/apis/volatile_time.go deleted file mode 100644 index 3d2daa2772..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/volatile_time.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2018 The Knative Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apis - -import ( - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// VolatileTime wraps metav1.Time -type VolatileTime struct { - Inner metav1.Time -} - -// MarshalJSON implements the json.Marshaler interface. -func (t VolatileTime) MarshalJSON() ([]byte, error) { - return t.Inner.MarshalJSON() -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (t *VolatileTime) UnmarshalJSON(b []byte) error { - return t.Inner.UnmarshalJSON(b) -} - -func init() { - equality.Semantic.AddFunc( - // Always treat VolatileTime fields as equivalent. - func(a, b VolatileTime) bool { - return true - }, - ) -} diff --git a/test/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go b/test/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go deleted file mode 100644 index be670d4a87..0000000000 --- a/test/vendor/github.com/knative/pkg/apis/zz_generated.deepcopy.go +++ /dev/null @@ -1,130 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package apis - -import ( - url "net/url" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Conditions) DeepCopyInto(out *Conditions) { - { - in := &in - *out = make(Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. -func (in Conditions) DeepCopy() Conditions { - if in == nil { - return nil - } - out := new(Conditions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FieldError) DeepCopyInto(out *FieldError) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.errors != nil { - in, out := &in.errors, &out.errors - *out = make([]FieldError, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldError. -func (in *FieldError) DeepCopy() *FieldError { - if in == nil { - return nil - } - out := new(FieldError) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *URL) DeepCopyInto(out *URL) { - *out = *in - if in.User != nil { - in, out := &in.User, &out.User - *out = new(url.Userinfo) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new URL. -func (in *URL) DeepCopy() *URL { - if in == nil { - return nil - } - out := new(URL) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolatileTime) DeepCopyInto(out *VolatileTime) { - *out = *in - in.Inner.DeepCopyInto(&out.Inner) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolatileTime. -func (in *VolatileTime) DeepCopy() *VolatileTime { - if in == nil { - return nil - } - out := new(VolatileTime) - in.DeepCopyInto(out) - return out -} diff --git a/test/vendor/github.com/knative/pkg/kmeta/names.go b/test/vendor/github.com/knative/pkg/kmeta/names.go deleted file mode 100644 index 1efa7108b6..0000000000 --- a/test/vendor/github.com/knative/pkg/kmeta/names.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -copyright 2019 the knative authors - -licensed under the apache license, version 2.0 (the "license"); -you may not use this file except in compliance with the license. -you may obtain a copy of the license at - - http://www.apache.org/licenses/license-2.0 - -unless required by applicable law or agreed to in writing, software -distributed under the license is distributed on an "as is" basis, -without warranties or conditions of any kind, either express or implied. -see the license for the specific language governing permissions and -limitations under the license. -*/ - -package kmeta - -import ( - "crypto/md5" - "fmt" -) - -// The longest name supported by the K8s is 63. 
-// These constants -const ( - longest = 63 - md5Len = 32 - head = longest - md5Len -) - -// ChildName generates a name for the resource based upong the parent resource and suffix. -// If the concatenated name is longer than K8s permits the name is hashed and truncated to permit -// construction of the resource, but still keeps it unique. -func ChildName(parent, suffix string) string { - n := parent - if len(parent) > (longest - len(suffix)) { - n = fmt.Sprintf("%s%x", parent[:head-len(suffix)], md5.Sum([]byte(parent))) - } - return n + suffix -} diff --git a/test/vendor/github.com/knative/pkg/kmp/diff.go b/test/vendor/github.com/knative/pkg/kmp/diff.go deleted file mode 100644 index 09c041446b..0000000000 --- a/test/vendor/github.com/knative/pkg/kmp/diff.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kmp - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" - "k8s.io/apimachinery/pkg/api/resource" -) - -// Commonly used Comparers and other Options go here. -var defaultOpts []cmp.Option - -func init() { - defaultOpts = []cmp.Option{ - cmp.Comparer(func(x, y resource.Quantity) bool { - return x.Cmp(y) == 0 - }), - } -} - -// SafeDiff wraps cmp.Diff but recovers from panics and uses custom Comparers for: -// * k8s.io/apimachinery/pkg/api/resource.Quantity -// SafeDiff should be used instead of cmp.Diff in non-test code to protect the running -// process from crashing. 
-func SafeDiff(x, y interface{}, opts ...cmp.Option) (diff string, err error) { - // cmp.Diff will panic if we miss something; return error instead of crashing. - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("recovered in kmp.SafeDiff: %v", r) - } - }() - - opts = append(opts, defaultOpts...) - diff = cmp.Diff(x, y, opts...) - - return -} - -// SafeEqual wraps cmp.Equal but recovers from panics and uses custom Comparers for: -// * k8s.io/apimachinery/pkg/api/resource.Quantity -// SafeEqual should be used instead of cmp.Equal in non-test code to protect the running -// process from crashing. -func SafeEqual(x, y interface{}, opts ...cmp.Option) (equal bool, err error) { - // cmp.Equal will panic if we miss something; return error instead of crashing. - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("recovered in kmp.SafeEqual: %v", r) - } - }() - - opts = append(opts, defaultOpts...) - equal = cmp.Equal(x, y, opts...) - - return -} - -// CompareSetFields returns a list of field names that differ between -// x and y. Uses SafeEqual for comparison. -func CompareSetFields(x, y interface{}, opts ...cmp.Option) ([]string, error) { - r := new(FieldListReporter) - opts = append(opts, cmp.Reporter(r)) - _, err := SafeEqual(x, y, opts...) - return r.Fields(), err -} - -// ShortDiff returns a zero-context, unified human-readable diff. -// Uses SafeEqual for comparison. 
-func ShortDiff(prev, cur interface{}, opts ...cmp.Option) (string, error) { - r := new(ShortDiffReporter) - opts = append(opts, cmp.Reporter(r)) - var err error - if _, err = SafeEqual(prev, cur, opts...); err != nil { - return "", err - } - return r.Diff() -} diff --git a/test/vendor/github.com/knative/pkg/kmp/reporters.go b/test/vendor/github.com/knative/pkg/kmp/reporters.go deleted file mode 100644 index e09cf2f37a..0000000000 --- a/test/vendor/github.com/knative/pkg/kmp/reporters.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kmp - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/google/go-cmp/cmp" -) - -// FieldListReporter implements the cmp.Reporter interface. It keeps -// track of the field names that differ between two structs and reports -// them through the Fields() function. -type FieldListReporter struct { - path cmp.Path - fieldNames []string -} - -// PushStep implements the cmp.Reporter. -func (r *FieldListReporter) PushStep(ps cmp.PathStep) { - r.path = append(r.path, ps) -} - -// fieldName returns a readable name for the field. If the field has JSON annotations it -// returns the JSON key. 
If the field does not have JSON annotations or the JSON annotation -// marks the field as ignored it returns the field's go name -func (r *FieldListReporter) fieldName() string { - if len(r.path) < 2 { - return r.path.Index(0).String() - } else { - fieldName := strings.TrimPrefix(r.path.Index(1).String(), ".") - // Prefer JSON name to fieldName if it exists - structField, exists := r.path.Index(0).Type().FieldByName(fieldName) - if exists { - tag := structField.Tag.Get("json") - if tag != "" && tag != "-" { - return strings.SplitN(tag, ",", 2)[0] - } - - } - return fieldName - } -} - -// Report implements the cmp.Reporter. -func (r *FieldListReporter) Report(rs cmp.Result) { - if rs.Equal() { - return - } - name := r.fieldName() - // Only append elements we don't already have. - for _, v := range r.fieldNames { - if name == v { - return - } - } - r.fieldNames = append(r.fieldNames, name) -} - -// PopStep implements cmp.Reporter. -func (r *FieldListReporter) PopStep() { - r.path = r.path[:len(r.path)-1] -} - -// Fields returns the field names that differed between the two -// objects after calling cmp.Equal with the FieldListReporter. Field names -// are returned in alphabetical order. -func (r *FieldListReporter) Fields() []string { - sort.Strings(r.fieldNames) - return r.fieldNames -} - -// ShortDiffReporter implements the cmp.Reporter interface. It reports -// on fields which have diffing values in a short zero-context, unified diff -// format. -type ShortDiffReporter struct { - path cmp.Path - diffs []string - err error -} - -// PushStep implements the cmp.Reporter. -func (r *ShortDiffReporter) PushStep(ps cmp.PathStep) { - r.path = append(r.path, ps) -} - -// Report implements the cmp.Reporter. 
-func (r *ShortDiffReporter) Report(rs cmp.Result) { - if rs.Equal() { - return - } - cur := r.path.Last() - vx, vy := cur.Values() - t := cur.Type() - var diff string - // Prefix struct values with the types to add clarity in output - if !vx.IsValid() || !vy.IsValid() { - r.err = fmt.Errorf("Unable to diff %+v and %+v on path %#v", vx, vy, r.path) - } else if t.Kind() == reflect.Struct { - diff = fmt.Sprintf("%#v:\n\t-: %+v: \"%+v\"\n\t+: %+v: \"%+v\"\n", r.path, t, vx, t, vy) - } else { - diff = fmt.Sprintf("%#v:\n\t-: \"%+v\"\n\t+: \"%+v\"\n", r.path, vx, vy) - } - r.diffs = append(r.diffs, diff) -} - -// PopStep implements the cmp.Reporter. -func (r *ShortDiffReporter) PopStep() { - r.path = r.path[:len(r.path)-1] -} - -// Diff returns the generated short diff for this object. -// cmp.Equal should be called before this method. -func (r *ShortDiffReporter) Diff() (string, error) { - if r.err != nil { - return "", r.err - } - return strings.Join(r.diffs, ""), nil -} diff --git a/test/vendor/github.com/knative/serving/config/300-imagecache.yaml b/test/vendor/github.com/knative/serving/config/300-imagecache.yaml deleted file mode 120000 index f10d6dacf6..0000000000 --- a/test/vendor/github.com/knative/serving/config/300-imagecache.yaml +++ /dev/null @@ -1 +0,0 @@ -../vendor/github.com/knative/caching/config/image.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/annotation_validation.go b/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/annotation_validation.go deleted file mode 100644 index 9777914f9e..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/annotation_validation.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "fmt" - "strconv" - - "github.com/knative/pkg/apis" -) - -func getIntGE0(m map[string]string, k string) (int64, *apis.FieldError) { - v, ok := m[k] - if !ok { - return 0, nil - } - i, err := strconv.ParseInt(v, 10, 32) - if err != nil || i < 0 { - return 0, &apis.FieldError{ - Message: fmt.Sprintf("Invalid %s annotation value: must be an integer equal or greater than 0", k), - Paths: []string{k}, - } - } - return i, nil -} - -func ValidateAnnotations(annotations map[string]string) *apis.FieldError { - if len(annotations) == 0 { - return nil - } - - min, err := getIntGE0(annotations, MinScaleAnnotationKey) - if err != nil { - return err - } - max, err := getIntGE0(annotations, MaxScaleAnnotationKey) - if err != nil { - return err - } - - if max != 0 && max < min { - return &apis.FieldError{ - Message: fmt.Sprintf("%s=%v is less than %s=%v", MaxScaleAnnotationKey, max, MinScaleAnnotationKey, min), - Paths: []string{MaxScaleAnnotationKey, MinScaleAnnotationKey}, - } - } - - return nil -} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go b/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go deleted file mode 100644 index 286f2a9af8..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/autoscaling" - "github.com/knative/serving/pkg/apis/serving" - "k8s.io/apimachinery/pkg/api/equality" -) - -func (pa *PodAutoscaler) Validate(ctx context.Context) *apis.FieldError { - errs := serving.ValidateObjectMetadata(pa.GetObjectMeta()).ViaField("metadata") - errs = errs.Also(pa.validateMetric()) - return errs.Also(pa.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) -} - -// Validate validates PodAutoscaler Spec. -func (rs *PodAutoscalerSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(rs, &PodAutoscalerSpec{}) { - return apis.ErrMissingField(apis.CurrentField) - } - errs := serving.ValidateNamespacedObjectReference(&rs.ScaleTargetRef).ViaField("scaleTargetRef") - errs = errs.Also(rs.ContainerConcurrency.Validate(ctx). - ViaField("containerConcurrency")) - return errs.Also(validateSKSFields(ctx, rs)) -} - -func validateSKSFields(ctx context.Context, rs *PodAutoscalerSpec) *apis.FieldError { - var all *apis.FieldError - // TODO(vagababov) stop permitting empty protocol type, once SKS controller is live. 
- if string(rs.ProtocolType) != "" { - all = all.Also(rs.ProtocolType.Validate(ctx)).ViaField("protocolType") - } - return all -} - -func (pa *PodAutoscaler) validateMetric() *apis.FieldError { - if metric, ok := pa.Annotations[autoscaling.MetricAnnotationKey]; ok { - switch pa.Class() { - case autoscaling.KPA: - switch metric { - case autoscaling.Concurrency: - return nil - } - case autoscaling.HPA: - switch metric { - case autoscaling.CPU, autoscaling.Concurrency: - return nil - } - // TODO: implement OPS autoscaling. - default: - // Leave other classes of PodAutoscaler alone. - return nil - } - return &apis.FieldError{ - Message: fmt.Sprintf("Unsupported metric %q for PodAutoscaler class %q", - metric, pa.Class()), - Paths: []string{"annotations[autoscaling.knative.dev/metric]"}, - } - } - return nil -} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/config/testdata/config-defaults.yaml b/test/vendor/github.com/knative/serving/pkg/apis/config/testdata/config-defaults.yaml deleted file mode 120000 index 4f00ffb571..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/config/testdata/config-defaults.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../config/config-defaults.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_lifecycle.go b/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_lifecycle.go deleted file mode 100644 index 92fefe85d9..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_lifecycle.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2019 The Knative Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func (ci *ClusterIngress) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind("ClusterIngress") -} - -// IsPublic returns whether the ClusterIngress should be exposed publicly. -func (ci *ClusterIngress) IsPublic() bool { - return ci.Spec.Visibility == "" || ci.Spec.Visibility == IngressVisibilityExternalIP -} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_types.go b/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_types.go deleted file mode 100644 index 1886b3bb95..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_types.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 The Knative Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "github.com/knative/pkg/apis" - "github.com/knative/pkg/kmeta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:nonNamespaced - -// ClusterIngress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. A ClusterIngress can be configured to give services -// externally-reachable URLs, load balance traffic, offer name based virtual hosting, etc. -// -// This is heavily based on K8s Ingress https://godoc.org/k8s.io/api/extensions/v1beta1#Ingress -// which some highlighted modifications. -type ClusterIngress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec is the desired state of the ClusterIngress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec IngressSpec `json:"spec,omitempty"` - - // Status is the current state of the ClusterIngress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status IngressStatus `json:"status,omitempty"` -} - -// Verify that ClusterIngress adheres to the appropriate interfaces. -var ( - // Check that ClusterIngress may be validated and defaulted. - _ apis.Validatable = (*ClusterIngress)(nil) - _ apis.Defaultable = (*ClusterIngress)(nil) - - // Check that we can create OwnerReferences to a ClusterIngress. - _ kmeta.OwnerRefable = (*ClusterIngress)(nil) -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterIngressList is a collection of ClusterIngress objects. -type ClusterIngressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - // Items is the list of ClusterIngress objects. - Items []ClusterIngress `json:"items"` -} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_defaults.go b/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_defaults.go deleted file mode 100644 index 44ebbc7722..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_defaults.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - - "github.com/knative/serving/pkg/apis/config" -) - -// SetDefaults implements apis.Defaultable -func (r *Revision) SetDefaults(ctx context.Context) { - r.Spec.SetDefaults(ctx) -} - -// SetDefaults implements apis.Defaultable -func (rts *RevisionTemplateSpec) SetDefaults(ctx context.Context) { - rts.Spec.SetDefaults(ctx) -} - -// SetDefaults implements apis.Defaultable -func (rs *RevisionSpec) SetDefaults(ctx context.Context) { - cfg := config.FromContextOrDefaults(ctx) - - // Default TimeoutSeconds based on our configmap - if rs.TimeoutSeconds == nil { - ts := cfg.Defaults.RevisionTimeoutSeconds - rs.TimeoutSeconds = &ts - } - - var container corev1.Container - if len(rs.PodSpec.Containers) == 1 { - container = rs.PodSpec.Containers[0] - } - defer func() { - rs.PodSpec.Containers = []corev1.Container{container} - }() - - if container.Name == "" { - container.Name = cfg.Defaults.UserContainerName(ctx) - } - - if container.Resources.Requests == nil { - container.Resources.Requests = corev1.ResourceList{} - } - if _, ok := container.Resources.Requests[corev1.ResourceCPU]; !ok { - if rsrc := cfg.Defaults.RevisionCPURequest; rsrc != nil { - container.Resources.Requests[corev1.ResourceCPU] = *rsrc - } - } - if _, ok := container.Resources.Requests[corev1.ResourceMemory]; !ok { - if rsrc := cfg.Defaults.RevisionMemoryRequest; rsrc != nil { - container.Resources.Requests[corev1.ResourceMemory] = *rsrc - } - } - - if container.Resources.Limits == nil { - container.Resources.Limits = corev1.ResourceList{} - } - if _, ok := container.Resources.Limits[corev1.ResourceCPU]; !ok { - if rsrc := cfg.Defaults.RevisionCPULimit; rsrc != nil { - container.Resources.Limits[corev1.ResourceCPU] = *rsrc - } - } - if _, ok := container.Resources.Limits[corev1.ResourceMemory]; !ok { - if rsrc := cfg.Defaults.RevisionMemoryLimit; rsrc != nil { - container.Resources.Limits[corev1.ResourceMemory] = *rsrc - } - } - 
- vms := container.VolumeMounts - for i := range vms { - vms[i].ReadOnly = true - } -} diff --git a/test/vendor/github.com/knative/serving/pkg/autoscaler/testdata/config-autoscaler.yaml b/test/vendor/github.com/knative/serving/pkg/autoscaler/testdata/config-autoscaler.yaml deleted file mode 120000 index 855fbd8994..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/autoscaler/testdata/config-autoscaler.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../config/config-autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/clusteringress.go b/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/clusteringress.go deleted file mode 100644 index 55777ab244..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/clusteringress.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2019 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClusterIngressesGetter has a method to return a ClusterIngressInterface. -// A group's client should implement this interface. -type ClusterIngressesGetter interface { - ClusterIngresses() ClusterIngressInterface -} - -// ClusterIngressInterface has methods to work with ClusterIngress resources. -type ClusterIngressInterface interface { - Create(*v1alpha1.ClusterIngress) (*v1alpha1.ClusterIngress, error) - Update(*v1alpha1.ClusterIngress) (*v1alpha1.ClusterIngress, error) - UpdateStatus(*v1alpha1.ClusterIngress) (*v1alpha1.ClusterIngress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ClusterIngress, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterIngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterIngress, err error) - ClusterIngressExpansion -} - -// clusterIngresses implements ClusterIngressInterface -type clusterIngresses struct { - client rest.Interface -} - -// newClusterIngresses returns a ClusterIngresses -func newClusterIngresses(c *NetworkingV1alpha1Client) *clusterIngresses { - return &clusterIngresses{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterIngress, and returns the corresponding clusterIngress object, and an error if there is any. -func (c *clusterIngresses) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterIngress, err error) { - result = &v1alpha1.ClusterIngress{} - err = c.client.Get(). 
- Resource("clusteringresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterIngresses that match those selectors. -func (c *clusterIngresses) List(opts v1.ListOptions) (result *v1alpha1.ClusterIngressList, err error) { - result = &v1alpha1.ClusterIngressList{} - err = c.client.Get(). - Resource("clusteringresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterIngresses. -func (c *clusterIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusteringresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterIngress and creates it. Returns the server's representation of the clusterIngress, and an error, if there is any. -func (c *clusterIngresses) Create(clusterIngress *v1alpha1.ClusterIngress) (result *v1alpha1.ClusterIngress, err error) { - result = &v1alpha1.ClusterIngress{} - err = c.client.Post(). - Resource("clusteringresses"). - Body(clusterIngress). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterIngress and updates it. Returns the server's representation of the clusterIngress, and an error, if there is any. -func (c *clusterIngresses) Update(clusterIngress *v1alpha1.ClusterIngress) (result *v1alpha1.ClusterIngress, err error) { - result = &v1alpha1.ClusterIngress{} - err = c.client.Put(). - Resource("clusteringresses"). - Name(clusterIngress.Name). - Body(clusterIngress). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *clusterIngresses) UpdateStatus(clusterIngress *v1alpha1.ClusterIngress) (result *v1alpha1.ClusterIngress, err error) { - result = &v1alpha1.ClusterIngress{} - err = c.client.Put(). - Resource("clusteringresses"). - Name(clusterIngress.Name). - SubResource("status"). - Body(clusterIngress). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterIngress and deletes it. Returns an error if one occurs. -func (c *clusterIngresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusteringresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusteringresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterIngress. -func (c *clusterIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterIngress, err error) { - result = &v1alpha1.ClusterIngress{} - err = c.client.Patch(pt). - Resource("clusteringresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/test/vendor/github.com/knative/serving/pkg/deployment/testdata/config-deployment.yaml b/test/vendor/github.com/knative/serving/pkg/deployment/testdata/config-deployment.yaml deleted file mode 120000 index a1c7629c19..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/deployment/testdata/config-deployment.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../config/config-deployment.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/gc/testdata/config-gc.yaml b/test/vendor/github.com/knative/serving/pkg/gc/testdata/config-gc.yaml deleted file mode 120000 index f6ee95a326..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/gc/testdata/config-gc.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../config/config-gc.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/logging/testdata/config-logging.yaml b/test/vendor/github.com/knative/serving/pkg/logging/testdata/config-logging.yaml deleted file mode 120000 index 581e985f9c..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/logging/testdata/config-logging.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../config/config-logging.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/metrics/testdata/config-observability.yaml b/test/vendor/github.com/knative/serving/pkg/metrics/testdata/config-observability.yaml deleted file mode 120000 index 7827630e3a..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/metrics/testdata/config-observability.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../config/config-observability.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/network/testdata/config-network.yaml b/test/vendor/github.com/knative/serving/pkg/network/testdata/config-network.yaml deleted file mode 120000 index e316b0466b..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/network/testdata/config-network.yaml +++ /dev/null 
@@ -1 +0,0 @@ -../../../config/config-network.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml deleted file mode 120000 index 6d8b9bae96..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-certmanager.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/configuration/config/testdata/config-gc.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/configuration/config/testdata/config-gc.yaml deleted file mode 120000 index 929b7c057a..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/configuration/config/testdata/config-gc.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-gc.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml deleted file mode 120000 index 342ffffed5..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-istio.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml deleted file mode 120000 index b774d24cbb..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-network.yaml \ No newline at end of file diff --git 
a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml deleted file mode 120000 index 05b9f33bbc..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml deleted file mode 120000 index 8e4b6d96a0..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-deployment.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml deleted file mode 120000 index def106f120..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-logging.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-network.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-network.yaml deleted file mode 120000 index b774d24cbb..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-network.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-network.yaml \ No newline at end of file diff --git 
a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml deleted file mode 120000 index ecbbeaaee7..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-observability.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-domain.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-domain.yaml deleted file mode 120000 index fd6402b7c4..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-domain.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-domain.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-gc.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-gc.yaml deleted file mode 120000 index 929b7c057a..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-gc.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-gc.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-network.yaml b/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-network.yaml deleted file mode 120000 index b774d24cbb..0000000000 --- a/test/vendor/github.com/knative/serving/pkg/reconciler/route/config/testdata/config-network.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../../config/config-network.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/test/config/100-istio-default-domain.yaml 
b/test/vendor/github.com/knative/serving/test/config/100-istio-default-domain.yaml deleted file mode 120000 index 7d6227ed31..0000000000 --- a/test/vendor/github.com/knative/serving/test/config/100-istio-default-domain.yaml +++ /dev/null @@ -1 +0,0 @@ -../../config/post-install/100-istio-default-domain.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/third_party/istio-1.0-latest b/test/vendor/github.com/knative/serving/third_party/istio-1.0-latest deleted file mode 120000 index 28c3c58ea4..0000000000 --- a/test/vendor/github.com/knative/serving/third_party/istio-1.0-latest +++ /dev/null @@ -1 +0,0 @@ -istio-1.0.7 \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/third_party/istio-1.1-latest b/test/vendor/github.com/knative/serving/third_party/istio-1.1-latest deleted file mode 120000 index eeb30440bc..0000000000 --- a/test/vendor/github.com/knative/serving/third_party/istio-1.1-latest +++ /dev/null @@ -1 +0,0 @@ -istio-1.1.7 \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE b/test/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE rename to test/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE diff --git a/test/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/test/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 0000000000..5d8cb5b72e --- /dev/null +++ b/test/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 0000000000..258c0636aa --- /dev/null +++ b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). 
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 0000000000..c318385cbe --- /dev/null +++ b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 0000000000..8fb59ad226 --- /dev/null +++ b/test/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. 
This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/test/vendor/github.com/prometheus/client_golang/LICENSE b/test/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/prometheus/client_golang/NOTICE b/test/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 0000000000..dd878a30ee --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/test/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 0000000000..288f0e8548 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/test/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 0000000000..6609e2877c --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. +func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/test/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000000..1e839650d4 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. 
+// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. 
The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. 
+func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/test/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000000..d463e36d3e --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. 
Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. 
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. 
The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. 
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/test/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 0000000000..1d034f871c --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. 
+// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. 
See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. 
+ for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(labelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. 
+func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/test/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000000..01977de661 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. 
+// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. 
A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. 
You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. 
+// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. 
The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/test/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 0000000000..18a99d5faa --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. 
It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. 
+func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/test/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 0000000000..3d383a735c --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/test/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000000..71d406bd92 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,286 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. 
+ return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. 
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). 
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. 
The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/test/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000000..dc9247fed9 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,396 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "runtime" + "runtime/debug" + "sync" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. +} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. 
This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) 
+func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number 
of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + 
desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + 
memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return "go_memstats_" + s +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.msMetrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. 
+ } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. 
+func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/test/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 0000000000..d7ea67bd2b --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,586 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). 
However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. 
The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). 
See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. 
+ h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. 
It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + + upperBounds []float64 + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if i < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[i], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. 
See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). 
Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). 
+// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. 
the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. 
+func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/test/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 0000000000..351c26e1ae --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. 
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/test/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 0000000000..2744443ac2 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. 
+type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/test/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 0000000000..55e6d86d59 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "time" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. 
+ Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. 
If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type labelPairSorter []*dto.LabelPair + +func (s labelPairSorter) Len() int { + return len(s) +} + +func (s labelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s labelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. 
+func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/test/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 0000000000..5806cd09e3 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. 
+type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 0000000000..9b80979421 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,151 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "os" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). 
+ PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. 
+func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. + if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. 
+func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 0000000000..3117461cde --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 0000000000..e0b935d1fe --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + 
return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000000..fa535684f9 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,357 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. 
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. + pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + 
return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + 
http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, 
flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, 
hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000000..cea5a90fd9 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,349 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. 
If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilites that can occur below. 
+ errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. + httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. 
+ case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + } + + if lastErr != nil { + httpError(rsp, lastErr) + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. 
+ cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. 
Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. 
+ MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). Until the implementation is improved, it is recommended to + // implement a separate timeout in potentially slow Collectors. + Timeout time.Duration +} + +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerErrer. Error contents is +// supposed to be uncompressed plain text. However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. 
+func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 0000000000..83c49b66a8 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. 
It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// and/or HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided +// ObserverVec. The ObserverVec must have zero, one, or two non-const +// non-curried labels. For those, the only allowed label names are "code" and +// "method". The function panics otherwise. 
The Observe method of the Observer +// in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. 
Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) 
+ } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000000..9db2438053 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. 
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. 
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+ if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git 
a/test/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/test/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 0000000000..6c32516aa2 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,945 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. 
Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe methed does not yield any descriptors) are excluded from the check. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). 
+type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. 
The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. 
+func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. 
+type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + uncheckedCollectors []Collector + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? 
+ if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. 
+ if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. 
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) + for _, collector := range r.collectorsByID { + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector + } + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() + + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) + default: + return + } + wg.Done() + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. + go func() { + wg.Wait() + close(checkedMetricChan) + close(uncheckedMetricChan) + }() + + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. 
+ defer func() { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } + } + }() + + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + + for { + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + default: + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or + // we have already as many goroutines started as + // there are collectors. Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + registeredDescIDs, + )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) + } + break + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() + } + // Once both checkedMetricChan and uncheckdMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. 
+// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. 
+ ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. 
If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := hashNew() + h = hashAdd(h, name) + h = hashAddByte(h, separatorByte) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. 
+ if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetName()) + h = hashAddByte(h, separatorByte) + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(labelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/test/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000000..c970fdee0e --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,736 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. 
+ DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). 
See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Problem with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. 
On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = map[float64]float64{} + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, + } + s.init(s) // Init self-collection. 
+ return s + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
+ s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. 
+ for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // summaryCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the summary) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. 
+ counts [2]*summaryCounts + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&s.countAndHotIdx, 1) + hotCounts := s.counts[n>>63] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := s.counts[n>>63] + coldCounts := s.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. 
+ } + + sum := &dto.Summary{ + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. 
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. 
the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. 
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. 
+func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/test/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 0000000000..8d5f105233 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. 
The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/test/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000000..0f9ce63f40 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. 
+type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/value.go b/test/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000000..eb248f1087 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. 
However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. 
+ return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) + return labelPairs +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/test/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000000..14ed9e856d --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,472 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses basicMetricVec internally. +type metricVec struct { + *metricMap + + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. 
+ hashAdd func(h uint64, s string) uint64 + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). 
See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = 
m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. 
+func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. 
+func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. 
+func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, 
len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/test/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/test/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 0000000000..e303eef6d3 --- /dev/null +++ b/test/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,200 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). 
The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. 
Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), 
m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. + if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/test/vendor/github.com/prometheus/client_model/LICENSE b/test/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/prometheus/client_model/NOTICE b/test/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 0000000000..20110e410e --- /dev/null +++ b/test/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/test/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/test/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 0000000000..2f4930d9dd --- /dev/null +++ b/test/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,723 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: metrics.proto + +package io_prometheus_client + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m 
*LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{0} +} + +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{1} +} + +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{2} +} + +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) 
Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{3} +} + +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{4} +} + +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return 
xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{5} +} + +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" 
json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{6} +} + +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) 
Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{7} +} + +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Exemplar struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{8} +} + +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Exemplar.Unmarshal(m, b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return xxx_messageInfo_Exemplar.Size(m) +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{9} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return 
xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_6039342a2ba47b72, []int{10} +} + +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } + +var fileDescriptor_6039342a2ba47b72 = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 
0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, + 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, + 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, + 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, + 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, + 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, + 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, + 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, + 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, + 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, + 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, + 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, + 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, + 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, + 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, + 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, + 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, + 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, + 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, + 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, + 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 
0x1e, + 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, + 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, + 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, + 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, + 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, + 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, + 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, + 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, + 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, + 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, + 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, + 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, + 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, + 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, + 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, + 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, + 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, + 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, + 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +} diff --git a/test/vendor/github.com/prometheus/common/LICENSE 
b/test/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/prometheus/common/NOTICE b/test/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 0000000000..636a2c1a5e --- /dev/null +++ b/test/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/test/vendor/github.com/prometheus/common/expfmt/decode.go b/test/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 0000000000..c092723e84 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. 
+func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. +func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. 
+func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurrs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occurred. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) 
+ } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + 
Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/test/vendor/github.com/prometheus/common/expfmt/encode.go b/test/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 0000000000..bd4e347454 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +// Closer is implemented by Encoders that need to be closed to finalize +// encoding. (For example, OpenMetrics needs a final `# EOF` line.) +// +// Note that all Encoder implementations returned from this package implement +// Closer, too, even if the Close call is a no-op. This happens in preparation +// for adding a Close method to the Encoder interface directly in a (mildly +// breaking) release in the future. +type Closer interface { + Close() error +} + +type encoderCloser struct { + encode func(*dto.MetricFamily) error + close func() error +} + +func (ec encoderCloser) Encode(v *dto.MetricFamily) error { + return ec.encode(v) +} + +func (ec encoderCloser) Close() error { + return ec.close() +} + +// Negotiate returns the Content-Type based on the given Accept header. If no +// appropriate accepted type is found, FmtText is returned (which is the +// Prometheus text format). This function will never negotiate FmtOpenMetrics, +// as the support is still experimental. To include the option to negotiate +// FmtOpenMetrics, use NegotiateOpenMetrics. 
+func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NegotiateIncludingOpenMetrics works like Negotiate but includes +// FmtOpenMetrics as an option for the result. Note that this function is +// temporary and will disappear once FmtOpenMetrics is fully supported and as +// such may be negotiated by the normal Negotiate function. +func NegotiateIncludingOpenMetrics(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + ver := ac.Params["version"] + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { + return FmtOpenMetrics + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. All +// Encoder implementations returned by NewEncoder also implement Closer, and +// callers should always call the Close method. It is currently only required +// for FmtOpenMetrics, but a future (breaking) release will add the Close method +// to the Encoder interface directly. The current version of the Encoder +// interface is kept for backwards compatibility. 
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtProtoCompact: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }, + close: func() error { return nil }, + } + case FmtProtoText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }, + close: func() error { return nil }, + } + case FmtText: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }, + close: func() error { return nil }, + } + case FmtOpenMetrics: + return encoderCloser{ + encode: func(v *dto.MetricFamily) error { + _, err := MetricFamilyToOpenMetrics(w, v) + return err + }, + close: func() error { + _, err := FinalizeOpenMetrics(w) + return err + }, + } + } + panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) +} diff --git a/test/vendor/github.com/prometheus/common/expfmt/expfmt.go b/test/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 0000000000..0f176fa64f --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion = "0.0.1" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/test/vendor/github.com/prometheus/common/expfmt/fuzz.go b/test/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 0000000000..dc2eedeefc --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/test/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/test/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go new file mode 100644 index 0000000000..8a9313a3be --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -0,0 +1,527 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the +// OpenMetrics text format and writes the resulting lines to 'out'. It returns +// the number of bytes written and any error encountered. 
The output will have +// the same order as the input, no further sorting is performed. Furthermore, +// this function assumes the input is already sanitized and does not perform any +// sanity checks. If the input contains duplicate metrics or invalid metric or +// label names, the conversion will result in invalid text format output. +// +// This function fulfills the type 'expfmt.encoder'. +// +// Note that OpenMetrics requires a final `# EOF` line. Since this function acts +// on individual metric families, it is the responsibility of the caller to +// append this line to 'out' once all metric families have been written. +// Conveniently, this can be done by calling FinalizeOpenMetrics. +// +// The output should be fully OpenMetrics compliant. However, there are a few +// missing features and peculiarities to avoid complications when switching from +// Prometheus to OpenMetrics or vice versa: +// +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. +// +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. +// +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. 
+ w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var ( + n int + metricType = in.GetType() + shortName = name + ) + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { + shortName = name[:len(name)-6] + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(shortName) + written += n + if err != nil { + return + } + switch metricType { + case dto.MetricType_COUNTER: + if strings.HasSuffix(name, "_total") { + n, err = w.WriteString(" counter\n") + } else { + n, err = w.WriteString(" unknown\n") + } + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" unknown\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + // Note that we have ensured above that either the name + // ends on `_total` or that the rendered type is + // `unknown`. Therefore, no `_total` must be added here. + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), 0, false, + metric.Counter.Exemplar, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), 0, false, + nil, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeOpenMetricsSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), 0, false, + nil, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeOpenMetricsSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Summary.GetSampleCount(), true, + nil, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + 
model.BucketLabel, b.GetUpperBound(), + 0, b.GetCumulativeCount(), true, + b.Exemplar, + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeOpenMetricsSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + written += n + if err != nil { + return + } + } + n, err = writeOpenMetricsSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), 0, false, + nil, + ) + written += n + if err != nil { + return + } + n, err = writeOpenMetricsSample( + w, name, "_count", metric, "", 0, + 0, metric.Histogram.GetSampleCount(), true, + nil, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. +func FinalizeOpenMetrics(w io.Writer) (written int, err error) { + return w.Write([]byte("# EOF\n")) +} + +// writeOpenMetricsSample writes a single sample in OpenMetrics text format to +// w, given the metric name, the metric proto message itself, optionally an +// additional label name with a float64 value (use empty string as label name if +// not required), the value (optionally as float64 or uint64, determined by +// useIntValue), and optionally an exemplar (use nil if not required). The +// function returns the number of bytes written and any error encountered. 
+func writeOpenMetricsSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + floatValue float64, intValue uint64, useIntValue bool, + exemplar *dto.Exemplar, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeOpenMetricsLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + if useIntValue { + n, err = writeUint(w, intValue) + } else { + n, err = writeOpenMetricsFloat(w, floatValue) + } + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly without converting to a float first. + n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) + written += n + if err != nil { + return written, err + } + } + if exemplar != nil { + n, err = writeExemplar(w, exemplar) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float +// in OpenMetrics style. 
+func writeOpenMetricsLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeExemplar writes the provided exemplar in OpenMetrics format to w. The +// function returns the number of bytes written and any error encountered. 
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { + written := 0 + n, err := w.WriteString(" # ") + written += n + if err != nil { + return written, err + } + n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeOpenMetricsFloat(w, e.GetValue()) + written += n + if err != nil { + return written, err + } + if e.Timestamp != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + ts, err := ptypes.Timestamp((*e).Timestamp) + if err != nil { + return written, err + } + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting +// number would otherwise contain neither a "." nor an "e". +func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return w.WriteString("1.0") + case f == 0: + return w.WriteString("0.0") + case f == -1: + return w.WriteString("-1.0") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if !bytes.ContainsAny(*bp, "e.") { + *bp = append(*bp, '.', '0') + } + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeUint is like writeInt just for uint64. 
+func writeUint(w enhancedWriter, u uint64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendUint((*bp)[:0], u, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/test/vendor/github.com/prometheus/common/expfmt/text_create.go b/test/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 0000000000..5ba503b065 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,465 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "sync" + + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriter(ioutil.Discard) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. 
It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { + // Fail-fast checks. + if len(in.Metric) == 0 { + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return 0, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Try the interface upgrade. If it doesn't work, we'll use a + // bufio.Writer from the sync.Pool. + w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) + w = b + defer func() { + bErr := b.Flush() + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE. 
+ if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n 
+ if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the 
+// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. +func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
+var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } + return escaper.WriteString(w, v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/test/vendor/github.com/prometheus/common/expfmt/text_parse.go b/test/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 0000000000..342e5940d0 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,764 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. 
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. 
+// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. 
+ } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. 
+func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. 
+ // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := parseFloat(p.currentToken.String()) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if 
p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. 
+func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. +func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. 
The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. 
+func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} + +func parseFloat(s string) (float64, error) { + 
if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/test/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/test/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 0000000000..26e92288c7 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + 
accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/test/vendor/github.com/prometheus/common/model/alert.go b/test/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 0000000000..35e739c7ad --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. 
+func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/test/vendor/github.com/prometheus/common/model/fingerprinting.go b/test/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 0000000000..fc4de4106e --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). 
+func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/test/vendor/github.com/prometheus/common/model/fnv.go b/test/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 0000000000..038fc1c900 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. 
+func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/test/vendor/github.com/prometheus/common/model/labels.go b/test/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 0000000000..41051a01a3 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. 
+ AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. 
This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. 
+type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/test/vendor/github.com/prometheus/common/model/labelset.go b/test/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 0000000000..6eda08a739 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. 
The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. 
+ sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. 
+ for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/test/vendor/github.com/prometheus/common/model/metric.go b/test/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 0000000000..00804b7fed --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/test/vendor/github.com/prometheus/common/model/model.go b/test/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 0000000000..a7b9691707 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/test/vendor/github.com/prometheus/common/model/signature.go b/test/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 0000000000..8762b13c63 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. 
(Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/test/vendor/github.com/prometheus/common/model/silence.go b/test/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 0000000000..bb99889d2c --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. 
+type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/test/vendor/github.com/prometheus/common/model/time.go b/test/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 0000000000..7b0064fdb2 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,270 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. 
+ second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes an interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. 
+func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. 
+type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. 
+func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/test/vendor/github.com/prometheus/common/model/value.go b/test/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 0000000000..c9d8fb1a28 --- /dev/null +++ b/test/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. 
Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. + ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. 
+func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. 
+func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/test/vendor/github.com/prometheus/procfs/LICENSE b/test/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/prometheus/procfs/NOTICE b/test/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 0000000000..53c5e9aa11 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/test/vendor/github.com/prometheus/procfs/arp.go b/test/vendor/github.com/prometheus/procfs/arp.go new file mode 100644 index 0000000000..916c9182a8 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/arp.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "io/ioutil" + "net" + "strings" +) + +// ARPEntry contains a single row of the columnar data represented in +// /proc/net/arp. +type ARPEntry struct { + // IP address + IPAddr net.IP + // MAC address + HWAddr net.HardwareAddr + // Name of the device + Device string +} + +// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, +// and then return a slice of ARPEntry's. +func (fs FS) GatherARPEntries() ([]ARPEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + if err != nil { + return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) + } + + return parseARPEntries(data) +} + +func parseARPEntries(data []byte) ([]ARPEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]ARPEntry, 0) + var err error + const ( + expectedDataWidth = 6 + expectedHeaderWidth = 9 + ) + for _, line := range lines { + columns := strings.Fields(line) + width := len(columns) + + if width == expectedHeaderWidth || width == 0 { + continue + } else if width == expectedDataWidth { + entry, err := parseARPEntry(columns) + if err != nil { + return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) + } + entries = append(entries, entry) + } else { + return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + } + + } + + return entries, err +} + +func parseARPEntry(columns []string) (ARPEntry, error) { + ip := net.ParseIP(columns[0]) + mac := net.HardwareAddr(columns[3]) + + entry := ARPEntry{ + IPAddr: ip, + HWAddr: mac, + Device: columns[5], + } + + return entry, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/buddyinfo.go b/test/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 0000000000..10bd067a0a --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. +func (fs FS) BuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.proc.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != 
nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/test/vendor/github.com/prometheus/procfs/cpuinfo.go b/test/vendor/github.com/prometheus/procfs/cpuinfo.go new file mode 100644 index 0000000000..2e02215528 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +type CPUInfo struct { + Processor uint + VendorID string + CPUFamily string + Model string + ModelName string + Stepping string + Microcode string + CPUMHz float64 + CacheSize string + PhysicalID string + Siblings uint + CoreID string + CPUCores uint + APICID string + InitialAPICID string + FPU string + FPUException string + CPUIDLevel uint + WP string + Flags []string + Bugs []string + BogoMips float64 + CLFlushSize uint + CacheAlignment uint + AddressSizes string + PowerManagement string +} + +// CPUInfo returns information about current system CPUs. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) CPUInfo() ([]CPUInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) + if err != nil { + return nil, err + } + return parseCPUInfo(data) +} + +// parseCPUInfo parses data from /proc/cpuinfo +func parseCPUInfo(info []byte) ([]CPUInfo, error) { + cpuinfo := []CPUInfo{} + i := -1 + scanner := bufio.NewScanner(bytes.NewReader(info)) + for scanner.Scan() { + line := scanner.Text() + if strings.TrimSpace(line) == "" { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + i++ + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Processor = uint(v) + case "vendor_id": + cpuinfo[i].VendorID = field[1] + case "cpu family": + cpuinfo[i].CPUFamily = field[1] + case "model": + cpuinfo[i].Model = field[1] + case "model name": + cpuinfo[i].ModelName = field[1] + case "stepping": + cpuinfo[i].Stepping = field[1] + case "microcode": + cpuinfo[i].Microcode = field[1] + case "cpu MHz": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].CPUMHz = v + case "cache size": + cpuinfo[i].CacheSize = field[1] + case "physical id": + cpuinfo[i].PhysicalID = field[1] + case "siblings": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].Siblings = uint(v) + case "core id": + cpuinfo[i].CoreID = field[1] + case "cpu cores": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CPUCores = uint(v) + case "apicid": + cpuinfo[i].APICID = field[1] + case "initial apicid": + cpuinfo[i].InitialAPICID = field[1] + case "fpu": + cpuinfo[i].FPU = field[1] + case "fpu_exception": + cpuinfo[i].FPUException = field[1] + case "cpuid level": + v, err := strconv.ParseUint(field[1], 0, 32) + 
if err != nil { + return nil, err + } + cpuinfo[i].CPUIDLevel = uint(v) + case "wp": + cpuinfo[i].WP = field[1] + case "flags": + cpuinfo[i].Flags = strings.Fields(field[1]) + case "bugs": + cpuinfo[i].Bugs = strings.Fields(field[1]) + case "bogomips": + v, err := strconv.ParseFloat(field[1], 64) + if err != nil { + return nil, err + } + cpuinfo[i].BogoMips = v + case "clflush size": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CLFlushSize = uint(v) + case "cache_alignment": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + cpuinfo[i].CacheAlignment = uint(v) + case "address sizes": + cpuinfo[i].AddressSizes = field[1] + case "power management": + cpuinfo[i].PowerManagement = field[1] + } + } + return cpuinfo, nil + +} diff --git a/test/vendor/github.com/prometheus/procfs/crypto.go b/test/vendor/github.com/prometheus/procfs/crypto.go new file mode 100644 index 0000000000..a958933757 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/crypto.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Crypto holds info parsed from /proc/crypto. 
+type Crypto struct { + Alignmask *uint64 + Async bool + Blocksize *uint64 + Chunksize *uint64 + Ctxsize *uint64 + Digestsize *uint64 + Driver string + Geniv string + Internal string + Ivsize *uint64 + Maxauthsize *uint64 + MaxKeysize *uint64 + MinKeysize *uint64 + Module string + Name string + Priority *int64 + Refcnt *int64 + Seedsize *uint64 + Selftest string + Type string + Walksize *uint64 +} + +// Crypto parses an crypto-file (/proc/crypto) and returns a slice of +// structs containing the relevant info. More information available here: +// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html +func (fs FS) Crypto() ([]Crypto, error) { + path := fs.proc.Path("crypto") + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, fmt.Errorf("error reading crypto %s: %s", path, err) + } + + crypto, err := parseCrypto(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) + } + + return crypto, nil +} + +// parseCrypto parses a /proc/crypto stream into Crypto elements. +func parseCrypto(r io.Reader) ([]Crypto, error) { + var out []Crypto + + s := bufio.NewScanner(r) + for s.Scan() { + text := s.Text() + switch { + case strings.HasPrefix(text, "name"): + // Each crypto element begins with its name. + out = append(out, Crypto{}) + case text == "": + continue + } + + kv := strings.Split(text, ":") + if len(kv) != 2 { + return nil, fmt.Errorf("malformed crypto line: %q", text) + } + + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + + // Parse the key/value pair into the currently focused element. + c := &out[len(out)-1] + if err := c.parseKV(k, v); err != nil { + return nil, err + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +// parseKV parses a key/value pair into the appropriate field of c. +func (c *Crypto) parseKV(k, v string) error { + vp := util.NewValueParser(v) + + switch k { + case "async": + // Interpret literal yes as true. 
+ c.Async = v == "yes" + case "blocksize": + c.Blocksize = vp.PUInt64() + case "chunksize": + c.Chunksize = vp.PUInt64() + case "digestsize": + c.Digestsize = vp.PUInt64() + case "driver": + c.Driver = v + case "geniv": + c.Geniv = v + case "internal": + c.Internal = v + case "ivsize": + c.Ivsize = vp.PUInt64() + case "maxauthsize": + c.Maxauthsize = vp.PUInt64() + case "max keysize": + c.MaxKeysize = vp.PUInt64() + case "min keysize": + c.MinKeysize = vp.PUInt64() + case "module": + c.Module = v + case "name": + c.Name = v + case "priority": + c.Priority = vp.PInt64() + case "refcnt": + c.Refcnt = vp.PInt64() + case "seedsize": + c.Seedsize = vp.PUInt64() + case "selftest": + c.Selftest = v + case "type": + c.Type = v + case "walksize": + c.Walksize = vp.PUInt64() + } + + return vp.Err() +} diff --git a/test/vendor/github.com/prometheus/procfs/doc.go b/test/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 0000000000..e2acd6d40a --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/test/vendor/github.com/prometheus/procfs/fs.go b/test/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 0000000000..0102ab0fd8 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "github.com/prometheus/procfs/internal/fs" +) + +// FS represents the pseudo-filesystem sys, which provides an interface to +// kernel data structures. +type FS struct { + proc fs.FS +} + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = fs.DefaultProcMountPoint + +// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. +// It will error if the mount point directory can't be read or is a file. 
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+	fs, err := fs.NewFS(mountPoint) // NOTE(review): local "fs" shadows the imported fs package for the rest of this function
+	if err != nil {
+		return FS{}, err
+	}
+	return FS{fs}, nil
+}
diff --git a/test/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/test/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 0000000000..565e89e42c
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+
+	// DefaultConfigfsMountPoint is the common mount point of the configfs filesystem.
+	DefaultConfigfsMountPoint = "/sys/kernel/config"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path appends the given path elements to the filesystem path, adding separators +// as necessary. +func (fs FS) Path(p ...string) string { + return filepath.Join(append([]string{string(fs)}, p...)...) +} diff --git a/test/vendor/github.com/prometheus/procfs/internal/util/parse.go b/test/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 0000000000..755591d9a5 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. 
+func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ParsePInt64s parses a slice of strings into a slice of int64 pointers. +func ParsePInt64s(ss []string) ([]*int64, error) { + us := make([]*int64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// ParseBool parses a string into a boolean pointer. +func ParseBool(b string) *bool { + var truth bool + switch b { + case "enabled": + truth = true + case "disabled": + truth = false + default: + return nil + } + return &truth +} diff --git a/test/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/test/vendor/github.com/prometheus/procfs/internal/util/readfile.go new file mode 100644 index 0000000000..8051161b2a --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "io" + "io/ioutil" + "os" +) + +// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. +// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). +// Reads a max file size of 512kB. For files larger than this, a scanner +// should be used. +func ReadFileNoStat(filename string) ([]byte, error) { + const maxBufferSize = 1024 * 512 + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + reader := io.LimitReader(f, maxBufferSize) + return ioutil.ReadAll(reader) +} diff --git a/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go new file mode 100644 index 0000000000..c07de0b6c9 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!appengine + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. 
+func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go new file mode 100644 index 0000000000..bd55b45377 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,appengine !linux + +package util + +import ( + "fmt" +) + +// SysReadFile is here implemented as a noop for builds that do not support +// the read syscall. For example Windows, or Linux on Google App Engine. 
+func SysReadFile(file string) (string, error) {
+	return "", fmt.Errorf("not supported on this platform")
+}
diff --git a/test/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/test/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
new file mode 100644
index 0000000000..fe2355d3c6
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+	v   string
+	err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+	return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	v := vp.int64()
+	return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+	if vp.err != nil {
+		return 0
+	}
+
+	// A base value of zero makes ParseInt infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseInt(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return 0
+	}
+
+	return v
+}
+
+// PUInt64 interprets the underlying value as a uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	// A base value of zero makes ParseUint infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseUint(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return nil
+	}
+
+	return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+	return vp.err
+}
diff --git a/test/vendor/github.com/prometheus/procfs/ipvs.go b/test/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000000..89e447746c
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,241 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package procfs + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) IPVSStats() (IPVSStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + + return parseIPVSStats(bytes.NewReader(data)) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
+func parseIPVSStats(r io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(r) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.proc.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + 
case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/test/vendor/github.com/prometheus/procfs/loadavg.go b/test/vendor/github.com/prometheus/procfs/loadavg.go new file mode 100644 index 0000000000..00bbe14417 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/loadavg.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// LoadAvg represents an entry in /proc/loadavg +type LoadAvg struct { + Load1 float64 + Load5 float64 + Load15 float64 +} + +// LoadAvg returns loadavg from /proc. +func (fs FS) LoadAvg() (*LoadAvg, error) { + path := fs.proc.Path("loadavg") + + data, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + return parseLoad(data) +} + +// Parse /proc loadavg and return 1m, 5m and 15m. 
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { + loads := make([]float64, 3) + parts := strings.Fields(string(loadavgBytes)) + if len(parts) < 3 { + return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) + } + + var err error + for i, load := range parts[0:3] { + loads[i], err = strconv.ParseFloat(load, 64) + if err != nil { + return nil, fmt.Errorf("could not parse load '%s': %s", load, err) + } + } + return &LoadAvg{ + Load1: loads[0], + Load5: loads[1], + Load15: loads[2], + }, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/mdstat.go b/test/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 0000000000..2af3ada180 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,194 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device requires. + DisksTotal int64 + // Number of failed disks. 
+ DisksFailed int64 + // Spare disks in the device. + DisksSpare int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. More information available here: +// https://raid.wiki.kernel.org/index.php/Mdstat +func (fs FS) MDStat() ([]MDStat, error) { + data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + mdstat, err := parseMDStat(data) + if err != nil { + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) + } + return mdstat, nil +} + +// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of +// structs containing the relevant info. +func parseMDStat(mdStatData []byte) ([]MDStat, error) { + mdStats := []MDStat{} + lines := strings.Split(string(mdStatData), "\n") + + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || + strings.HasPrefix(line, "Personalities") || + strings.HasPrefix(line, "unused") { + continue + } + + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 { + return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + if len(lines) <= i+3 { + return nil, fmt.Errorf( + "error parsing %s: too few lines for md device", + mdName, + ) + } + + // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
+ fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + + if err != nil { + return nil, fmt.Errorf("error parsing md device lines: %s", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + + // Append recovery and resyncing state info. + if recovering || resyncing { + if recovering { + state = "recovering" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || + strings.Contains(lines[syncLineIdx], "DELAYED") { + syncedBlocks = 0 + } else { + syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) + } + } + } + + mdStats = append(mdStats, MDStat{ + Name: mdName, + ActivityState: state, + DisksActive: active, + DisksFailed: fail, + DisksSpare: spare, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStats, nil +} + +func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { + + sizeStr := strings.Fields(statusLine)[0] + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. 
+ total = int64(strings.Count(deviceLine, "[")) + return total, total, size, nil + } + + if strings.Contains(deviceLine, "inactive") { + return 0, 0, size, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLine) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) + } + + return active, total, size, nil +} + +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { + matches := recoveryLineRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) + } + + return syncedBlocks, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/meminfo.go b/test/vendor/github.com/prometheus/procfs/meminfo.go new file mode 100644 index 0000000000..50dab4bcd5 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/meminfo.go @@ -0,0 +1,277 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Meminfo represents memory statistics parsed from /proc/meminfo.
+// Values are reported in the unit used by the kernel (kB for most rows);
+// the unit column itself is discarded during parsing.
+type Meminfo struct {
+	// Total usable RAM (physical RAM minus reserved bits and kernel code).
+	MemTotal uint64
+	// The sum of LowFree+HighFree.
+	MemFree uint64
+	// Estimate of memory available for starting new applications without
+	// swapping; accounts for the page cache and reclaimable slab.
+	MemAvailable uint64
+	// Relatively temporary storage for raw disk blocks.
+	Buffers uint64
+	Cached  uint64
+	// Memory that was swapped out and swapped back in but still also
+	// resides in the swapfile.
+	SwapCached uint64
+	// Recently used memory, reclaimed only when absolutely necessary.
+	Active uint64
+	// Less recently used memory, more eligible to be reclaimed.
+	Inactive     uint64
+	ActiveAnon   uint64
+	InactiveAnon uint64
+	ActiveFile   uint64
+	InactiveFile uint64
+	Unevictable  uint64
+	Mlocked      uint64
+	// Total amount of swap space available.
+	SwapTotal uint64
+	// Swap space not currently in use.
+	SwapFree uint64
+	// Memory waiting to get written back to the disk.
+	Dirty uint64
+	// Memory actively being written back to the disk.
+	Writeback uint64
+	// Non-file backed pages mapped into userspace page tables.
+	AnonPages uint64
+	// Files which have been mapped, such as libraries.
+	Mapped uint64
+	Shmem  uint64
+	// In-kernel data structures cache.
+	Slab uint64
+	// Part of Slab that might be reclaimed, such as caches.
+	SReclaimable uint64
+	// Part of Slab that cannot be reclaimed on memory pressure.
+	SUnreclaim  uint64
+	KernelStack uint64
+	// Memory dedicated to the lowest level of page tables.
+	PageTables uint64
+	// NFS pages sent to the server, but not yet committed to stable storage.
+	NFSUnstable uint64
+	// Memory used for block device "bounce buffers".
+	Bounce uint64
+	// Memory used by FUSE for temporary writeback buffers.
+	WritebackTmp uint64
+	// Total amount of memory currently available to be allocated, derived
+	// from 'vm.overcommit_ratio'; enforced only in strict overcommit mode
+	// (mode 2 in 'vm.overcommit_memory'). See vm/overcommit-accounting in
+	// the kernel documentation for the exact formula.
+	CommitLimit uint64
+	// The amount of memory presently allocated on the system, even if it
+	// has not been "used" by processes as of yet.
+	CommittedAS uint64
+	// Total size of the vmalloc memory area.
+	VmallocTotal uint64
+	// Amount of the vmalloc area which is used.
+	VmallocUsed uint64
+	// Largest contiguous block of the vmalloc area which is free.
+	VmallocChunk      uint64
+	HardwareCorrupted uint64
+	AnonHugePages     uint64
+	ShmemHugePages    uint64
+	ShmemPmdMapped    uint64
+	CmaTotal          uint64
+	CmaFree           uint64
+	HugePagesTotal    uint64
+	HugePagesFree     uint64
+	HugePagesRsvd     uint64
+	HugePagesSurp     uint64
+	Hugepagesize      uint64
+	DirectMap4k       uint64
+	DirectMap2M       uint64
+	DirectMap1G       uint64
+}
+
+// Meminfo returns an information about current kernel/system memory statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Meminfo() (Meminfo, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
+	if err != nil {
+		return Meminfo{}, err
+	}
+
+	m, err := parseMemInfo(bytes.NewReader(b))
+	if err != nil {
+		return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+	}
+
+	return *m, nil
+}
+
+// parseMemInfo reads meminfo-formatted text and fills a Meminfo struct.
+// Rows with unknown labels are silently ignored; the unit column is dropped.
+func parseMemInfo(r io.Reader) (*Meminfo, error) {
+	var m Meminfo
+	// Label -> destination field. A table-driven lookup replacing the
+	// original label switch; semantics are unchanged.
+	dst := map[string]*uint64{
+		"MemTotal:":          &m.MemTotal,
+		"MemFree:":           &m.MemFree,
+		"MemAvailable:":      &m.MemAvailable,
+		"Buffers:":           &m.Buffers,
+		"Cached:":            &m.Cached,
+		"SwapCached:":        &m.SwapCached,
+		"Active:":            &m.Active,
+		"Inactive:":          &m.Inactive,
+		"Active(anon):":      &m.ActiveAnon,
+		"Inactive(anon):":    &m.InactiveAnon,
+		"Active(file):":      &m.ActiveFile,
+		"Inactive(file):":    &m.InactiveFile,
+		"Unevictable:":       &m.Unevictable,
+		"Mlocked:":           &m.Mlocked,
+		"SwapTotal:":         &m.SwapTotal,
+		"SwapFree:":          &m.SwapFree,
+		"Dirty:":             &m.Dirty,
+		"Writeback:":         &m.Writeback,
+		"AnonPages:":         &m.AnonPages,
+		"Mapped:":            &m.Mapped,
+		"Shmem:":             &m.Shmem,
+		"Slab:":              &m.Slab,
+		"SReclaimable:":      &m.SReclaimable,
+		"SUnreclaim:":        &m.SUnreclaim,
+		"KernelStack:":       &m.KernelStack,
+		"PageTables:":        &m.PageTables,
+		"NFS_Unstable:":      &m.NFSUnstable,
+		"Bounce:":            &m.Bounce,
+		"WritebackTmp:":      &m.WritebackTmp,
+		"CommitLimit:":       &m.CommitLimit,
+		"Committed_AS:":      &m.CommittedAS,
+		"VmallocTotal:":      &m.VmallocTotal,
+		"VmallocUsed:":       &m.VmallocUsed,
+		"VmallocChunk:":      &m.VmallocChunk,
+		"HardwareCorrupted:": &m.HardwareCorrupted,
+		"AnonHugePages:":     &m.AnonHugePages,
+		"ShmemHugePages:":    &m.ShmemHugePages,
+		"ShmemPmdMapped:":    &m.ShmemPmdMapped,
+		"CmaTotal:":          &m.CmaTotal,
+		"CmaFree:":           &m.CmaFree,
+		"HugePages_Total:":   &m.HugePagesTotal,
+		"HugePages_Free:":    &m.HugePagesFree,
+		"HugePages_Rsvd:":    &m.HugePagesRsvd,
+		"HugePages_Surp:":    &m.HugePagesSurp,
+		"Hugepagesize:":      &m.Hugepagesize,
+		"DirectMap4k:":       &m.DirectMap4k,
+		"DirectMap2M:":       &m.DirectMap2M,
+		"DirectMap1G:":       &m.DirectMap1G,
+	}
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Each line has at least a name and value; we ignore the unit.
+		fields := strings.Fields(s.Text())
+		if len(fields) < 2 {
+			return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
+		}
+
+		// Base 0 matches the original switch-based parser: decimal with
+		// optional 0x/0 prefixes. The value is parsed (and may error)
+		// even for labels that are not stored, as before.
+		v, err := strconv.ParseUint(fields[1], 0, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		if p, ok := dst[fields[0]]; ok {
+			*p = v
+		}
+	}
+
+	return &m, nil
+}
diff --git a/test/vendor/github.com/prometheus/procfs/mountinfo.go b/test/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 0000000000..bb01bb5a2a
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A MountInfo is a type that describes the details, options
+// for each mount, parsed from /proc/self/mountinfo.
+// The fields described in each entry of /proc/self/mountinfo
+// is described in the following man page.
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+	// Unique Id for the mount
+	MountId int
+	// The Id of the parent mount
+	ParentId int
+	// The value of `st_dev` for the files on this FS
+	MajorMinorVer string
+	// The pathname of the directory in the FS that forms
+	// the root for this mount
+	Root string
+	// The pathname of the mount point relative to the root
+	MountPoint string
+	// Mount options
+	Options map[string]string
+	// Zero or more optional fields
+	OptionalFields map[string]string
+	// The Filesystem type
+	FSType string
+	// FS specific information or "none"
+	Source string
+	// Superblock options
+	SuperOptions map[string]string
+}
+
+// parseMountInfo reads each line of the mountinfo file, and returns a list of
+// formatted MountInfo structs.
+func parseMountInfo(info []byte) ([]*MountInfo, error) {
+	mounts := []*MountInfo{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseMountInfoString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, parsedMounts)
+	}
+
+	err := scanner.Err()
+	return mounts, err
+}
+
+// parseMountInfoString parses a mountinfo file line, and converts it to a
+// MountInfo struct. An important check here is to see if the hyphen
+// separator exists, as if it does not, it means that the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+	var err error
+
+	mountInfo := strings.Split(mountString, " ")
+	mountInfoLength := len(mountInfo)
+	// A mountinfo line has 10 mandatory fields -- mount ID, parent ID,
+	// major:minor, root, mount point, per-mount options, the "-"
+	// separator, fstype, source and super options -- plus zero or more
+	// optional fields before the separator. The previous minimum of 11
+	// wrongly rejected valid lines that carry no optional fields.
+	if mountInfoLength < 10 {
+		return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+	}
+
+	// NOTE(review): this assumes fstype, source and super options contain
+	// no spaces, so the separator always sits at len-4 -- confirm against
+	// proc(5) for exotic mounts.
+	if mountInfo[mountInfoLength-4] != "-" {
+		return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+	}
+
+	mount := &MountInfo{
+		MajorMinorVer:  mountInfo[2],
+		Root:           mountInfo[3],
+		MountPoint:     mountInfo[4],
+		Options:        mountOptionsParser(mountInfo[5]),
+		OptionalFields: nil,
+		FSType:         mountInfo[mountInfoLength-3],
+		Source:         mountInfo[mountInfoLength-2],
+		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
+	}
+
+	mount.MountId, err = strconv.Atoi(mountInfo[0])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse mount ID")
+	}
+	mount.ParentId, err = strconv.Atoi(mountInfo[1])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse parent ID")
+	}
+	// Has optional fields, which is a space separated list of values.
+	// Example: shared:2 master:7
+	// When the line has no optional fields, mountInfo[6] is the "-"
+	// separator itself and the slice below is empty, yielding an empty
+	// (non-nil) map.
+	if mountInfo[6] != "" {
+		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return mount, nil
+}
+
+// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
+func mountOptionsIsValidField(s string) bool {
+	switch s {
+	case
+		"shared",
+		"master",
+		"propagate_from",
+		"unbindable":
+		return true
+	}
+	return false
+}
+
+// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
+	optionalFields := make(map[string]string)
+	for _, field := range o {
+		optionSplit := strings.SplitN(field, ":", 2)
+		value := ""
+		if len(optionSplit) == 2 {
+			value = optionSplit[1]
+		}
+		// Unknown tags are silently dropped rather than rejected.
+		if mountOptionsIsValidField(optionSplit[0]) {
+			optionalFields[optionSplit[0]] = value
+		}
+	}
+	return optionalFields, nil
+}
+
+// mountOptionsParser parses the mount options, superblock options.
+func mountOptionsParser(mountOptions string) map[string]string {
+	opts := make(map[string]string)
+	options := strings.Split(mountOptions, ",")
+	for _, opt := range options {
+		splitOption := strings.Split(opt, "=")
+		if len(splitOption) < 2 {
+			key := splitOption[0]
+			opts[key] = ""
+		} else {
+			key, value := splitOption[0], splitOption[1]
+			opts[key] = value
+		}
+	}
+	return opts
+}
+
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
+
+// GetProcMounts retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
diff --git a/test/vendor/github.com/prometheus/procfs/mountstats.go b/test/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 0000000000..35b2ef3513
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	// Number of fields in a "device ... mounted on ... with fstype ..." line.
+	deviceEntryLen = 8
+
+	// Expected field counts for the "bytes:" and "events:" lines.
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	// Expected "xprt:" field counts per stat version and protocol.
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+	Bytes NFSBytesStats
+	// Statistics related to various NFS event occurrences.
+	Events NFSEventsStats
+	// Statistics broken down by filesystem operation.
+	Operations []NFSOperationStats
+	// Statistics about the NFS RPC transport.
+	Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+	// Number of bytes read using the read() syscall.
+	Read uint64
+	// Number of bytes written using the write() syscall.
+	Write uint64
+	// Number of bytes read using the read() syscall in O_DIRECT mode.
+	DirectRead uint64
+	// Number of bytes written using the write() syscall in O_DIRECT mode.
+	DirectWrite uint64
+	// Number of bytes read from the NFS server, in total.
+	ReadTotal uint64
+	// Number of bytes written to the NFS server, in total.
+	WriteTotal uint64
+	// Number of pages read directly via mmap()'d files.
+	ReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+	// Number of times cached inode attributes are re-validated from the server.
+	InodeRevalidate uint64
+	// Number of times cached dentry nodes are re-validated from the server.
+	DnodeRevalidate uint64
+	// Number of times an inode cache is cleared.
+	DataInvalidate uint64
+	// Number of times cached inode attributes are invalidated.
+	AttributeInvalidate uint64
+	// Number of times files or directories have been open()'d.
+	VFSOpen uint64
+	// Number of times a directory lookup has occurred.
+	VFSLookup uint64
+	// Number of times permissions have been checked.
+	VFSAccess uint64
+	// Number of updates (and potential writes) to pages.
+	VFSUpdatePage uint64
+	// Number of pages read directly via mmap()'d files.
+	VFSReadPage uint64
+	// Number of times a group of pages have been read.
+	VFSReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	VFSWritePage uint64
+	// Number of times a group of pages have been written.
+	VFSWritePages uint64
+	// Number of times directory entries have been read with getdents().
+	VFSGetdents uint64
+	// Number of times attributes have been set on inodes.
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown.  Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueMilliseconds uint64
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseMilliseconds uint64
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestMilliseconds uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTimeSeconds uint64
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// ebetween sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
+		if len(ss) > deviceEntryLen {
+			// Only NFSv3 and v4 are supported for parsing statistics
+			if m.Type != nfs3Type && m.Type != nfs4Type {
+				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+			}
+
+			// NOTE(review): assumes field 9 is "statvers=..." on NFS
+			// entries -- confirm against the mountstats format docs.
+			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+			// parseMountStatsNFS continues consuming lines from the
+			// same scanner until the mount's stats are exhausted.
+			stats, err := parseMountStatsNFS(s, statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			m.Stats = stats
+		}
+
+		mounts = append(mounts, m)
+	}
+
+	return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//   device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+	if len(ss) < deviceEntryLen {
+		return nil, fmt.Errorf("invalid device entry: %v", ss)
+	}
+
+	// Check for specific words appearing at specific indices to ensure
+	// the format is consistent with what we expect
+	format := []struct {
+		i int
+		s string
+	}{
+		{i: 0, s: "device"},
+		{i: 2, s: "mounted"},
+		{i: 3, s: "on"},
+		{i: 5, s: "with"},
+		{i: 6, s: "fstype"},
+	}
+
+	for _, f := range format {
+		if ss[f.i] != f.s {
+			return nil, fmt.Errorf("invalid device entry: %v", ss)
+		}
+	}
+
+	return &Mount{
+		Device: ss[1],
+		Mount:  ss[4],
+		Type:   ss[7],
+	}, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+// The scanner is shared with the caller: this function consumes lines until
+// a blank line or the start of the per-op section, then hands the same
+// scanner to parseNFSOperationStats, so statement order here is significant.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+	// Field indicators for parsing specific types of data
+	const (
+		fieldOpts       = "opts:"
+		fieldAge        = "age:"
+		fieldBytes      = "bytes:"
+		fieldEvents     = "events:"
+		fieldPerOpStats = "per-op"
+		fieldTransport  = "xprt:"
+	)
+
+	stats := &MountStatsNFS{
+		StatVersion: statVersion,
+	}
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			break
+		}
+		if len(ss) < 2 {
+			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+		}
+
+		switch ss[0] {
+		case fieldOpts:
+			if stats.Opts == nil {
+				stats.Opts = map[string]string{}
+			}
+			// Options are comma-separated key=value pairs; flag-style
+			// options without '=' are stored with an empty value.
+			for _, opt := range strings.Split(ss[1], ",") {
+				split := strings.Split(opt, "=")
+				if len(split) == 2 {
+					stats.Opts[split[0]] = split[1]
+				} else {
+					stats.Opts[opt] = ""
+				}
+			}
+		case fieldAge:
+			// Age integer is in seconds
+			d, err := time.ParseDuration(ss[1] + "s")
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Age = d
+		case fieldBytes:
+			bstats, err := parseNFSBytesStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Bytes = *bstats
+		case fieldEvents:
+			estats, err := parseNFSEventsStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Events = *estats
+		case fieldTransport:
+			if len(ss) < 3 {
+				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+			}
+
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Transport = *tstats
+		}
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; hence why this 'if' statement
+		// is not just another switch case
+		if ss[0] == fieldPerOpStats {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// NFS per-operation stats appear last before the next device entry
+	perOpStats, err := parseNFSOperationStats(s)
+	if err != nil {
+		return nil, err
+	}
+
+	stats.Operations = perOpStats
+
+	return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+	if len(ss) != fieldBytesLen {
+		return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldBytesLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSBytesStats{
+		Read:        ns[0],
+		Write:       ns[1],
+		DirectRead:  ns[2],
+		DirectWrite: ns[3],
+		ReadTotal:   ns[4],
+		WriteTotal:  ns[5],
+		ReadPages:   ns[6],
+		WritePages:  ns[7],
+	}, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+	if len(ss) != fieldEventsLen {
+		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+	}
+
+	ns := make([]uint64, 0, fieldEventsLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSEventsStats{
+		InodeRevalidate:     ns[0],
+		DnodeRevalidate:     ns[1],
+		DataInvalidate:      ns[2],
+		AttributeInvalidate: ns[3],
+		VFSOpen:             ns[4],
+		VFSLookup:           ns[5],
+		VFSAccess:           ns[6],
+		VFSUpdatePage:       ns[7],
+		VFSReadPage:         ns[8],
+		VFSReadPages:        ns[9],
+		VFSWritePage:        ns[10],
+		VFSWritePages:       ns[11],
+		VFSGetdents:         ns[12],
+		VFSSetattr:          ns[13],
+		VFSFlush:            ns[14],
+		VFSFsync:            ns[15],
+		VFSLock:             ns[16],
+		VFSFileRelease:      ns[17],
+		CongestionWait:      ns[18],
+		Truncation:          ns[19],
+		WriteExtension:      ns[20],
+		SillyRename:         ns[21],
+		ShortRead:           ns[22],
+		ShortWrite:          ns[23],
+		JukeboxDelay:        ns[24],
+		PNFSRead:            ns[25],
+		PNFSWrite:           ns[26],
+	}, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+	const (
+		// Number of expected fields in each per-operation statistics set
+		numFields = 9
+	)
+
+	var ops []NFSOperationStats
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			// Must break when reading a blank line after per-operation stats to
+			// enable top-level function to parse the next device entry
+			break
+		}
+
+		if len(ss) != numFields {
+			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+		}
+
+		// Skip string operation name for integers
+		ns := make([]uint64, 0, numFields-1)
+		for _, st := range ss[1:] {
+			n, err := strconv.ParseUint(st, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+
+			ns = append(ns, n)
+		}
+
+		ops = append(ops, NFSOperationStats{
+			Operation:                           strings.TrimSuffix(ss[0], ":"),
+			Requests:                            ns[0],
+			Transmissions:                       ns[1],
+			MajorTimeouts:                       ns[2],
+			BytesSent:                           ns[3],
+			BytesReceived:                       ns[4],
+			CumulativeQueueMilliseconds:         ns[5],
+			CumulativeTotalResponseMilliseconds: ns[6],
+			CumulativeTotalRequestMilliseconds:  ns[7],
+		})
+	}
+
+	return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
+	switch statVersion {
+	case statVersion10:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+		}
+	case statVersion11:
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+		}
+	default:
+		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	ns := make([]uint64, fieldTransport11TCPLen)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
+	return &NFSTransportStats{
+		Protocol:                 protocol,
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTimeSeconds:          ns[4],
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+		MaximumRPCSlotsUsed:      ns[10],
+		CumulativeSendingQueue:   ns[11],
+		CumulativePendingQueue:   ns[12],
+	}, nil
+}
diff --git a/test/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/test/vendor/github.com/prometheus/procfs/net_conntrackstat.go
new file mode 100644
index 0000000000..1e27c83d50
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -0,0 +1,153 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
+// and contains netfilter conntrack statistics at one CPU core
+type ConntrackStatEntry struct {
+	Entries       uint64
+	Found         uint64
+	Invalid       uint64
+	Ignore        uint64
+	Insert        uint64
+	InsertFailed  uint64
+	Drop          uint64
+	EarlyDrop     uint64
+	SearchRestart uint64
+}
+
+// Retrieves netfilter's conntrack statistics, split by CPU cores
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
+	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
+}
+
+// Parses a slice of ConntrackStatEntries from the given filepath
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseConntrackStat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+	}
+
+	return stat, nil
+}
+
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
+	var entries []ConntrackStatEntry
+
+	scanner := bufio.NewScanner(r)
+	// The first Scan discards the leading line (the column header).
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		conntrackEntry, err := parseConntrackStatEntry(fields)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, *conntrackEntry)
+	}
+
+	return entries, nil
+}
+
+// Parses a ConntrackStatEntry from given array of fields.
+// All values are hexadecimal (see parseConntrackStatField); the field
+// indices used below follow the nf_conntrack column layout.
+// NOTE(review): the exact-17-column requirement will reject output from
+// kernels whose column set differs -- confirm against target kernels.
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
+	if len(fields) != 17 {
+		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
+	}
+	entry := &ConntrackStatEntry{}
+
+	entries, err := parseConntrackStatField(fields[0])
+	if err != nil {
+		return nil, err
+	}
+	entry.Entries = entries
+
+	found, err := parseConntrackStatField(fields[2])
+	if err != nil {
+		return nil, err
+	}
+	entry.Found = found
+
+	invalid, err := parseConntrackStatField(fields[4])
+	if err != nil {
+		return nil, err
+	}
+	entry.Invalid = invalid
+
+	ignore, err := parseConntrackStatField(fields[5])
+	if err != nil {
+		return nil, err
+	}
+	entry.Ignore = ignore
+
+	insert, err := parseConntrackStatField(fields[8])
+	if err != nil {
+		return nil, err
+	}
+	entry.Insert = insert
+
+	insertFailed, err := parseConntrackStatField(fields[9])
+	if err != nil {
+		return nil, err
+	}
+	entry.InsertFailed = insertFailed
+
+	drop, err := parseConntrackStatField(fields[10])
+	if err != nil {
+		return nil, err
+	}
+	entry.Drop = drop
+
+	earlyDrop, err := parseConntrackStatField(fields[11])
+	if err != nil {
+		return nil, err
+	}
+	entry.EarlyDrop = earlyDrop
+
+	searchRestart, err := parseConntrackStatField(fields[16])
+	if err != nil {
+		return nil, err
+	}
+	entry.SearchRestart = searchRestart
+
+	return entry, nil
+}
+
+// Parses a uint64 from given hex in string
+func parseConntrackStatField(field string) (uint64, error) {
+	val, err := strconv.ParseUint(field, 16, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+	}
+	return val, err
+}
diff --git a/test/vendor/github.com/prometheus/procfs/net_dev.go b/test/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 0000000000..47a710befb
--- /dev/null
+++ b/test/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,205 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name      string `json:"name"`       // The name of the interface.
+	RxBytes   uint64 `json:"rx_bytes"`   // Cumulative count of bytes received.
+	RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+	RxErrors  uint64 `json:"rx_errors"`  // Cumulative count of receive errors encountered.
+	RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+	RxFIFO    uint64 `json:"rx_fifo"`    // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NetDev() (NetDev, error) { + return newNetDev(fs.proc.Path("net/dev")) +} + +// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + netDev := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. 
+ if n < 2 { + continue + } + + line, err := netDev.parseLine(s.Text()) + if err != nil { + return netDev, err + } + + netDev[line.Name] = *line + } + + return netDev, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. +func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + 
} + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma separated list of interface names. +func (netDev NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(netDev)) + for _, ifc := range netDev { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/test/vendor/github.com/prometheus/procfs/net_sockstat.go b/test/vendor/github.com/prometheus/procfs/net_sockstat.go new file mode 100644 index 0000000000..f91ef55237 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -0,0 +1,163 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, +// respectively. +type NetSockstat struct { + // Used is non-nil for IPv4 sockstat results, but nil for IPv6. + Used *int + Protocols []NetSockstatProtocol +} + +// A NetSockstatProtocol contains statistics about a given socket protocol. +// Pointer fields indicate that the value may or may not be present on any +// given protocol. +type NetSockstatProtocol struct { + Protocol string + InUse int + Orphan *int + TW *int + Alloc *int + Mem *int + Memory *int +} + +// NetSockstat retrieves IPv4 socket statistics. +func (fs FS) NetSockstat() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat")) +} + +// NetSockstat6 retrieves IPv6 socket statistics. +// +// If IPv6 is disabled on this kernel, the returned error can be checked with +// os.IsNotExist. +func (fs FS) NetSockstat6() (*NetSockstat, error) { + return readSockstat(fs.proc.Path("net", "sockstat6")) +} + +// readSockstat opens and parses a NetSockstat from the input file. +func readSockstat(name string) (*NetSockstat, error) { + // This file is small and can be read with one syscall. + b, err := util.ReadFileNoStat(name) + if err != nil { + // Do not wrap this error so the caller can detect os.IsNotExist and + // similar conditions. 
+ return nil, err + } + + stat, err := parseSockstat(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) + } + + return stat, nil +} + +// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. +func parseSockstat(r io.Reader) (*NetSockstat, error) { + var stat NetSockstat + s := bufio.NewScanner(r) + for s.Scan() { + // Expect a minimum of a protocol and one key/value pair. + fields := strings.Split(s.Text(), " ") + if len(fields) < 3 { + return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + } + + // The remaining fields are key/value pairs. + kvs, err := parseSockstatKVs(fields[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) + } + + // The first field is the protocol. We must trim its colon suffix. + proto := strings.TrimSuffix(fields[0], ":") + switch proto { + case "sockets": + // Special case: IPv4 has a sockets "used" key/value pair that we + // embed at the top level of the structure. + used := kvs["used"] + stat.Used = &used + default: + // Parse all other lines as individual protocols. + nsp := parseSockstatProtocol(kvs) + nsp.Protocol = proto + stat.Protocols = append(stat.Protocols, nsp) + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + return &stat, nil +} + +// parseSockstatKVs parses a string slice into a map of key/value pairs. +func parseSockstatKVs(kvs []string) (map[string]int, error) { + if len(kvs)%2 != 0 { + return nil, errors.New("odd number of fields in key/value pairs") + } + + // Iterate two values at a time to gather key/value pairs. + out := make(map[string]int, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + vp := util.NewValueParser(kvs[i+1]) + out[kvs[i]] = vp.Int() + + if err := vp.Err(); err != nil { + return nil, err + } + } + + return out, nil +} + +// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. 
+func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { + var nsp NetSockstatProtocol + for k, v := range kvs { + // Capture the range variable to ensure we get unique pointers for + // each of the optional fields. + v := v + switch k { + case "inuse": + nsp.InUse = v + case "orphan": + nsp.Orphan = &v + case "tw": + nsp.TW = &v + case "alloc": + nsp.Alloc = &v + case "mem": + nsp.Mem = &v + case "memory": + nsp.Memory = &v + } + } + + return nsp +} diff --git a/test/vendor/github.com/prometheus/procfs/net_softnet.go b/test/vendor/github.com/prometheus/procfs/net_softnet.go new file mode 100644 index 0000000000..2a65cf19f0 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/net_softnet.go @@ -0,0 +1,98 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. 
+ +// SoftnetStat contains a single row of data from /proc/net/softnet_stat +type SoftnetStat struct { + // Number of processed packets + Processed uint32 + // Number of dropped packets + Dropped uint32 + // Number of times processing packets ran out of quota + TimeSqueezed uint32 +} + +// NetSoftnetStat reads data from /proc/net/softnet_stat. +func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/softnet_stat")) + if err != nil { + return nil, err + } + + entries, err := parseSoftnet(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) + } + + return entries, nil +} + +func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { + const expectedColumns = 11 + + s := bufio.NewScanner(r) + + var stats []SoftnetStat + for s.Scan() { + columns := strings.Fields(s.Text()) + width := len(columns) + + if width != 11 { + return nil, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) + } + + // We only parse the first three columns at the moment. 
+ us, err := parseHexUint32s(columns[0:3]) + if err != nil { + return nil, err + } + + stats = append(stats, SoftnetStat{ + Processed: us[0], + Dropped: us[1], + TimeSqueezed: us[2], + }) + } + + return stats, nil +} + +func parseHexUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/net_udp.go b/test/vendor/github.com/prometheus/procfs/net_udp.go new file mode 100644 index 0000000000..d017e3f18d --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/net_udp.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" +) + +const ( + // readLimit is used by io.LimitReader while reading the content of the + // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic + // as each line represents a single used socket. + // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. + // With e.g. 150 Byte per line and the maximum number of 65535, + // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. 
+ readLimit = 4294967296 // Byte -> 4 GiB +) + +type ( + // NetUDP represents the contents of /proc/net/udp{,6} file without the header. + NetUDP []*netUDPLine + + // NetUDPSummary provides already computed values like the total queue lengths or + // the total number of used sockets. In contrast to NetUDP it does not collect + // the parsed lines into a slice. + NetUDPSummary struct { + // TxQueueLength shows the total queue length of all parsed tx_queue lengths. + TxQueueLength uint64 + // RxQueueLength shows the total queue length of all parsed rx_queue lengths. + RxQueueLength uint64 + // UsedSockets shows the total number of parsed lines representing the + // number of used sockets. + UsedSockets uint64 + } + + // netUDPLine represents the fields parsed from a single line + // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. + // For the proc file format details, see https://linux.die.net/man/5/proc. + netUDPLine struct { + Sl uint64 + LocalAddr net.IP + LocalPort uint64 + RemAddr net.IP + RemPort uint64 + St uint64 + TxQueue uint64 + RxQueue uint64 + UID uint64 + } +) + +// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp. +func (fs FS) NetUDP() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp")) +} + +// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams +// read from /proc/net/udp6. +func (fs FS) NetUDP6() (NetUDP, error) { + return newNetUDP(fs.proc.Path("net/udp6")) +} + +// NetUDPSummary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp. +func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp")) +} + +// NetUDP6Summary returns already computed statistics like the total queue lengths +// for UDP datagrams read from /proc/net/udp6. 
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { + return newNetUDPSummary(fs.proc.Path("net/udp6")) +} + +// newNetUDP creates a new NetUDP{,6} from the contents of the given file. +func newNetUDP(file string) (NetUDP, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDP := NetUDP{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDP = append(netUDP, line) + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDP, nil +} + +// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. +func newNetUDPSummary(file string) (*NetUDPSummary, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + netUDPSummary := &NetUDPSummary{} + + lr := io.LimitReader(f, readLimit) + s := bufio.NewScanner(lr) + s.Scan() // skip first line with headers + for s.Scan() { + fields := strings.Fields(s.Text()) + line, err := parseNetUDPLine(fields) + if err != nil { + return nil, err + } + netUDPSummary.TxQueueLength += line.TxQueue + netUDPSummary.RxQueueLength += line.RxQueue + netUDPSummary.UsedSockets++ + } + if err := s.Err(); err != nil { + return nil, err + } + return netUDPSummary, nil +} + +// parseNetUDPLine parses a single line, represented by a list of fields. 
+func parseNetUDPLine(fields []string) (*netUDPLine, error) { + line := &netUDPLine{} + if len(fields) < 8 { + return nil, fmt.Errorf( + "cannot parse net udp socket line as it has less then 8 columns: %s", + strings.Join(fields, " "), + ) + } + var err error // parse error + + // sl + s := strings.Split(fields[0], ":") + if len(s) != 2 { + return nil, fmt.Errorf( + "cannot parse sl field in udp socket line: %s", fields[0]) + } + + if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) + } + // local_address + l := strings.Split(fields[1], ":") + if len(l) != 2 { + return nil, fmt.Errorf( + "cannot parse local_address field in udp socket line: %s", fields[1]) + } + if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address value in udp socket line: %s", err) + } + if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse local_address port value in udp socket line: %s", err) + } + + // remote_address + r := strings.Split(fields[2], ":") + if len(r) != 2 { + return nil, fmt.Errorf( + "cannot parse rem_address field in udp socket line: %s", fields[1]) + } + if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address value in udp socket line: %s", err) + } + if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse rem_address port value in udp socket line: %s", err) + } + + // st + if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse st value in udp socket line: %s", err) + } + + // tx_queue and rx_queue + q := strings.Split(fields[4], ":") + if len(q) != 2 { + return nil, fmt.Errorf( + "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", + fields[4], + ) + } + if line.TxQueue, err = 
strconv.ParseUint(q[0], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) + } + if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { + return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) + } + + // uid + if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { + return nil, fmt.Errorf( + "cannot parse uid value in udp socket line: %s", err) + } + + return line, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/net_unix.go b/test/vendor/github.com/prometheus/procfs/net_unix.go new file mode 100644 index 0000000000..c55b4b18e4 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/net_unix.go @@ -0,0 +1,257 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// For the proc file format details, +// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 +// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. + +// Constants for the various /proc/net/unix enumerations. +// TODO: match against x/sys/unix or similar? 
+const ( + netUnixTypeStream = 1 + netUnixTypeDgram = 2 + netUnixTypeSeqpacket = 5 + + netUnixFlagDefault = 0 + netUnixFlagListen = 1 << 16 + + netUnixStateUnconnected = 1 + netUnixStateConnecting = 2 + netUnixStateConnected = 3 + netUnixStateDisconnected = 4 +) + +// NetUNIXType is the type of the type field. +type NetUNIXType uint64 + +// NetUNIXFlags is the type of the flags field. +type NetUNIXFlags uint64 + +// NetUNIXState is the type of the state field. +type NetUNIXState uint64 + +// NetUNIXLine represents a line of /proc/net/unix. +type NetUNIXLine struct { + KernelPtr string + RefCount uint64 + Protocol uint64 + Flags NetUNIXFlags + Type NetUNIXType + State NetUNIXState + Inode uint64 + Path string +} + +// NetUNIX holds the data read from /proc/net/unix. +type NetUNIX struct { + Rows []*NetUNIXLine +} + +// NetUNIX returns data read from /proc/net/unix. +func (fs FS) NetUNIX() (*NetUNIX, error) { + return readNetUNIX(fs.proc.Path("net/unix")) +} + +// readNetUNIX reads data in /proc/net/unix format from the specified file. +func readNetUNIX(file string) (*NetUNIX, error) { + // This file could be quite large and a streaming read is desirable versus + // reading the entire contents at once. + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + return parseNetUNIX(f) +} + +// parseNetUNIX creates a NetUnix structure from the incoming stream. +func parseNetUNIX(r io.Reader) (*NetUNIX, error) { + // Begin scanning by checking for the existence of Inode. + s := bufio.NewScanner(r) + s.Scan() + + // From the man page of proc(5), it does not contain an Inode field, + // but in actually it exists. This code works for both cases. 
+ hasInode := strings.Contains(s.Text(), "Inode") + + // Expect a minimum number of fields, but Inode and Path are optional: + // Num RefCount Protocol Flags Type St Inode Path + minFields := 6 + if hasInode { + minFields++ + } + + var nu NetUNIX + for s.Scan() { + line := s.Text() + item, err := nu.parseLine(line, hasInode, minFields) + if err != nil { + return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) + } + + nu.Rows = append(nu.Rows, item) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) + } + + return &nu, nil +} + +func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { + fields := strings.Fields(line) + + l := len(fields) + if l < min { + return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + } + + // Field offsets are as follows: + // Num RefCount Protocol Flags Type St Inode Path + + kernelPtr := strings.TrimSuffix(fields[0], ":") + + users, err := u.parseUsers(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) + } + + flags, err := u.parseFlags(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) + } + + typ, err := u.parseType(fields[4]) + if err != nil { + return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) + } + + state, err := u.parseState(fields[5]) + if err != nil { + return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) + } + + var inode uint64 + if hasInode { + inode, err = u.parseInode(fields[6]) + if err != nil { + return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) + } + } + + n := &NetUNIXLine{ + KernelPtr: kernelPtr, + RefCount: users, + Type: typ, + Flags: flags, + State: state, + Inode: inode, + } + + // Path field is optional. + if l > min { + // Path occurs at either index 6 or 7 depending on whether inode is + // already present. 
+ pathIdx := 7 + if !hasInode { + pathIdx-- + } + + n.Path = fields[pathIdx] + } + + return n, nil +} + +func (u NetUNIX) parseUsers(s string) (uint64, error) { + return strconv.ParseUint(s, 16, 32) +} + +func (u NetUNIX) parseType(s string) (NetUNIXType, error) { + typ, err := strconv.ParseUint(s, 16, 16) + if err != nil { + return 0, err + } + + return NetUNIXType(typ), nil +} + +func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { + flags, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return 0, err + } + + return NetUNIXFlags(flags), nil +} + +func (u NetUNIX) parseState(s string) (NetUNIXState, error) { + st, err := strconv.ParseInt(s, 16, 8) + if err != nil { + return 0, err + } + + return NetUNIXState(st), nil +} + +func (u NetUNIX) parseInode(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func (t NetUNIXType) String() string { + switch t { + case netUnixTypeStream: + return "stream" + case netUnixTypeDgram: + return "dgram" + case netUnixTypeSeqpacket: + return "seqpacket" + } + return "unknown" +} + +func (f NetUNIXFlags) String() string { + switch f { + case netUnixFlagListen: + return "listen" + default: + return "default" + } +} + +func (s NetUNIXState) String() string { + switch s { + case netUnixStateUnconnected: + return "unconnected" + case netUnixStateConnecting: + return "connecting" + case netUnixStateConnected: + return "connected" + case netUnixStateDisconnected: + return "disconnected" + } + return "unknown" +} diff --git a/test/vendor/github.com/prometheus/procfs/proc.go b/test/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 0000000000..330e472c70 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,298 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs fs.FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.proc.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.Proc(pid) +} + +// NewProc returns a process for the given pid. 
+// +// Deprecated: use fs.Proc() instead +func (fs FS) NewProc(pid int) (Proc, error) { + return fs.Proc(pid) +} + +// Proc returns a process for the given pid. +func (fs FS) Proc(pid int) (Proc, error) { + if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs.proc}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.proc.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs.proc}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(p.path("cmdline")) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + data, err := util.ReadFileNoStat(p.path("comm")) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. 
+func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +// MountInfo retrieves mount information for mount points in a +// process's namespace. 
+// It supplies information missing in `/proc/self/mounts` and +// fixes various other problems with that file too. +func (p Proc) MountInfo() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(p.path("mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} + +// FileDescriptorsInfo retrieves information about all file descriptors of +// the process. +func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + var fdinfos ProcFDInfos + + for _, n := range names { + fdinfo, err := p.FDInfo(n) + if err != nil { + continue + } + fdinfos = append(fdinfos, *fdinfo) + } + + return fdinfos, nil +} + +// Schedstat returns task scheduling information for the process. +func (p Proc) Schedstat() (ProcSchedstat, error) { + contents, err := ioutil.ReadFile(p.path("schedstat")) + if err != nil { + return ProcSchedstat{}, err + } + return parseProcSchedstat(string(contents)) +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_environ.go b/test/vendor/github.com/prometheus/procfs/proc_environ.go new file mode 100644 index 0000000000..6134b3580c --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_environ.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Environ reads process environments from /proc//environ +func (p Proc) Environ() ([]string, error) { + environments := make([]string, 0) + + data, err := util.ReadFileNoStat(p.path("environ")) + if err != nil { + return environments, err + } + + environments = strings.Split(string(data), "\000") + if len(environments) > 0 { + environments = environments[:len(environments)-1] + } + + return environments, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/test/vendor/github.com/prometheus/procfs/proc_fdinfo.go new file mode 100644 index 0000000000..4e7597f86b --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -0,0 +1,125 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "bufio" + "bytes" + "regexp" + + "github.com/prometheus/procfs/internal/util" +) + +// Regexp variables +var ( + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) +) + +// ProcFDInfo contains represents file descriptor information. +type ProcFDInfo struct { + // File descriptor + FD string + // File offset + Pos string + // File access mode and status flags + Flags string + // Mount point ID + MntID string + // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) + InotifyInfos []InotifyInfo +} + +// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. +func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { + data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) + if err != nil { + return nil, err + } + + var text, pos, flags, mntid string + var inotify []InotifyInfo + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + text = scanner.Text() + if rPos.MatchString(text) { + pos = rPos.FindStringSubmatch(text)[1] + } else if rFlags.MatchString(text) { + flags = rFlags.FindStringSubmatch(text)[1] + } else if rMntID.MatchString(text) { + mntid = rMntID.FindStringSubmatch(text)[1] + } else if rInotify.MatchString(text) { + newInotify, err := parseInotifyInfo(text) + if err != nil { + return nil, err + } + inotify = append(inotify, *newInotify) + } + } + + i := &ProcFDInfo{ + FD: fd, + Pos: pos, + Flags: flags, + MntID: mntid, + InotifyInfos: inotify, + } + + return i, nil +} + +// InotifyInfo represents a single inotify line in the fdinfo file. +type InotifyInfo struct { + // Watch descriptor number + WD string + // Inode number + Ino string + // Device ID + Sdev string + // Mask of events being monitored + Mask string +} + +// InotifyInfo constructor. Only available on kernel 3.8+. 
+func parseInotifyInfo(line string) (*InotifyInfo, error) { + r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`) + m := r.FindStringSubmatch(line) + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: m[4], + } + return i, nil +} + +// ProcFDInfos represents a list of ProcFDInfo structs. +type ProcFDInfos []ProcFDInfo + +func (p ProcFDInfos) Len() int { return len(p) } +func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } + +// InotifyWatchLen returns the total number of inotify watches +func (p ProcFDInfos) InotifyWatchLen() (int, error) { + length := 0 + for _, f := range p { + length += len(f.InotifyInfos) + } + + return length, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_io.go b/test/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 0000000000..776f349717 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. 
+ ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// IO creates a new ProcIO instance from a given Proc instance. +func (p Proc) IO() (ProcIO, error) { + pio := ProcIO{} + + data, err := util.ReadFileNoStat(p.path("io")) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_limits.go b/test/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 0000000000..91ee24df8b --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,157 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. 
+ CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. +// +// Deprecated: use p.Limits() instead +func (p Proc) NewLimits() (ProcLimits, error) { + return p.Limits() +} + +// Limits returns the current soft limits of the process. 
+func (p Proc) Limits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int64, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return i, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_ns.go b/test/vendor/github.com/prometheus/procfs/proc_ns.go new file 
mode 100644 index 0000000000..c66740ff74 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// Namespaces reads from /proc//ns/* to get the namespaces of which the +// process is a member. 
+func (p Proc) Namespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_psi.go b/test/vendor/github.com/prometheus/procfs/proc_psi.go new file mode 100644 index 0000000000..0d7bee54ca --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_psi.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// The PSI / pressure interface is described at +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt +// Each resource (cpu, io, memory, ...) is exposed as a single file. 
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. +// Each line contains several averages (over n seconds) and a total in µs. +// +// Example io pressure file: +// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 +// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" + +// PSILine is a single line of values as returned by /proc/pressure/* +// The Avg entries are averages over n seconds, as a percentage +// The Total line is in microseconds +type PSILine struct { + Avg10 float64 + Avg60 float64 + Avg300 float64 + Total uint64 +} + +// PSIStats represent pressure stall information from /proc/pressure/* +// Some indicates the share of time in which at least some tasks are stalled +// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +type PSIStats struct { + Some *PSILine + Full *PSILine +} + +// PSIStatsForResource reads pressure stall information for the specified +// resource from /proc/pressure/. At time of writing this can be +// either "cpu", "memory" or "io". 
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) + if err != nil { + return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) + } + + return parsePSIStats(resource, bytes.NewReader(data)) +} + +// parsePSIStats parses the specified file for pressure stall information +func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { + psiStats := PSIStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + l := scanner.Text() + prefix := strings.Split(l, " ")[0] + switch prefix { + case "some": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Some = &psi + case "full": + psi := PSILine{} + _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) + if err != nil { + return PSIStats{}, err + } + psiStats.Full = &psi + default: + // If we encounter a line with an unknown prefix, ignore it and move on + // Should new measurement types be added in the future we'll simply ignore them instead + // of erroring on retrieval + continue + } + } + + return psiStats, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_stat.go b/test/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 0000000000..4517d2e9dd --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,192 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "os" + + "github.com/prometheus/procfs/internal/fs" + "github.com/prometheus/procfs/internal/util" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. 
+ TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize uint + // Resident set size in pages. + RSS int + + proc fs.FS +} + +// NewStat returns the current status information of the process. +// +// Deprecated: use p.Stat() instead +func (p Proc) NewStat() (ProcStat, error) { + return p.Stat() +} + +// Stat returns the current status information of the process. 
+func (p Proc) Stat() (ProcStat, error) { + data, err := util.ReadFileNoStat(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, proc: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() uint { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + fs := FS{proc: s.proc} + stat, err := fs.Stat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/test/vendor/github.com/prometheus/procfs/proc_status.go b/test/vendor/github.com/prometheus/procfs/proc_status.go new file mode 100644 index 0000000000..17b45167ec --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/proc_status.go @@ -0,0 +1,166 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcStatus provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStatus struct { + // The process ID. + PID int + // The process name. + Name string + + // Thread group ID. + TGID int + + // Peak virtual memory size. + VmPeak uint64 + // Virtual memory size. + VmSize uint64 + // Locked memory size. + VmLck uint64 + // Pinned memory size. + VmPin uint64 + // Peak resident set size. + VmHWM uint64 + // Resident set size (sum of RssAnnon RssFile and RssShmem). + VmRSS uint64 + // Size of resident anonymous memory. + RssAnon uint64 + // Size of resident file mappings. + RssFile uint64 + // Size of resident shared memory. + RssShmem uint64 + // Size of data segments. + VmData uint64 + // Size of stack segments. + VmStk uint64 + // Size of text segments. + VmExe uint64 + // Shared library code size. + VmLib uint64 + // Page table entries size. + VmPTE uint64 + // Size of second-level page tables. + VmPMD uint64 + // Swapped-out virtual memory size by anonymous private. + VmSwap uint64 + // Size of hugetlb memory portions + HugetlbPages uint64 + + // Number of voluntary context switches. + VoluntaryCtxtSwitches uint64 + // Number of involuntary context switches. + NonVoluntaryCtxtSwitches uint64 + + // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) + UIDs [4]string +} + +// NewStatus returns the current status information of the process. 
+func (p Proc) NewStatus() (ProcStatus, error) { + data, err := util.ReadFileNoStat(p.path("status")) + if err != nil { + return ProcStatus{}, err + } + + s := ProcStatus{PID: p.PID} + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if !bytes.Contains([]byte(line), []byte(":")) { + continue + } + + kv := strings.SplitN(line, ":", 2) + + // removes spaces + k := string(strings.TrimSpace(kv[0])) + v := string(strings.TrimSpace(kv[1])) + // removes "kB" + v = string(bytes.Trim([]byte(v), " kB")) + + // value to int when possible + // we can skip error check here, 'cause vKBytes is not used when value is a string + vKBytes, _ := strconv.ParseUint(v, 10, 64) + // convert kB to B + vBytes := vKBytes * 1024 + + s.fillStatus(k, v, vKBytes, vBytes) + } + + return s, nil +} + +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { + switch k { + case "Tgid": + s.TGID = int(vUint) + case "Name": + s.Name = vString + case "Uid": + copy(s.UIDs[:], strings.Split(vString, "\t")) + case "VmPeak": + s.VmPeak = vUintBytes + case "VmSize": + s.VmSize = vUintBytes + case "VmLck": + s.VmLck = vUintBytes + case "VmPin": + s.VmPin = vUintBytes + case "VmHWM": + s.VmHWM = vUintBytes + case "VmRSS": + s.VmRSS = vUintBytes + case "RssAnon": + s.RssAnon = vUintBytes + case "RssFile": + s.RssFile = vUintBytes + case "RssShmem": + s.RssShmem = vUintBytes + case "VmData": + s.VmData = vUintBytes + case "VmStk": + s.VmStk = vUintBytes + case "VmExe": + s.VmExe = vUintBytes + case "VmLib": + s.VmLib = vUintBytes + case "VmPTE": + s.VmPTE = vUintBytes + case "VmPMD": + s.VmPMD = vUintBytes + case "VmSwap": + s.VmSwap = vUintBytes + case "HugetlbPages": + s.HugetlbPages = vUintBytes + case "voluntary_ctxt_switches": + s.VoluntaryCtxtSwitches = vUint + case "nonvoluntary_ctxt_switches": + s.NonVoluntaryCtxtSwitches = vUint + } +} + +// TotalCtxtSwitches returns the total context switch. 
+func (s ProcStatus) TotalCtxtSwitches() uint64 { + return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches +} diff --git a/test/vendor/github.com/prometheus/procfs/schedstat.go b/test/vendor/github.com/prometheus/procfs/schedstat.go new file mode 100644 index 0000000000..a4c4089ac5 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/schedstat.go @@ -0,0 +1,118 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "regexp" + "strconv" +) + +var ( + cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) + procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) +) + +// Schedstat contains scheduler statistics from /proc/schedstat +// +// See +// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt +// for a detailed description of what these numbers mean. +// +// Note the current kernel documentation claims some of the time units are in +// jiffies when they are actually in nanoseconds since 2.6.23 with the +// introduction of CFS. A fix to the documentation is pending. 
See +// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 +type Schedstat struct { + CPUs []*SchedstatCPU +} + +// SchedstatCPU contains the values from one "cpu" line +type SchedstatCPU struct { + CPUNum string + + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// ProcSchedstat contains the values from /proc//schedstat +type ProcSchedstat struct { + RunningNanoseconds uint64 + WaitingNanoseconds uint64 + RunTimeslices uint64 +} + +// Schedstat reads data from /proc/schedstat +func (fs FS) Schedstat() (*Schedstat, error) { + file, err := os.Open(fs.proc.Path("schedstat")) + if err != nil { + return nil, err + } + defer file.Close() + + stats := &Schedstat{} + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + match := cpuLineRE.FindStringSubmatch(scanner.Text()) + if match != nil { + cpu := &SchedstatCPU{} + cpu.CPUNum = match[1] + + cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) + if err != nil { + continue + } + + cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) + if err != nil { + continue + } + + cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) + if err != nil { + continue + } + + stats.CPUs = append(stats.CPUs, cpu) + } + } + + return stats, nil +} + +func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { + match := procLineRE.FindStringSubmatch(contents) + + if match != nil { + stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) + if err != nil { + return + } + + stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) + if err != nil { + return + } + + stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) + return + } + + err = errors.New("could not parse schedstat") + return +} diff --git a/test/vendor/github.com/prometheus/procfs/stat.go b/test/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 0000000000..b2a6fc994c --- /dev/null +++ 
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/fs"
	"github.com/prometheus/procfs/internal/util"
)

// CPUStat shows how much time the cpu spend in various stages.
type CPUStat struct {
	User      float64
	Nice      float64
	System    float64
	Idle      float64
	Iowait    float64
	IRQ       float64
	SoftIRQ   float64
	Steal     float64
	Guest     float64
	GuestNice float64
}

// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
// It is possible to get per-cpu stats by reading /proc/softirqs
type SoftIRQStat struct {
	Hi          uint64
	Timer       uint64
	NetTx       uint64
	NetRx       uint64
	Block       uint64
	BlockIoPoll uint64
	Tasklet     uint64
	Sched       uint64
	Hrtimer     uint64
	Rcu         uint64
}

// Stat represents kernel/system statistics.
type Stat struct {
	// Boot time in seconds since the Epoch.
	BootTime uint64
	// Summed up cpu statistics.
	CPUTotal CPUStat
	// Per-CPU statistics.
	CPU []CPUStat
	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
	IRQTotal uint64
	// Number of times a numbered IRQ was triggered.
	IRQ []uint64
	// Number of times a context switch happened.
	ContextSwitches uint64
	// Number of times a process was created.
	ProcessCreated uint64
	// Number of processes currently running.
	ProcessesRunning uint64
	// Number of processes currently blocked (waiting for IO).
	ProcessesBlocked uint64
	// Number of times a softirq was scheduled.
	SoftIRQTotal uint64
	// Detailed softirq statistics.
	SoftIRQ SoftIRQStat
}

// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
	cpuStat := CPUStat{}
	var cpu string

	// io.EOF from Sscanf is tolerated below, so lines carrying fewer than the
	// full 10 value columns still parse; count records how many matched.
	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
		&cpu,
		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
		&cpuStat.Guest, &cpuStat.GuestNice)

	if err != nil && err != io.EOF {
		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
	}
	if count == 0 {
		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
	}

	// Values are divided by userHZ; presumably this converts clock ticks to
	// seconds — confirm against the userHZ definition elsewhere in the package.
	cpuStat.User /= userHZ
	cpuStat.Nice /= userHZ
	cpuStat.System /= userHZ
	cpuStat.Idle /= userHZ
	cpuStat.Iowait /= userHZ
	cpuStat.IRQ /= userHZ
	cpuStat.SoftIRQ /= userHZ
	cpuStat.Steal /= userHZ
	cpuStat.Guest /= userHZ
	cpuStat.GuestNice /= userHZ

	// A bare "cpu" label is the aggregate line; otherwise the suffix after
	// "cpu" is the CPU id.
	if cpu == "cpu" {
		return cpuStat, -1, nil
	}

	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
	if err != nil {
		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
	}

	return cpuStat, cpuID, nil
}

// Parse a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func NewStat() (Stat, error) { + fs, err := NewFS(fs.DefaultProcMountPoint) + if err != nil { + return Stat{}, err + } + return fs.Stat() +} + +// NewStat returns information about current cpu/process statistics. +// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// +// Deprecated: use fs.Stat() instead +func (fs FS) NewStat() (Stat, error) { + return fs.Stat() +} + +// Stat returns information about current cpu/process statistics. 
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +func (fs FS) Stat() (Stat, error) { + fileName := fs.proc.Path("stat") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Stat{}, err + } + + stat := Stat{} + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != 
nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) + } + + return stat, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/swaps.go b/test/vendor/github.com/prometheus/procfs/swaps.go new file mode 100644 index 0000000000..15edc2212b --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/swaps.go @@ -0,0 +1,89 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Swap represents an entry in /proc/swaps. +type Swap struct { + Filename string + Type string + Size int + Used int + Priority int +} + +// Swaps returns a slice of all configured swap devices on the system. 
+func (fs FS) Swaps() ([]*Swap, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) + if err != nil { + return nil, err + } + return parseSwaps(data) +} + +func parseSwaps(info []byte) ([]*Swap, error) { + swaps := []*Swap{} + scanner := bufio.NewScanner(bytes.NewReader(info)) + scanner.Scan() // ignore header line + for scanner.Scan() { + swapString := scanner.Text() + parsedSwap, err := parseSwapString(swapString) + if err != nil { + return nil, err + } + swaps = append(swaps, parsedSwap) + } + + err := scanner.Err() + return swaps, err +} + +func parseSwapString(swapString string) (*Swap, error) { + var err error + + swapFields := strings.Fields(swapString) + swapLength := len(swapFields) + if swapLength < 5 { + return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + } + + swap := &Swap{ + Filename: swapFields[0], + Type: swapFields[1], + } + + swap.Size, err = strconv.Atoi(swapFields[2]) + if err != nil { + return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + } + swap.Used, err = strconv.Atoi(swapFields[3]) + if err != nil { + return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + } + swap.Priority, err = strconv.Atoi(swapFields[4]) + if err != nil { + return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + } + + return swap, nil +} diff --git a/test/vendor/github.com/prometheus/procfs/vm.go b/test/vendor/github.com/prometheus/procfs/vm.go new file mode 100644 index 0000000000..cb13891414 --- /dev/null +++ b/test/vendor/github.com/prometheus/procfs/vm.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string
type VM struct {
	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
	BlockDump                 *int64   // /proc/sys/vm/block_dump
	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
	DropCaches                *int64   // /proc/sys/vm/drop_caches
	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
	NumaStat                  *int64   // /proc/sys/vm/numa_stat
	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
	PageCluster               *int64   // /proc/sys/vm/page-cluster
	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
	StatInterval              *int64   // /proc/sys/vm/stat_interval
	Swappiness                *int64   // /proc/sys/vm/swappiness
	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
}

// VM reads the VM statistics from the specified `proc` filesystem.
// Settings that are absent, unreadable, or unknown remain at their zero
// value (nil pointer / empty string); an unparsable value aborts with error.
func (fs FS) VM() (*VM, error) {
	path := fs.proc.Path("sys/vm")
	file, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !file.Mode().IsDir() {
		return nil, fmt.Errorf("%s is not a directory", path)
	}

	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}

	var vm VM
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		name := filepath.Join(path, f.Name())
		// ignore errors on read, as there are some write only
		// in /proc/sys/vm
		value, err := util.SysReadFile(name)
		if err != nil {
			continue
		}
		vp := util.NewValueParser(value)

		switch f.Name() {
		case "admin_reserve_kbytes":
			vm.AdminReserveKbytes = vp.PInt64()
		case "block_dump":
			vm.BlockDump = vp.PInt64()
		case "compact_unevictable_allowed":
			vm.CompactUnevictableAllowed = vp.PInt64()
		case "dirty_background_bytes":
			vm.DirtyBackgroundBytes = vp.PInt64()
		case "dirty_background_ratio":
			vm.DirtyBackgroundRatio = vp.PInt64()
		case "dirty_bytes":
			vm.DirtyBytes = vp.PInt64()
		case "dirty_expire_centisecs":
			vm.DirtyExpireCentisecs = vp.PInt64()
		case "dirty_ratio":
			vm.DirtyRatio = vp.PInt64()
		case "dirtytime_expire_seconds":
			vm.DirtytimeExpireSeconds = vp.PInt64()
		case "dirty_writeback_centisecs":
			vm.DirtyWritebackCentisecs = vp.PInt64()
		case "drop_caches":
			vm.DropCaches = vp.PInt64()
		case "extfrag_threshold":
			vm.ExtfragThreshold = vp.PInt64()
		case "hugetlb_shm_group":
			vm.HugetlbShmGroup = vp.PInt64()
		case "laptop_mode":
			vm.LaptopMode = vp.PInt64()
		case "legacy_va_layout":
			vm.LegacyVaLayout = vp.PInt64()
		case "lowmem_reserve_ratio":
			// This setting holds a whitespace-separated array; parse each
			// element with its own value parser.
			stringSlice := strings.Fields(value)
			pint64Slice := make([]*int64, 0, len(stringSlice))
			for _, value := range stringSlice {
				vp := util.NewValueParser(value)
				pint64Slice = append(pint64Slice, vp.PInt64())
			}
			vm.LowmemReserveRatio = pint64Slice
		case "max_map_count":
			vm.MaxMapCount = vp.PInt64()
		case "memory_failure_early_kill":
			vm.MemoryFailureEarlyKill = vp.PInt64()
		case "memory_failure_recovery":
			vm.MemoryFailureRecovery = vp.PInt64()
		case "min_free_kbytes":
			vm.MinFreeKbytes = vp.PInt64()
		case "min_slab_ratio":
			vm.MinSlabRatio = vp.PInt64()
		case "min_unmapped_ratio":
			vm.MinUnmappedRatio = vp.PInt64()
		case "mmap_min_addr":
			vm.MmapMinAddr = vp.PInt64()
		case "nr_hugepages":
			vm.NrHugepages = vp.PInt64()
		case "nr_hugepages_mempolicy":
			vm.NrHugepagesMempolicy = vp.PInt64()
		case "nr_overcommit_hugepages":
			vm.NrOvercommitHugepages = vp.PInt64()
		case "numa_stat":
			vm.NumaStat = vp.PInt64()
		case "numa_zonelist_order":
			// The only string-valued setting; stored verbatim.
			vm.NumaZonelistOrder = value
		case "oom_dump_tasks":
			vm.OomDumpTasks = vp.PInt64()
		case "oom_kill_allocating_task":
			vm.OomKillAllocatingTask = vp.PInt64()
		case "overcommit_kbytes":
			vm.OvercommitKbytes = vp.PInt64()
		case "overcommit_memory":
			vm.OvercommitMemory = vp.PInt64()
		case "overcommit_ratio":
			vm.OvercommitRatio = vp.PInt64()
		case "page-cluster":
			vm.PageCluster = vp.PInt64()
		case "panic_on_oom":
			vm.PanicOnOom = vp.PInt64()
		case "percpu_pagelist_fraction":
			vm.PercpuPagelistFraction = vp.PInt64()
		case "stat_interval":
			vm.StatInterval = vp.PInt64()
		case "swappiness":
			vm.Swappiness = vp.PInt64()
		case "user_reserve_kbytes":
			vm.UserReserveKbytes = vp.PInt64()
		case "vfs_cache_pressure":
			vm.VfsCachePressure = vp.PInt64()
		case "watermark_boost_factor":
			vm.WatermarkBoostFactor = vp.PInt64()
		case "watermark_scale_factor":
			vm.WatermarkScaleFactor = vp.PInt64()
		case "zone_reclaim_mode":
			vm.ZoneReclaimMode = vp.PInt64()
		}
		// Fail on the first value that did not parse as an integer.
		if err := vp.Err(); err != nil {
			return nil, err
		}
	}

	return &vm, nil
}

// Copyright 2017 Prometheus Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// XfrmStat models the contents of /proc/net/xfrm_stat.
type XfrmStat struct {
	// All errors which are not matched by other
	XfrmInError int
	// No buffer is left
	XfrmInBufferError int
	// Header Error
	XfrmInHdrError int
	// No state found
	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
	XfrmInNoStates int
	// Transformation protocol specific error
	// e.g. SA Key is wrong
	XfrmInStateProtoError int
	// Transformation mode specific error
	XfrmInStateModeError int
	// Sequence error
	// e.g. sequence number is out of window
	XfrmInStateSeqError int
	// State is expired
	XfrmInStateExpired int
	// State has mismatch option
	// e.g. UDP encapsulation type is mismatched
	XfrmInStateMismatch int
	// State is invalid
	XfrmInStateInvalid int
	// No matching template for states
	// e.g. Inbound SAs are correct but SP rule is wrong
	XfrmInTmplMismatch int
	// No policy is found for states
	// e.g. Inbound SAs are correct but no SP is found
	XfrmInNoPols int
	// Policy discards
	XfrmInPolBlock int
	// Policy error
	XfrmInPolError int
	// All errors which are not matched by others
	XfrmOutError int
	// Bundle generation error
	XfrmOutBundleGenError int
	// Bundle check error
	XfrmOutBundleCheckError int
	// No state was found
	XfrmOutNoStates int
	// Transformation protocol specific error
	XfrmOutStateProtoError int
	// Transportation mode specific error
	XfrmOutStateModeError int
	// Sequence error
	// i.e sequence number overflow
	XfrmOutStateSeqError int
	// State is expired
	XfrmOutStateExpired int
	// Policy discads
	XfrmOutPolBlock int
	// Policy is dead
	XfrmOutPolDead int
	// Policy Error
	XfrmOutPolError int
	// Forward-header error
	XfrmFwdHdrError int
	// Outbound state is invalid
	XfrmOutStateInvalid int
	// Acquire error
	XfrmAcquireError int
}

// NewXfrmStat reads the xfrm_stat statistics.
func NewXfrmStat() (XfrmStat, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return XfrmStat{}, err
	}

	return fs.NewXfrmStat()
}

// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
// Each line must be "<name> <value>"; counters with unknown names are
// silently ignored.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
	file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
	if err != nil {
		return XfrmStat{}, err
	}
	defer file.Close()

	var (
		x = XfrmStat{}
		s = bufio.NewScanner(file)
	)

	for s.Scan() {
		fields := strings.Fields(s.Text())

		if len(fields) != 2 {
			return XfrmStat{}, fmt.Errorf(
				"couldn't parse %s line %s", file.Name(), s.Text())
		}

		name := fields[0]
		value, err := strconv.Atoi(fields[1])
		if err != nil {
			return XfrmStat{}, err
		}

		// Dispatch the counter to the matching struct field.
		switch name {
		case "XfrmInError":
			x.XfrmInError = value
		case "XfrmInBufferError":
			x.XfrmInBufferError = value
		case "XfrmInHdrError":
			x.XfrmInHdrError = value
		case "XfrmInNoStates":
			x.XfrmInNoStates = value
		case "XfrmInStateProtoError":
			x.XfrmInStateProtoError = value
		case "XfrmInStateModeError":
			x.XfrmInStateModeError = value
		case "XfrmInStateSeqError":
			x.XfrmInStateSeqError = value
		case "XfrmInStateExpired":
			x.XfrmInStateExpired = value
		case "XfrmInStateInvalid":
			x.XfrmInStateInvalid = value
		case "XfrmInTmplMismatch":
			x.XfrmInTmplMismatch = value
		case "XfrmInNoPols":
			x.XfrmInNoPols = value
		case "XfrmInPolBlock":
			x.XfrmInPolBlock = value
		case "XfrmInPolError":
			x.XfrmInPolError = value
		case "XfrmOutError":
			x.XfrmOutError = value
		case "XfrmInStateMismatch":
			x.XfrmInStateMismatch = value
		case "XfrmOutBundleGenError":
			x.XfrmOutBundleGenError = value
		case "XfrmOutBundleCheckError":
			x.XfrmOutBundleCheckError = value
		case "XfrmOutNoStates":
			x.XfrmOutNoStates = value
		case "XfrmOutStateProtoError":
			x.XfrmOutStateProtoError = value
		case "XfrmOutStateModeError":
			x.XfrmOutStateModeError = value
		case "XfrmOutStateSeqError":
			x.XfrmOutStateSeqError = value
		case "XfrmOutStateExpired":
			x.XfrmOutStateExpired = value
		case "XfrmOutPolBlock":
			x.XfrmOutPolBlock = value
		case "XfrmOutPolDead":
			x.XfrmOutPolDead = value
		case "XfrmOutPolError":
			x.XfrmOutPolError = value
		case "XfrmFwdHdrError":
			x.XfrmFwdHdrError = value
		case "XfrmOutStateInvalid":
			x.XfrmOutStateInvalid = value
		case "XfrmAcquireError":
			x.XfrmAcquireError = value
		}

	}

	return x, s.Err()
}

// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"regexp"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Zoneinfo holds info parsed from /proc/zoneinfo.
type Zoneinfo struct {
	Node                       string
	Zone                       string
	NrFreePages                *int64
	Min                        *int64
	Low                        *int64
	High                       *int64
	Scanned                    *int64
	Spanned                    *int64
	Present                    *int64
	Managed                    *int64
	NrActiveAnon               *int64
	NrInactiveAnon             *int64
	NrIsolatedAnon             *int64
	NrAnonPages                *int64
	NrAnonTransparentHugepages *int64
	NrActiveFile               *int64
	NrInactiveFile             *int64
	NrIsolatedFile             *int64
	NrFilePages                *int64
	NrSlabReclaimable          *int64
	NrSlabUnreclaimable        *int64
	NrMlockStack               *int64
	NrKernelStack              *int64
	NrMapped                   *int64
	NrDirty                    *int64
	NrWriteback                *int64
	NrUnevictable              *int64
	NrShmem                    *int64
	NrDirtied                  *int64
	NrWritten                  *int64
	NumaHit                    *int64
	NumaMiss                   *int64
	NumaForeign                *int64
	NumaInterleave             *int64
	NumaLocal                  *int64
	NumaOther                  *int64
	Protection                 []*int64
}

// nodeZoneRE extracts the node number and zone name from a "Node N, zone X" header.
var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)

// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
	if err != nil {
		return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
	}
	zoneinfo, err := parseZoneinfo(data)
	if err != nil {
		return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
	}
	return zoneinfo, nil
}

// parseZoneinfo splits the raw zoneinfo contents into per-"Node" blocks and
// parses each block into one Zoneinfo element. Unknown keys are ignored;
// unparsable values leave the corresponding field nil.
func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {

	zoneinfo := []Zoneinfo{}

	zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
	for _, block := range zoneinfoBlocks {
		var zoneinfoElement Zoneinfo
		lines := strings.Split(string(block), "\n")
		for _, line := range lines {

			// "Node N, zone X" header line for this block.
			if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
				zoneinfoElement.Node = nodeZone[1]
				zoneinfoElement.Zone = nodeZone[2]
				continue
			}
			// The "per-node stats" section has no zone attached.
			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
				zoneinfoElement.Zone = ""
				continue
			}
			parts := strings.Fields(strings.TrimSpace(line))
			if len(parts) < 2 {
				continue
			}
			vp := util.NewValueParser(parts[1])
			switch parts[0] {
			case "nr_free_pages":
				zoneinfoElement.NrFreePages = vp.PInt64()
			case "min":
				zoneinfoElement.Min = vp.PInt64()
			case "low":
				zoneinfoElement.Low = vp.PInt64()
			case "high":
				zoneinfoElement.High = vp.PInt64()
			case "scanned":
				zoneinfoElement.Scanned = vp.PInt64()
			case "spanned":
				zoneinfoElement.Spanned = vp.PInt64()
			case "present":
				zoneinfoElement.Present = vp.PInt64()
			case "managed":
				zoneinfoElement.Managed = vp.PInt64()
			case "nr_active_anon":
				zoneinfoElement.NrActiveAnon = vp.PInt64()
			case "nr_inactive_anon":
				zoneinfoElement.NrInactiveAnon = vp.PInt64()
			case "nr_isolated_anon":
				zoneinfoElement.NrIsolatedAnon = vp.PInt64()
			case "nr_anon_pages":
				zoneinfoElement.NrAnonPages = vp.PInt64()
			case "nr_anon_transparent_hugepages":
				zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
			case "nr_active_file":
				zoneinfoElement.NrActiveFile = vp.PInt64()
			case "nr_inactive_file":
				zoneinfoElement.NrInactiveFile = vp.PInt64()
			case "nr_isolated_file":
				zoneinfoElement.NrIsolatedFile = vp.PInt64()
			case "nr_file_pages":
				zoneinfoElement.NrFilePages = vp.PInt64()
			case "nr_slab_reclaimable":
				zoneinfoElement.NrSlabReclaimable = vp.PInt64()
			case "nr_slab_unreclaimable":
				zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
			case "nr_mlock_stack":
				zoneinfoElement.NrMlockStack = vp.PInt64()
			case "nr_kernel_stack":
				zoneinfoElement.NrKernelStack = vp.PInt64()
			case "nr_mapped":
				zoneinfoElement.NrMapped = vp.PInt64()
			case "nr_dirty":
				zoneinfoElement.NrDirty = vp.PInt64()
			case "nr_writeback":
				zoneinfoElement.NrWriteback = vp.PInt64()
			case "nr_unevictable":
				zoneinfoElement.NrUnevictable = vp.PInt64()
			case "nr_shmem":
				zoneinfoElement.NrShmem = vp.PInt64()
			case "nr_dirtied":
				zoneinfoElement.NrDirtied = vp.PInt64()
			case "nr_written":
				zoneinfoElement.NrWritten = vp.PInt64()
			case "numa_hit":
				zoneinfoElement.NumaHit = vp.PInt64()
			case "numa_miss":
				zoneinfoElement.NumaMiss = vp.PInt64()
			case "numa_foreign":
				zoneinfoElement.NumaForeign = vp.PInt64()
			case "numa_interleave":
				zoneinfoElement.NumaInterleave = vp.PInt64()
			case "numa_local":
				zoneinfoElement.NumaLocal = vp.PInt64()
			case "numa_other":
				zoneinfoElement.NumaOther = vp.PInt64()
			case "protection:":
				// "protection: (a, b, c, ...)" — strip the parentheses and
				// parse the comma-separated list; parse errors leave
				// Protection unset.
				protectionParts := strings.Split(line, ":")
				protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
				protectionValues = strings.Replace(protectionValues, ")", "", 1)
				protectionValues = strings.TrimSpace(protectionValues)
				protectionStringMap := strings.Split(protectionValues, ", ")
				val, err := util.ParsePInt64s(protectionStringMap)
				if err == nil {
					zoneinfoElement.Protection = val
				}
			}

		}

		zoneinfo = append(zoneinfo, zoneinfoElement)
	}
	return zoneinfo, nil
}

// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metricexport contains support for exporting metric data.
//
// This is an EXPERIMENTAL package, and may change in arbitrary ways without
// notice.
package metricexport // import "go.opencensus.io/metric/metricexport"

// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metricexport

import (
	"context"

	"go.opencensus.io/metric/metricdata"
)

// Exporter is an interface that exporters implement to export the metric data.
type Exporter interface {
	// ExportMetrics exports a batch of metric data.
	ExportMetrics(ctx context.Context, data []*metricdata.Metric) error
}

// Copyright 2019, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package metricexport + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/trace" +) + +var ( + defaultSampler = trace.ProbabilitySampler(0.0001) + errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %d", minimumReportingDuration) + errAlreadyStarted = fmt.Errorf("already started") + errIntervalReaderNil = fmt.Errorf("interval reader is nil") + errExporterNil = fmt.Errorf("exporter is nil") + errReaderNil = fmt.Errorf("reader is nil") +) + +const ( + defaultReportingDuration = 60 * time.Second + minimumReportingDuration = 1 * time.Second + defaultSpanName = "ExportMetrics" +) + +// ReaderOptions contains options pertaining to metrics reader. +type ReaderOptions struct { + // SpanName is the name used for span created to export metrics. + SpanName string +} + +// Reader reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. +type Reader struct { + sampler trace.Sampler + + spanName string +} + +// IntervalReader periodically reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. Call Reader.Stop() to stop the reader. +type IntervalReader struct { + // ReportingInterval it the time duration between two consecutive + // metrics reporting. defaultReportingDuration is used if it is not set. + // It cannot be set lower than minimumReportingDuration. 
+ ReportingInterval time.Duration + + exporter Exporter + timer *time.Ticker + quit, done chan bool + mu sync.RWMutex + reader *Reader +} + +// ReaderOption apply changes to ReaderOptions. +type ReaderOption func(*ReaderOptions) + +// WithSpanName makes new reader to use given span name when exporting metrics. +func WithSpanName(spanName string) ReaderOption { + return func(o *ReaderOptions) { + o.SpanName = spanName + } +} + +// NewReader returns a reader configured with specified options. +func NewReader(o ...ReaderOption) *Reader { + var opts ReaderOptions + for _, op := range o { + op(&opts) + } + reader := &Reader{defaultSampler, defaultSpanName} + if opts.SpanName != "" { + reader.spanName = opts.SpanName + } + return reader +} + +// NewIntervalReader creates a reader. Once started it periodically +// reads metrics from all producers and exports them using provided exporter. +func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) { + if exporter == nil { + return nil, errExporterNil + } + if reader == nil { + return nil, errReaderNil + } + + r := &IntervalReader{ + exporter: exporter, + reader: reader, + } + return r, nil +} + +// Start starts the IntervalReader which periodically reads metrics from all +// producers registered with global producer manager. If the reporting interval +// is not set prior to calling this function then default reporting interval +// is used. 
+func (ir *IntervalReader) Start() error { + if ir == nil { + return errIntervalReaderNil + } + ir.mu.Lock() + defer ir.mu.Unlock() + var reportingInterval = defaultReportingDuration + if ir.ReportingInterval != 0 { + if ir.ReportingInterval < minimumReportingDuration { + return errReportingIntervalTooLow + } + reportingInterval = ir.ReportingInterval + } + + if ir.done != nil { + return errAlreadyStarted + } + ir.timer = time.NewTicker(reportingInterval) + ir.quit = make(chan bool) + ir.done = make(chan bool) + + go ir.startInternal() + return nil +} + +func (ir *IntervalReader) startInternal() { + for { + select { + case <-ir.timer.C: + ir.reader.ReadAndExport(ir.exporter) + case <-ir.quit: + ir.timer.Stop() + ir.done <- true + return + } + } +} + +// Stop stops the reader from reading and exporting metrics. +// Additional call to Stop are no-ops. +func (ir *IntervalReader) Stop() { + if ir == nil { + return + } + ir.mu.Lock() + defer ir.mu.Unlock() + if ir.quit == nil { + return + } + ir.quit <- true + <-ir.done + close(ir.quit) + close(ir.done) + ir.quit = nil +} + +// ReadAndExport reads metrics from all producer registered with +// producer manager and then exports them using provided exporter. +func (r *Reader) ReadAndExport(exporter Exporter) { + ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler)) + defer span.End() + producers := metricproducer.GlobalManager().GetAll() + data := []*metricdata.Metric{} + for _, producer := range producers { + data = append(data, producer.Read()...) + } + // TODO: [rghetia] add metrics for errors. 
+ exporter.ExportMetrics(ctx, data) +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/client.go new file mode 100644 index 0000000000..28fddb8440 --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -0,0 +1,56 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "context" + "go.opencensus.io/trace" + + "google.golang.org/grpc/stats" +) + +// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and +// traces. Use with gRPC clients only. +type ClientHandler struct { + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions +} + +// HandleConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. 
+func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) + return ctx +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go new file mode 100644 index 0000000000..abe978b67b --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ClientHandler: +var ( + ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) +) + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. 
+var ( + ClientSentBytesPerRPCView = &view.View{ + Measure: ClientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of bytes sent per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientReceivedBytesPerRPCView = &view.View{ + Measure: ClientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of bytes received per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientRoundtripLatencyView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } + + ClientCompletedRPCsView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + Aggregation: view.Count(), + } + + ClientSentMessagesPerRPCView = &view.View{ + Measure: ClientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientReceivedMessagesPerRPCView = &view.View{ + Measure: ClientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientServerLatencyView = &view.View{ + Measure: ClientServerLatency, + Name: "grpc.io/client/server_latency", + Description: "Distribution of server latency as viewed by client, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, 
+ } +) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go new file mode 100644 index 0000000000..b36349820d --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -0,0 +1,49 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "context" + "time" + + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the tag.Map populated by the application code, serializes +// its tags into the GRPC metadata in order to be sent to the server. 
+func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Info("clientHandler.TagRPC called with nil info.") + } + return ctx + } + + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ts := tag.FromContext(ctx) + if ts != nil { + encoded := tag.Encode(ts) + ctx = stats.SetTags(ctx, encoded) + } + + return context.WithValue(ctx, rpcDataKey, d) +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/doc.go new file mode 100644 index 0000000000..1370323fb7 --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/doc.go @@ -0,0 +1,19 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ocgrpc contains OpenCensus stats and trace +// integrations for gRPC. +// +// Use ServerHandler for servers and ClientHandler for clients. 
+package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/server.go new file mode 100644 index 0000000000..15ada839d6 --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -0,0 +1,80 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "context" + "go.opencensus.io/trace" + + "google.golang.org/grpc/stats" +) + +// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and +// traces. Use with gRPC servers. +// +// When installed (see Example), tracing metadata is read from inbound RPCs +// by default. If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). +type ServerHandler struct { + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool + + // StartOptions to use for to spans started around RPCs handled by this server. 
+ // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions +} + +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) + return ctx +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go new file mode 100644 index 0000000000..609d9ed248 --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ServerHandler: +var ( + ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. 
+var ( + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: ServerReceivedBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of total sent bytes per RPC, by method.", + Measure: ServerSentBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerLatency, + Aggregation: DefaultMillisecondsDistribution, + } + + ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, + Measure: ServerLatency, + Aggregation: view.Count(), + } + + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of messages received count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerReceivedMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } + + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of messages sent count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerSentMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultServerViews are the default server views provided by this package. 
+var DefaultServerViews = []*view.View{ + ServerReceivedBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerLatencyView, + ServerCompletedRPCsView, +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go new file mode 100644 index 0000000000..afcef023af --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -0,0 +1,63 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "time" + + "context" + + "go.opencensus.io/tag" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" +) + +// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from +// it and creates a new tag.Map and puts them into the returned context. 
+func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + startTime := time.Now() + if info == nil { + if grpclog.V(2) { + grpclog.Infof("opencensus: TagRPC called with nil info.") + } + return ctx + } + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + propagated := h.extractPropagatedTags(ctx) + ctx = tag.NewContext(ctx, propagated) + ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) + return context.WithValue(ctx, rpcDataKey, d) +} + +// extractPropagatedTags creates a new tag map containing the tags extracted from the +// gRPC metadata. +func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { + buf := stats.Tags(ctx) + if buf == nil { + return nil + } + propagated, err := tag.Decode(buf) + if err != nil { + if grpclog.V(2) { + grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) + } + return nil + } + return propagated +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go new file mode 100644 index 0000000000..89cac9c4ec --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -0,0 +1,227 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ocgrpc + +import ( + "context" + "strconv" + "strings" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +type grpcInstrumentationKey string + +// rpcData holds the instrumentation RPC data that is needed between the start +// and end of an call. It holds the info that this package needs to keep track +// of between the various GRPC events. +type rpcData struct { + // reqCount and respCount has to be the first words + // in order to be 64-aligned on 32-bit architectures. + sentCount, sentBytes, recvCount, recvBytes int64 // access atomically + + // startTime represents the time at which TagRPC was invoked at the + // beginning of an RPC. It is an appoximation of the time when the + // application code invoked GRPC code. + startTime time.Time + method string +} + +// The following variables define the default hard-coded auxiliary data used by +// both the default GRPC client and GRPC server metrics. +var ( + DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) +) + +// Server tags are applied to the context used to process each RPC, as well as +// the measures at the end of each RPC. 
+var ( + KeyServerMethod = tag.MustNewKey("grpc_server_method") + KeyServerStatus = tag.MustNewKey("grpc_server_status") +) + +// Client tags are applied to measures at the end of each RPC. +var ( + KeyClientMethod = tag.MustNewKey("grpc_client_method") + KeyClientStatus = tag.MustNewKey("grpc_client_status") +) + +var ( + rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") +) + +func methodName(fullname string) string { + return strings.TrimLeft(fullname, "/") +} + +// statsHandleRPC processes the RPC events. +func statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + // do nothing for client + case *stats.OutPayload: + handleRPCOutPayload(ctx, st) + case *stats.InPayload: + handleRPCInPayload(ctx, st) + case *stats.End: + handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.sentBytes, int64(s.Length)) + atomic.AddInt64(&d.sentCount, 1) +} + +func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.recvBytes, int64(s.Length)) + atomic.AddInt64(&d.recvCount, 1) +} + +func handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + elapsedTime := time.Since(d.startTime) + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = statusCodeToString(s) + } + } else { + st = "OK" + } + + latencyMillis := 
float64(elapsedTime) / float64(time.Millisecond) + attachments := getSpanCtxAttachment(ctx) + if s.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st)), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyServerStatus, st), + ), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis))) + } +} + +func statusCodeToString(s *status.Status) string { + // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + switch c := s.Code(); c { + case codes.OK: + return "OK" + case codes.Canceled: + return "CANCELLED" + case codes.Unknown: + return "UNKNOWN" + case codes.InvalidArgument: + return "INVALID_ARGUMENT" + case codes.DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case codes.NotFound: + return "NOT_FOUND" + case codes.AlreadyExists: + return "ALREADY_EXISTS" + case codes.PermissionDenied: + return "PERMISSION_DENIED" + case codes.ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case codes.FailedPrecondition: + return "FAILED_PRECONDITION" + case codes.Aborted: + return "ABORTED" + case codes.OutOfRange: + return "OUT_OF_RANGE" + case codes.Unimplemented: + return "UNIMPLEMENTED" + case codes.Internal: + return "INTERNAL" + case codes.Unavailable: + return "UNAVAILABLE" + case 
codes.DataLoss: + return "DATA_LOSS" + case codes.Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE_" + strconv.FormatInt(int64(c), 10) + } +} + +func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { + attachments := map[string]interface{}{} + span := trace.FromContext(ctx) + if span == nil { + return attachments + } + spanCtx := span.SpanContext() + if spanCtx.IsSampled() { + attachments[metricdata.AttachmentKeySpanContext] = spanCtx + } + return attachments +} diff --git a/test/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/test/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go new file mode 100644 index 0000000000..fef5827566 --- /dev/null +++ b/test/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "strings" + + "google.golang.org/grpc/codes" + + "context" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +const traceContextKey = "grpc-trace-bin" + +// TagRPC creates a new trace span for the client side of the RPC. +// +// It returns ctx with the new trace span added and a serialization of the +// SpanContext added to the outgoing gRPC metadata. 
+func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + ctx, span := trace.StartSpan(ctx, name, + trace.WithSampler(c.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC + traceContextBinary := propagation.Binary(span.SpanContext()) + return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) +} + +// TagRPC creates a new trace span for the server side of the RPC. +// +// It checks the incoming gRPC metadata in ctx for a SpanContext, and if +// it finds one, uses that SpanContext as the parent context of the new span. +// +// It returns ctx, with the new trace span added. +func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + md, _ := metadata.FromIncomingContext(ctx) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. 
They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler), + ) + return ctx + } + } + ctx, span := trace.StartSpan(ctx, name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler)) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return ctx +} + +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.FromContext(ctx) + // TODO: compressed and uncompressed sizes are not populated in every message. + switch rs := rs.(type) { + case *stats.Begin: + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) + case *stats.InPayload: + span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } + } + span.End() + } +} diff --git a/test/vendor/go.opencensus.io/resource/resourcekeys/const.go b/test/vendor/go.opencensus.io/resource/resourcekeys/const.go new file mode 100644 index 0000000000..1f2246662f --- /dev/null +++ b/test/vendor/go.opencensus.io/resource/resourcekeys/const.go @@ -0,0 +1,68 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resourcekeys contains well known type and label keys for resources. +package resourcekeys // import "go.opencensus.io/resource/resourcekeys" + +// Constants for Kubernetes resources. +const ( + K8SType = "k8s" + + // A uniquely identifying name for the Kubernetes cluster. Kubernetes + // does not have cluster names as an internal concept so this may be + // set to any meaningful value within the environment. For example, + // GKE clusters have a name which can be used for this label. + K8SKeyClusterName = "k8s.cluster.name" + K8SKeyNamespaceName = "k8s.namespace.name" + K8SKeyPodName = "k8s.pod.name" + K8SKeyDeploymentName = "k8s.deployment.name" +) + +// Constants for Container resources. +const ( + ContainerType = "container" + + // A uniquely identifying name for the Container. + ContainerKeyName = "container.name" + ContainerKeyImageName = "container.image.name" + ContainerKeyImageTag = "container.image.tag" +) + +// Constants for Cloud resources. +const ( + CloudType = "cloud" + + CloudKeyProvider = "cloud.provider" + CloudKeyAccountID = "cloud.account.id" + CloudKeyRegion = "cloud.region" + CloudKeyZone = "cloud.zone" + + // Cloud Providers + CloudProviderAWS = "aws" + CloudProviderGCP = "gcp" + CloudProviderAZURE = "azure" +) + +// Constants for Host resources. +const ( + HostType = "host" + + // A uniquely identifying name for the host. 
+ HostKeyName = "host.name" + + // A hostname as returned by the 'hostname' command on host machine. + HostKeyHostName = "host.hostname" + HostKeyID = "host.id" + HostKeyType = "host.type" +) diff --git a/test/vendor/golang.org/x/net/http2/h2c/h2c.go b/test/vendor/golang.org/x/net/http2/h2c/h2c.go new file mode 100644 index 0000000000..07c5c9a60e --- /dev/null +++ b/test/vendor/golang.org/x/net/http2/h2c/h2c.go @@ -0,0 +1,495 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package h2c implements the unencrypted "h2c" form of HTTP/2. +// +// The h2c protocol is the non-TLS version of HTTP/2 which is not available from +// net/http or golang.org/x/net/http2. +package h2c + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/textproto" + "os" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var ( + http2VerboseLogs bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") { + http2VerboseLogs = true + } +} + +// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic +// that should be h2c traffic. There are two ways to begin a h2c connection +// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this +// works by starting an h2c connection with a string of bytes that is valid +// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to +// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to +// h2c. When either of those situations occur we hijack the HTTP/1 connection, +// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn. 
+type h2cHandler struct { + Handler http.Handler + s *http2.Server +} + +// NewHandler returns an http.Handler that wraps h, intercepting any h2c +// traffic. If a request is an h2c connection, it's hijacked and redirected to +// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This +// works because h2c is designed to be parseable as valid HTTP/1, but ignored by +// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1 +// compatible parts of the Go http library to parse and recognize h2c requests. +// Once a request is recognized as h2c, we hijack the connection and convert it +// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn +// understands HTTP/2 except for the h2c part of it.) +func NewHandler(h http.Handler, s *http2.Server) http.Handler { + return &h2cHandler{ + Handler: h, + s: s, + } +} + +// ServeHTTP implement the h2c support that is enabled by h2c.GetH2CHandler. +func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Handle h2c with prior knowledge (RFC 7540 Section 3.4) + if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" { + if http2VerboseLogs { + log.Print("h2c: attempting h2c with prior knowledge.") + } + conn, err := initH2CWithPriorKnowledge(w) + if err != nil { + if http2VerboseLogs { + log.Printf("h2c: error h2c with prior knowledge: %v", err) + } + return + } + defer conn.Close() + + s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler}) + return + } + // Handle Upgrade to h2c (RFC 7540 Section 3.2) + if conn, err := h2cUpgrade(w, r); err == nil { + defer conn.Close() + + s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler}) + return + } + + s.Handler.ServeHTTP(w, r) + return +} + +// initH2CWithPriorKnowledge implements creating a h2c connection with prior +// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn. 
+// All we have to do is look for the client preface that is suppose to be part +// of the body, and reforward the client preface on the net.Conn this function +// creates. +func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) { + hijacker, ok := w.(http.Hijacker) + if !ok { + panic("Hijack not supported.") + } + conn, rw, err := hijacker.Hijack() + if err != nil { + panic(fmt.Sprintf("Hijack failed: %v", err)) + } + + const expectedBody = "SM\r\n\r\n" + + buf := make([]byte, len(expectedBody)) + n, err := io.ReadFull(rw, buf) + if err != nil { + return nil, fmt.Errorf("could not read from the buffer: %s", err) + } + + if string(buf[:n]) == expectedBody { + c := &rwConn{ + Conn: conn, + Reader: io.MultiReader(strings.NewReader(http2.ClientPreface), rw), + BufWriter: rw.Writer, + } + return c, nil + } + + conn.Close() + if http2VerboseLogs { + log.Printf( + "h2c: missing the request body portion of the client preface. Wanted: %v Got: %v", + []byte(expectedBody), + buf[0:n], + ) + } + return nil, errors.New("invalid client preface") +} + +// drainClientPreface reads a single instance of the HTTP/2 client preface from +// the supplied reader. +func drainClientPreface(r io.Reader) error { + var buf bytes.Buffer + prefaceLen := int64(len(http2.ClientPreface)) + n, err := io.CopyN(&buf, r, prefaceLen) + if err != nil { + return err + } + if n != prefaceLen || buf.String() != http2.ClientPreface { + return fmt.Errorf("Client never sent: %s", http2.ClientPreface) + } + return nil +} + +// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2). 
+func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) { + if !isH2CUpgrade(r.Header) { + return nil, errors.New("non-conforming h2c headers") + } + + // Initial bytes we put into conn to fool http2 server + initBytes, _, err := convertH1ReqToH2(r) + if err != nil { + return nil, err + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + return nil, errors.New("hijack not supported.") + } + conn, rw, err := hijacker.Hijack() + if err != nil { + return nil, fmt.Errorf("hijack failed: %v", err) + } + + rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" + + "Connection: Upgrade\r\n" + + "Upgrade: h2c\r\n\r\n")) + rw.Flush() + + // A conforming client will now send an H2 client preface which need to drain + // since we already sent this. + if err := drainClientPreface(rw); err != nil { + return nil, err + } + + c := &rwConn{ + Conn: conn, + Reader: io.MultiReader(initBytes, rw), + BufWriter: newSettingsAckSwallowWriter(rw.Writer), + } + return c, nil +} + +// convert the data contained in the HTTP/1 upgrade request into the HTTP/2 +// version in byte form. 
+func convertH1ReqToH2(r *http.Request) (*bytes.Buffer, []http2.Setting, error) { + h2Bytes := bytes.NewBuffer([]byte((http2.ClientPreface))) + framer := http2.NewFramer(h2Bytes, nil) + settings, err := getH2Settings(r.Header) + if err != nil { + return nil, nil, err + } + + if err := framer.WriteSettings(settings...); err != nil { + return nil, nil, err + } + + headerBytes, err := getH2HeaderBytes(r, getMaxHeaderTableSize(settings)) + if err != nil { + return nil, nil, err + } + + maxFrameSize := int(getMaxFrameSize(settings)) + needOneHeader := len(headerBytes) < maxFrameSize + err = framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: 1, + BlockFragment: headerBytes, + EndHeaders: needOneHeader, + }) + if err != nil { + return nil, nil, err + } + + for i := maxFrameSize; i < len(headerBytes); i += maxFrameSize { + if len(headerBytes)-i > maxFrameSize { + if err := framer.WriteContinuation(1, + false, // endHeaders + headerBytes[i:maxFrameSize]); err != nil { + return nil, nil, err + } + } else { + if err := framer.WriteContinuation(1, + true, // endHeaders + headerBytes[i:]); err != nil { + return nil, nil, err + } + } + } + + return h2Bytes, settings, nil +} + +// getMaxFrameSize returns the SETTINGS_MAX_FRAME_SIZE. If not present default +// value is 16384 as specified by RFC 7540 Section 6.5.2. +func getMaxFrameSize(settings []http2.Setting) uint32 { + for _, setting := range settings { + if setting.ID == http2.SettingMaxFrameSize { + return setting.Val + } + } + return 16384 +} + +// getMaxHeaderTableSize returns the SETTINGS_HEADER_TABLE_SIZE. If not present +// default value is 4096 as specified by RFC 7540 Section 6.5.2. +func getMaxHeaderTableSize(settings []http2.Setting) uint32 { + for _, setting := range settings { + if setting.ID == http2.SettingHeaderTableSize { + return setting.Val + } + } + return 4096 +} + +// bufWriter is a Writer interface that also has a Flush method. 
+type bufWriter interface { + io.Writer + Flush() error +} + +// rwConn implements net.Conn but overrides Read and Write so that reads and +// writes are forwarded to the provided io.Reader and bufWriter. +type rwConn struct { + net.Conn + io.Reader + BufWriter bufWriter +} + +// Read forwards reads to the underlying Reader. +func (c *rwConn) Read(p []byte) (int, error) { + return c.Reader.Read(p) +} + +// Write forwards writes to the underlying bufWriter and immediately flushes. +func (c *rwConn) Write(p []byte) (int, error) { + n, err := c.BufWriter.Write(p) + if err := c.BufWriter.Flush(); err != nil { + return 0, err + } + return n, err +} + +// settingsAckSwallowWriter is a writer that normally forwards bytes to its +// underlying Writer, but swallows the first SettingsAck frame that it sees. +type settingsAckSwallowWriter struct { + Writer *bufio.Writer + buf []byte + didSwallow bool +} + +// newSettingsAckSwallowWriter returns a new settingsAckSwallowWriter. +func newSettingsAckSwallowWriter(w *bufio.Writer) *settingsAckSwallowWriter { + return &settingsAckSwallowWriter{ + Writer: w, + buf: make([]byte, 0), + didSwallow: false, + } +} + +// Write implements io.Writer interface. Normally forwards bytes to w.Writer, +// except for the first Settings ACK frame that it sees. +func (w *settingsAckSwallowWriter) Write(p []byte) (int, error) { + if !w.didSwallow { + w.buf = append(w.buf, p...) + // Process all the frames we have collected into w.buf + for { + // Append until we get full frame header which is 9 bytes + if len(w.buf) < 9 { + break + } + // Check if we have collected a whole frame. + fh, err := http2.ReadFrameHeader(bytes.NewBuffer(w.buf)) + if err != nil { + // Corrupted frame, fail current Write + return 0, err + } + fSize := fh.Length + 9 + if uint32(len(w.buf)) < fSize { + // Have not collected whole frame. Stop processing buf, and withold on + // forward bytes to w.Writer until we get the full frame. 
+ break + } + + // We have now collected a whole frame. + if fh.Type == http2.FrameSettings && fh.Flags.Has(http2.FlagSettingsAck) { + // If Settings ACK frame, do not forward to underlying writer, remove + // bytes from w.buf, and record that we have swallowed Settings Ack + // frame. + w.didSwallow = true + w.buf = w.buf[fSize:] + continue + } + + // Not settings ack frame. Forward bytes to w.Writer. + if _, err := w.Writer.Write(w.buf[:fSize]); err != nil { + // Couldn't forward bytes. Fail current Write. + return 0, err + } + w.buf = w.buf[fSize:] + } + return len(p), nil + } + return w.Writer.Write(p) +} + +// Flush calls w.Writer.Flush. +func (w *settingsAckSwallowWriter) Flush() error { + return w.Writer.Flush() +} + +// isH2CUpgrade returns true if the header properly request an upgrade to h2c +// as specified by Section 3.2. +func isH2CUpgrade(h http.Header) bool { + return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") && + httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings") +} + +// getH2Settings returns the []http2.Setting that are encoded in the +// HTTP2-Settings header. +func getH2Settings(h http.Header) ([]http2.Setting, error) { + vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")] + if !ok { + return nil, errors.New("missing HTTP2-Settings header") + } + if len(vals) != 1 { + return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals) + } + settings, err := decodeSettings(vals[0]) + if err != nil { + return nil, fmt.Errorf("Invalid HTTP2-Settings: %q", vals[0]) + } + return settings, nil +} + +// decodeSettings decodes the base64url header value of the HTTP2-Settings +// header. RFC 7540 Section 3.2.1. 
+func decodeSettings(headerVal string) ([]http2.Setting, error) { + b, err := base64.RawURLEncoding.DecodeString(headerVal) + if err != nil { + return nil, err + } + if len(b)%6 != 0 { + return nil, err + } + settings := make([]http2.Setting, 0) + for i := 0; i < len(b)/6; i++ { + settings = append(settings, http2.Setting{ + ID: http2.SettingID(binary.BigEndian.Uint16(b[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(b[i*6+2 : i*6+6]), + }) + } + + return settings, nil +} + +// getH2HeaderBytes return the headers in r a []bytes encoded by HPACK. +func getH2HeaderBytes(r *http.Request, maxHeaderTableSize uint32) ([]byte, error) { + headerBytes := bytes.NewBuffer(nil) + hpackEnc := hpack.NewEncoder(headerBytes) + hpackEnc.SetMaxDynamicTableSize(maxHeaderTableSize) + + // Section 8.1.2.3 + err := hpackEnc.WriteField(hpack.HeaderField{ + Name: ":method", + Value: r.Method, + }) + if err != nil { + return nil, err + } + + err = hpackEnc.WriteField(hpack.HeaderField{ + Name: ":scheme", + Value: "http", + }) + if err != nil { + return nil, err + } + + err = hpackEnc.WriteField(hpack.HeaderField{ + Name: ":authority", + Value: r.Host, + }) + if err != nil { + return nil, err + } + + path := r.URL.Path + if r.URL.RawQuery != "" { + path = strings.Join([]string{path, r.URL.RawQuery}, "?") + } + err = hpackEnc.WriteField(hpack.HeaderField{ + Name: ":path", + Value: path, + }) + if err != nil { + return nil, err + } + + // TODO Implement Section 8.3 + + for header, values := range r.Header { + // Skip non h2 headers + if isNonH2Header(header) { + continue + } + for _, v := range values { + err := hpackEnc.WriteField(hpack.HeaderField{ + Name: strings.ToLower(header), + Value: v, + }) + if err != nil { + return nil, err + } + } + } + return headerBytes.Bytes(), nil +} + +// Connection specific headers listed in RFC 7540 Section 8.1.2.2 that are not +// suppose to be transferred to HTTP/2. 
The Http2-Settings header is skipped +// since already use to create the HTTP/2 SETTINGS frame. +var nonH2Headers = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", + "Http2-Settings", +} + +// isNonH2Header returns true if header should not be transferred to HTTP/2. +func isNonH2Header(header string) bool { + for _, nonH2h := range nonH2Headers { + if header == nonH2h { + return true + } + } + return false +} diff --git a/test/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/test/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000..685f0e7ea2 --- /dev/null +++ b/test/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. 
+ CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. 
For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. 
+func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. 
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/test/vendor/golang.org/x/net/trace/events.go b/test/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000..c646a6952e --- /dev/null +++ b/test/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. 
+ Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. 
+ // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. 
+func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/test/vendor/golang.org/x/net/trace/histogram.go b/test/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000..9bf4286c79 --- /dev/null +++ b/test/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. 
+func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. 
+func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. 
+func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. 
+func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. +const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't 
execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/test/vendor/golang.org/x/net/trace/trace.go b/test/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000..3ebf6f2daa --- /dev/null +++ b/test/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. 
The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? 
+ // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. 
+ Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents 
an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. 
+func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. 
+ activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. 
+ tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. 
+ mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. 
+func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. 
+ */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. 
+ if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/test/vendor/golang.org/x/oauth2/google/appengine.go b/test/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 0000000000..feb1157b15 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "time" + + "golang.org/x/oauth2" +) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. +// +// First generation App Engine runtimes (<= Go 1.9): +// AppEngineTokenSource returns a token source that fetches tokens issued to the +// current App Engine application's service account. The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. 
Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + return appEngineTokenSource(ctx, scope...) +} diff --git a/test/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/test/vendor/golang.org/x/oauth2/google/appengine_gen1.go new file mode 100644 index 0000000000..83dacac320 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// This file applies to App Engine first generation runtimes (<= Go 1.9). + +package google + +import ( + "context" + "sort" + "strings" + "sync" + + "golang.org/x/oauth2" + "google.golang.org/appengine" +) + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &gaeTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. 
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/test/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/test/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 0000000000..04c2c2216a --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. 
Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/test/vendor/golang.org/x/oauth2/google/default.go b/test/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 0000000000..ad2c09236c --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. 
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. 
App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. + if appengineTokenFunc != nil { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource("", scopes...), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). 
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/test/vendor/golang.org/x/oauth2/google/doc.go b/test/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 0000000000..73be629033 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. 
Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful is some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/test/vendor/golang.org/x/oauth2/google/google.go b/test/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 0000000000..81de32b360 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,209 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. 
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + AuthStyle: oauth2.AuthStyleInParams, +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://oauth2.googleapis.com/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. 
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) 
+ ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// If no scopes are specified, a set of default scopes are automatically granted. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. 
+func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) +} + +type computeSource struct { + account string + scopes []string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenURI := "instance/service-accounts/" + acct + "/token" + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI = tokenURI + "?" + v.Encode() + } + tokenJSON, err := metadata.Get(tokenURI) + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + tok := &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. 
+ return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/test/vendor/golang.org/x/oauth2/google/jwt.go b/test/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 0000000000..b0fdb3a888 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/test/vendor/golang.org/x/oauth2/google/sdk.go b/test/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 0000000000..456224bc78 --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: 
"oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. 
+ continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/test/vendor/golang.org/x/oauth2/jws/jws.go b/test/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 0000000000..683d2d271a --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. 
+// Please switch to another JWS package or copy this package into your own +// source tree. +package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. 
+ now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. 
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." 
+ parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/test/vendor/golang.org/x/oauth2/jwt/jwt.go b/test/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 0000000000..b2bf18298b --- /dev/null +++ b/test/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. 
+ PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. 
+type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/test/vendor/golang.org/x/sync/AUTHORS b/test/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/test/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/test/vendor/golang.org/x/sync/CONTRIBUTORS b/test/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/test/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/test/vendor/golang.org/x/sync/LICENSE b/test/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/golang.org/x/sync/PATENTS b/test/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/test/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/test/vendor/golang.org/x/sync/errgroup/errgroup.go b/test/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..9857fe53d3 --- /dev/null +++ b/test/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "context" + "sync" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/test/vendor/golang.org/x/sync/semaphore/semaphore.go b/test/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 0000000000..7f096fef07 --- /dev/null +++ b/test/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. 
+package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. 
+func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/test/vendor/google.golang.org/api/AUTHORS b/test/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 0000000000..f07029059d --- /dev/null +++ b/test/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,11 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. +LightStep Inc. 
diff --git a/test/vendor/google.golang.org/api/CONTRIBUTORS b/test/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 0000000000..788677b8f0 --- /dev/null +++ b/test/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,56 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. 
+ +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Kunpei Sakai +Matthew Dolan +Matthew Whisenhunt +Michael McGreevy +Nick Craig-Wood +Robbie Trencheny +Ross Light +Sarah Adams +Scott Van Woudenberg +Takashi Matsuo diff --git a/test/vendor/google.golang.org/api/LICENSE b/test/vendor/google.golang.org/api/LICENSE new file mode 100644 index 0000000000..263aa7a0c1 --- /dev/null +++ b/test/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/google.golang.org/api/googleapi/transport/apikey.go b/test/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 0000000000..61720ec2ea --- /dev/null +++ b/test/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,44 @@ +// Copyright 2012 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +// +// This package is DEPRECATED. Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. 
+ Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/test/vendor/google.golang.org/api/internal/conn_pool.go b/test/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 0000000000..fedcce15b4 --- /dev/null +++ b/test/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/test/vendor/google.golang.org/api/internal/creds.go b/test/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 0000000000..75e9445e1b --- /dev/null +++ b/test/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,105 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. +const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials based on the input. +// +// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow +// - Otherwise, returns OAuth 2.0 flow. +func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + return nil, err + } + if len(data) > 0 && len(scopes) == 0 { + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. 
+ } + if err := json.Unmarshal(cred.JSON, &f); err != nil { + return nil, err + } + if f.Type == serviceAccountKey { + ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + } + return cred, err +} + +func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { + // Use the API endpoint as the default audience + audience := endpoint + if len(audiences) > 0 { + // TODO(shinfan): Update golang oauth to support multiple audiences. + if len(audiences) > 1 { + return nil, fmt.Errorf("multiple audiences support is not implemented") + } + audience = audiences[0] + } + return google.JWTAccessTokenSourceFromJSON(data, audience) +} + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. +func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} diff --git a/test/vendor/google.golang.org/api/internal/pool.go b/test/vendor/google.golang.org/api/internal/pool.go new file mode 100644 index 0000000000..908579fffc --- /dev/null +++ b/test/vendor/google.golang.org/api/internal/pool.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "errors" + + "google.golang.org/grpc/naming" +) + +// TODO: move to transport/grpc package + +// PoolResolver provides a fixed list of addresses to load balance between +// and does not provide further updates. 
+type PoolResolver struct { + poolSize int + dialOpt *DialSettings + ch chan []*naming.Update +} + +// NewPoolResolver returns a PoolResolver +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func NewPoolResolver(size int, o *DialSettings) *PoolResolver { + return &PoolResolver{poolSize: size, dialOpt: o} +} + +// Resolve returns a Watcher for the endpoint defined by the DialSettings +// provided to NewPoolResolver. +func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("no endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +// Close releases resources associated with the pool and causes Next to unblock. +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/test/vendor/google.golang.org/api/internal/settings.go b/test/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 0000000000..3af6559ef3 --- /dev/null +++ b/test/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,95 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. 
+type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + + return nil +} diff --git a/test/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/test/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE new file mode 100644 index 0000000000..7109c6ef93 --- /dev/null +++ b/test/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Joshua Tacoma. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/google.golang.org/api/iterator/iterator.go b/test/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 0000000000..1799b5d9af --- /dev/null +++ b/test/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,227 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. +package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. 
+var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. +type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. + Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. 
+ bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. +// +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. +func (pi *PageInfo) Remaining() int { return pi.bufLen() } + +// next provides support for an iterator's Next function. 
An iterator's Next +// should return the error returned by next if non-nil; else it can assume +// there is at least one item in its buffer, and it should return that item and +// remove it from the buffer. +func (pi *PageInfo) next() error { + pi.nextCalled = true + if pi.err != nil { // Once we get an error, always return it. + // TODO(jba): fix so users can retry on transient errors? Probably not worth it. + return pi.err + } + if pi.nextPageCalled { + pi.err = errMixed + return pi.err + } + // Loop until we get some items or reach the end. + for pi.bufLen() == 0 && !pi.atEnd { + if err := pi.fill(pi.MaxSize); err != nil { + pi.err = err + return pi.err + } + if pi.Token == "" { + pi.atEnd = true + } + } + // Either the buffer is non-empty or pi.atEnd is true (or both). + if pi.bufLen() == 0 { + // The buffer is empty and pi.atEnd is true, i.e. the service has no + // more items. + pi.err = Done + } + return pi.err +} + +// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the +// next-page token returned by the call. +// If fill returns a non-nil error, the buffer will be empty. +func (pi *PageInfo) fill(size int) error { + tok, err := pi.fetch(size, pi.Token) + if err != nil { + pi.takeBuf() // clear the buffer + return err + } + pi.Token = tok + return nil +} + +// Pageable is implemented by iterators that support paging. +type Pageable interface { + // PageInfo returns paging information associated with the iterator. + PageInfo() *PageInfo +} + +// Pager supports retrieving iterator items a page at a time. +type Pager struct { + pageInfo *PageInfo + pageSize int +} + +// NewPager returns a pager that uses iter. Calls to its NextPage method will +// obtain exactly pageSize items, unless fewer remain. The pageToken argument +// indicates where to start the iteration. Pass the empty string to start at +// the beginning, or pass a token retrieved from a call to Pager.NextPage. 
+// +// If you use an iterator with a Pager, you must not call Next on the iterator. +func NewPager(iter Pageable, pageSize int, pageToken string) *Pager { + p := &Pager{ + pageInfo: iter.PageInfo(), + pageSize: pageSize, + } + p.pageInfo.Token = pageToken + if pageSize <= 0 { + p.pageInfo.err = errors.New("iterator: page size must be positive") + } + return p +} + +// NextPage retrieves a sequence of items from the iterator and appends them +// to slicep, which must be a pointer to a slice of the iterator's item type. +// Exactly p.pageSize items will be appended, unless fewer remain. +// +// The first return value is the page token to use for the next page of items. +// If empty, there are no more pages. Aside from checking for the end of the +// iteration, the returned page token is only needed if the iteration is to be +// resumed a later time, in another context (possibly another process). +// +// The second return value is non-nil if an error occurred. It will never be +// the special iterator sentinel value Done. To recognize the end of the +// iteration, compare nextPageToken to the empty string. +// +// It is possible for NextPage to return a single zero-length page along with +// an empty page token when there are no more items in the iteration. +func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. 
+ wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/test/vendor/google.golang.org/api/option/credentials_go19.go b/test/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 0000000000..d06f918b0e --- /dev/null +++ b/test/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,23 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/test/vendor/google.golang.org/api/option/credentials_notgo19.go b/test/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 0000000000..0ce107a624 --- /dev/null +++ b/test/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,22 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/test/vendor/google.golang.org/api/option/option.go b/test/vendor/google.golang.org/api/option/option.go new file mode 100644 index 0000000000..0de9466c2f --- /dev/null +++ b/test/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,237 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. +func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. 
+// +// Deprecated: Use WithCredentialsFile instead. +func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. +func WithScopes(scope ...string) ClientOption { + return withScopes(scope) +} + +type withScopes []string + +func (w withScopes) Apply(o *internal.DialSettings) { + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) +} + +// WithUserAgent returns a ClientOption that sets the User-Agent. +func WithUserAgent(ua string) ClientOption { + return withUA(ua) +} + +type withUA string + +func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) } + +// WithHTTPClient returns a ClientOption that specifies the HTTP client to use +// as the basis of communications. This option may only be used with services +// that support HTTP as their communication transport. When used, the +// WithHTTPClient option takes precedent over all other supplied options. 
+func WithHTTPClient(client *http.Client) ClientOption { + return withHTTPClient{client} +} + +type withHTTPClient struct{ client *http.Client } + +func (w withHTTPClient) Apply(o *internal.DialSettings) { + o.HTTPClient = w.client +} + +// WithGRPCConn returns a ClientOption that specifies the gRPC client +// connection to use as the basis of communications. This option may only be +// used with services that support gRPC as their communication transport. When +// used, the WithGRPCConn option takes precedent over all other supplied +// options. +func WithGRPCConn(conn *grpc.ClientConn) ClientOption { + return withGRPCConn{conn} +} + +type withGRPCConn struct{ conn *grpc.ClientConn } + +func (w withGRPCConn) Apply(o *internal.DialSettings) { + o.GRPCConn = w.conn +} + +// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption +// to an underlying gRPC dial. It does not work with WithGRPCConn. +func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + o.GRPCConnPoolSize = int(w) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... 
+func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. 
+// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. +func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabledOption{} +} + +type withTelemetryDisabledOption struct{} + +func (w withTelemetryDisabledOption) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} diff --git a/test/vendor/google.golang.org/api/support/bundler/bundler.go b/test/vendor/google.golang.org/api/support/bundler/bundler.go new file mode 100644 index 0000000000..418143d7c2 --- /dev/null +++ b/test/vendor/google.golang.org/api/support/bundler/bundler.go @@ -0,0 +1,402 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bundler supports bundling (batching) of items. Bundling amortizes an +// action with fixed costs over multiple items. For example, if an API provides +// an RPC that accepts a list of items as input, but clients would prefer +// adding items one at a time, then a Bundler can accept individual items from +// the client and bundle many of them into a single RPC. +// +// This package is experimental and subject to change without notice. 
+package bundler + +import ( + "context" + "errors" + "reflect" + "sync" + "time" + + "golang.org/x/sync/semaphore" +) + +type mode int + +const ( + DefaultDelayThreshold = time.Second + DefaultBundleCountThreshold = 10 + DefaultBundleByteThreshold = 1e6 // 1M + DefaultBufferedByteLimit = 1e9 // 1G +) + +const ( + none mode = iota + add + addWait +) + +var ( + // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. + ErrOverflow = errors.New("bundler reached buffered byte limit") + + // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. + ErrOversizedItem = errors.New("item size exceeds bundle byte limit") + + // errMixedMethods indicates that mutually exclusive methods has been + // called subsequently. + errMixedMethods = errors.New("calls to Add and AddWait cannot be mixed") +) + +// A Bundler collects items added to it into a bundle until the bundle +// exceeds a given size, then calls a user-provided function to handle the +// bundle. +// +// The exported fields are only safe to modify prior to the first call to Add +// or AddWait. +type Bundler struct { + // Starting from the time that the first message is added to a bundle, once + // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. + DelayThreshold time.Duration + + // Once a bundle has this many items, handle the bundle. Since only one + // item at a time is added to a bundle, no bundle will exceed this + // threshold, so it also serves as a limit. The default is + // DefaultBundleCountThreshold. + BundleCountThreshold int + + // Once the number of bytes in current bundle reaches this threshold, handle + // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, + // but does not cap the total size of a bundle. + BundleByteThreshold int + + // The maximum size of a bundle, in bytes. Zero means unlimited. 
+ BundleByteLimit int + + // The maximum number of bytes that the Bundler will keep in memory before + // returning ErrOverflow. The default is DefaultBufferedByteLimit. + BufferedByteLimit int + + // The maximum number of handler invocations that can be running at once. + // The default is 1. + HandlerLimit int + + handler func(interface{}) // called to handle a bundle + itemSliceZero reflect.Value // nil (zero value) for slice of items + + mu sync.Mutex // guards access to fields below + flushTimer *time.Timer // implements DelayThreshold + handlerCount int // # of bundles currently being handled (i.e. handler is invoked on them) + sem *semaphore.Weighted // enforces BufferedByteLimit + semOnce sync.Once // guards semaphore initialization + // The current bundle we're adding items to. Not yet in the queue. + // Appended to the queue once the flushTimer fires or the bundle + // thresholds/limits are reached. If curBundle is nil and tail is + // not, we first try to add items to tail. Once tail is full or handled, + // we create a new curBundle for the incoming item. + curBundle *bundle + // The next bundle in the queue to be handled. Nil if the queue is + // empty. + head *bundle + // The last bundle in the queue to be handled. Nil if the queue is + // empty. If curBundle is nil and tail isn't, we attempt to add new + // items to the tail until if becomes full or has been passed to the + // handler. + tail *bundle + curFlush *sync.WaitGroup // counts outstanding bundles since last flush + prevFlush chan bool // signal used to wait for prior flush + + // The first call to Add or AddWait, mode will be add or addWait respectively. + // If there wasn't call yet then mode is none. + mode mode + // TODO: consider alternative queue implementation for head/tail bundle. 
see: + // https://code-review.googlesource.com/c/google-api-go-client/+/47991/4/support/bundler/bundler.go#74 +} + +// A bundle is a group of items that were added individually and will be passed +// to a handler as a slice. +type bundle struct { + items reflect.Value // slice of T + size int // size in bytes of all items + next *bundle // bundles are handled in order as a linked list queue + flush *sync.WaitGroup // the counter that tracks flush completion +} + +// add appends item to this bundle and increments the total size. It requires +// that b.mu is locked. +func (bu *bundle) add(item interface{}, size int) { + bu.items = reflect.Append(bu.items, reflect.ValueOf(item)) + bu.size += size +} + +// NewBundler creates a new Bundler. +// +// itemExample is a value of the type that will be bundled. For example, if you +// want to create bundles of *Entry, you could pass &Entry{} for itemExample. +// +// handler is a function that will be called on each bundle. If itemExample is +// of type T, the argument to handler is of type []T. handler is always called +// sequentially for each bundle, and never in parallel. +// +// Configure the Bundler by setting its thresholds and limits before calling +// any of its methods. +func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { + b := &Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultBundleCountThreshold, + BundleByteThreshold: DefaultBundleByteThreshold, + BufferedByteLimit: DefaultBufferedByteLimit, + HandlerLimit: 1, + + handler: handler, + itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), + curFlush: &sync.WaitGroup{}, + } + return b +} + +func (b *Bundler) initSemaphores() { + // Create the semaphores lazily, because the user may set limits + // after NewBundler. + b.semOnce.Do(func() { + b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) + }) +} + +// enqueueCurBundle moves curBundle to the end of the queue. 
The bundle may be +// handled immediately if we are below HandlerLimit. It requires that b.mu is +// locked. +func (b *Bundler) enqueueCurBundle() { + // We don't require callers to check if there is a pending bundle. It + // may have already been appended to the queue. If so, return early. + if b.curBundle == nil { + return + } + // If we are below the HandlerLimit, the queue must be empty. Handle + // immediately with a new goroutine. + if b.handlerCount < b.HandlerLimit { + b.handlerCount++ + go b.handle(b.curBundle) + } else if b.tail != nil { + // There are bundles on the queue, so append to the end + b.tail.next = b.curBundle + b.tail = b.curBundle + } else { + // The queue is empty, so initialize the queue + b.head = b.curBundle + b.tail = b.curBundle + } + b.curBundle = nil + if b.flushTimer != nil { + b.flushTimer.Stop() + b.flushTimer = nil + } +} + +// setMode sets the state of Bundler's mode. If mode was defined before +// and passed state is different from it then return an error. +func (b *Bundler) setMode(m mode) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.mode == m || b.mode == none { + b.mode = m + return nil + } + return errMixedMethods +} + +// canFit returns true if bu can fit an additional item of size bytes based +// on the limits of Bundler b. +func (b *Bundler) canFit(bu *bundle, size int) bool { + return (b.BundleByteLimit <= 0 || bu.size+size <= b.BundleByteLimit) && + (b.BundleCountThreshold <= 0 || bu.items.Len() < b.BundleCountThreshold) +} + +// Add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. Add returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed +// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for +// memory, Add returns ErrOverflow. 
+// +// Add never blocks. +func (b *Bundler) Add(item interface{}, size int) error { + if err := b.setMode(add); err != nil { + return err + } + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + + // If adding this item would exceed our allotted memory + // footprint, we can't accept it. + // (TryAcquire also returns false if anything is waiting on the semaphore, + // so calls to Add and AddWait shouldn't be mixed.) + b.initSemaphores() + if !b.sem.TryAcquire(int64(size)) { + return ErrOverflow + } + + b.mu.Lock() + defer b.mu.Unlock() + return b.add(item, size) +} + +// add adds item to the tail of the bundle queue or curBundle depending on space +// and nil-ness (see inline comments). It marks curBundle for handling (by +// appending it to the queue) if any of the thresholds or limits are exceeded. +// curBundle is lazily initialized. It requires that b.mu is locked. +func (b *Bundler) add(item interface{}, size int) error { + // If we don't have a curBundle, see if we can add to the queue tail. + if b.tail != nil && b.curBundle == nil && b.canFit(b.tail, size) { + b.tail.add(item, size) + return nil + } + + // If we can't fit in the existing curBundle, move it onto the queue. + if b.curBundle != nil && !b.canFit(b.curBundle, size) { + b.enqueueCurBundle() + } + + // Create a curBundle if we don't have one. + if b.curBundle == nil { + b.curFlush.Add(1) + b.curBundle = &bundle{ + items: b.itemSliceZero, + flush: b.curFlush, + } + } + + // Add the item. + b.curBundle.add(item, size) + + // If curBundle is ready for handling, move it to the queue. 
+ if b.curBundle.size >= b.BundleByteThreshold || + b.curBundle.items.Len() == b.BundleCountThreshold { + b.enqueueCurBundle() + } + + // If we created a new bundle and it wasn't immediately handled, set a timer + if b.curBundle != nil && b.flushTimer == nil { + b.flushTimer = time.AfterFunc(b.DelayThreshold, b.tryHandleBundles) + } + + return nil +} + +// tryHandleBundles is the timer callback that handles or queues any current +// bundle after DelayThreshold time, even if the bundle isn't completely full. +func (b *Bundler) tryHandleBundles() { + b.mu.Lock() + b.enqueueCurBundle() + b.mu.Unlock() +} + +// next returns the next bundle that is ready for handling and removes it from +// the internal queue. It requires that b.mu is locked. +func (b *Bundler) next() *bundle { + if b.head == nil { + return nil + } + out := b.head + b.head = b.head.next + if b.head == nil { + b.tail = nil + } + out.next = nil + return out +} + +// handle calls the user-specified handler on the given bundle. handle is +// intended to be run as a goroutine. After the handler returns, we update the +// byte total. handle continues processing additional bundles that are ready. +// If no more bundles are ready, the handler count is decremented and the +// goroutine ends. +func (b *Bundler) handle(bu *bundle) { + for bu != nil { + b.handler(bu.items.Interface()) + bu = b.postHandle(bu) + } + b.mu.Lock() + b.handlerCount-- + b.mu.Unlock() +} + +func (b *Bundler) postHandle(bu *bundle) *bundle { + b.mu.Lock() + defer b.mu.Unlock() + b.sem.Release(int64(bu.size)) + bu.flush.Done() + return b.next() +} + +// AddWait adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. AddWait returns ErrOversizedItem in this case. 
+// +// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), +// AddWait blocks until space is available or ctx is done. +// +// Calls to Add and AddWait should not be mixed on the same Bundler. +func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { + if err := b.setMode(addWait); err != nil { + return err + } + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory footprint, block + // until space is available. The semaphore is FIFO, so there will be no + // starvation. + b.initSemaphores() + if err := b.sem.Acquire(ctx, int64(size)); err != nil { + return err + } + + b.mu.Lock() + defer b.mu.Unlock() + return b.add(item, size) +} + +// Flush invokes the handler for all remaining items in the Bundler and waits +// for it to return. +func (b *Bundler) Flush() { + b.mu.Lock() + + // If a curBundle is pending, move it to the queue. + b.enqueueCurBundle() + + // Store a pointer to the WaitGroup that counts outstanding bundles + // in the current flush and create a new one to track the next flush. + wg := b.curFlush + b.curFlush = &sync.WaitGroup{} + + // Flush must wait for all prior, outstanding flushes to complete. + // We use a channel to communicate completion between each flush in + // the sequence. + prev := b.prevFlush + next := make(chan bool) + b.prevFlush = next + + b.mu.Unlock() + + // Wait until the previous flush is finished. + if prev != nil { + <-prev + } + + // Wait until this flush is finished. + wg.Wait() + + // Allow the next flush to finish. 
+ close(next) +} diff --git a/test/vendor/google.golang.org/api/transport/dial.go b/test/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 0000000000..2c495ad538 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,36 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package transport + +import ( + "context" + "net/http" + + "google.golang.org/grpc" + + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} diff --git a/test/vendor/google.golang.org/api/transport/doc.go b/test/vendor/google.golang.org/api/transport/doc.go new file mode 100644 index 0000000000..7143abee45 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 Google LLC. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport diff --git a/test/vendor/google.golang.org/api/transport/go19.go b/test/vendor/google.golang.org/api/transport/go19.go new file mode 100644 index 0000000000..abaa633f4e --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/go19.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/test/vendor/google.golang.org/api/transport/grpc/dial.go b/test/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 0000000000..3c0782e040 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,287 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package grpc supports network connections to GRPC servers. +// This package is not intended for use by end developers. 
Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "context" + "errors" + "log" + "os" + "strings" + + "go.opencensus.io/plugin/ocgrpc" + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + grpcgoogle "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/oauth" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" +) + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineDialerHook func(context.Context) grpc.DialOption + +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool.Conn(), nil + } + if o.GRPCConnPoolSize != 0 { + // NOTE(cbro): RoundRobin and WithBalancer are deprecated and we need to remove usages of it. + balancer := grpc.RoundRobin(internal.NewPoolResolver(o.GRPCConnPoolSize, o)) + o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) + } + return dial(ctx, false, o) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. 
+func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + return dial(ctx, true, o) +} + +// DialPool returns a pool of GRPC connections for the given service. +// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer. +// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed. +// The context and options are shared between each Conn in the pool. +// The pool size is configured using the WithGRPCConnectionPool option. +// +// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287. +func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) { + o, err := processAndValidateOpts(opts) + if err != nil { + return nil, err + } + if o.GRPCConnPool != nil { + return o.GRPCConnPool, nil + } + poolSize := o.GRPCConnPoolSize + o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to. + + if poolSize == 0 || poolSize == 1 { + // Fast path for common case for a connection pool with a single connection. + conn, err := dial(ctx, false, o) + if err != nil { + return nil, err + } + return &singleConnPool{conn}, nil + } + + pool := &roundRobinConnPool{} + for i := 0; i < poolSize; i++ { + conn, err := dial(ctx, false, o) + if err != nil { + defer pool.Close() // NOTE: error from Close is ignored. 
+ return nil, err + } + pool.conns = append(pool.conns, conn) + } + return pool, nil +} + +func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + var grpcOpts []grpc.DialOption + if insecure { + grpcOpts = []grpc.DialOption{grpc.WithInsecure()} + } else if !o.NoAuth { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } + creds, err := internal.Creds(ctx, o) + if err != nil { + return nil, err + } + + if o.QuotaProject == "" { + o.QuotaProject = internal.QuotaProjectFromCreds(creds) + } + + // Attempt Direct Path only if: + // * The endpoint is a host:port (or dns:///host:port). + // * Credentials are obtained via GCE metadata server, using the default + // service account. + // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. + // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub + if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(o.Endpoint, "dns:///") { + o.Endpoint = "dns:///" + o.Endpoint + } + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle( + grpcgoogle.NewComputeEngineCredentials(), + ), + // For now all DirectPath go clients will be using the following lb config, but in future + // when different services need different configs, then we should change this to a + // per-service config. + grpc.WithDisableServiceConfig(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`), + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. 
+ } else { + grpcOpts = []grpc.DialOption{ + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + } + } + + if appengineDialerHook != nil { + // Use the Socket API on App Engine. + // appengine dialer will override socketopt dialer + grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) + } + + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. + grpcOpts = addOCStatsHandler(grpcOpts, o) + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + + // TODO(weiranf): This socketopt dialer will be used by default at some + // point when isDirectPathEnabled will default to true, we guard it by + // the Directpath env var for now once we can introspect user defined + // dialer (https://github.com/grpc/grpc-go/issues/2795). + if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) +} + +func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} + +// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. +type grpcTokenSource struct { + oauth.TokenSource + + // Additional metadata attached as headers. + quotaProject string + requestReason string +} + +// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. 
+func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( + map[string]string, error) { + metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) + if err != nil { + return nil, err + } + + // Attach system parameter + if ts.quotaProject != "" { + metadata["X-goog-user-project"] = ts.quotaProject + } + if ts.requestReason != "" { + metadata["X-goog-request-reason"] = ts.requestReason + } + return metadata, nil +} + +func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { + if ts == nil { + return false + } + tok, err := ts.Token() + if err != nil { + return false + } + if tok == nil { + return false + } + if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { + return false + } + return true +} + +func isDirectPathEnabled(endpoint string) bool { + // Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + // + // TODO(cbro): once gRPC has introspectible options, check the user hasn't + // provided a custom dialer in gRPC options. + if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + // Only try direct path if the user has opted in via the environment variable. 
+ whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") + for _, api := range whitelist { + // Ignore empty string since an empty env variable splits into [""] + if api != "" && strings.Contains(endpoint, api) { + return true + } + } + return false +} + +func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + return &o, nil +} + +type connPoolOption struct{ ConnPool } + +// WithConnPool returns a ClientOption that specifies the ConnPool +// connection to use as the basis of communications. +// +// This is only to be used by Google client libraries internally, for example +// when creating a longrunning API client that shares the same connection pool +// as a service client. +func WithConnPool(p ConnPool) option.ClientOption { + return connPoolOption{p} +} + +func (o connPoolOption) Apply(s *internal.DialSettings) { + s.GRPCConnPool = o.ConnPool +} diff --git a/test/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/test/vendor/google.golang.org/api/transport/grpc/dial_appengine.go new file mode 100644 index 0000000000..2c6aef2264 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -0,0 +1,31 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + "google.golang.org/grpc" +) + +func init() { + // NOTE: dev_appserver doesn't currently support SSL. + // When it does, this code can be removed. 
+ if appengine.IsDevAppServer() { + return + } + + appengineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} diff --git a/test/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/test/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go new file mode 100644 index 0000000000..0e4f388968 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -0,0 +1,49 @@ +// Copyright 2019 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11,linux + +package grpc + +import ( + "context" + "net" + "syscall" + + "golang.org/x/sys/unix" + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. 
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/test/vendor/google.golang.org/api/transport/grpc/pool.go b/test/vendor/google.golang.org/api/transport/grpc/pool.go new file mode 100644 index 0000000000..32c02934b7 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/grpc/pool.go @@ -0,0 +1,92 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package grpc + +import ( + "context" + "fmt" + "sync/atomic" + + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency. + +var _ ConnPool = &roundRobinConnPool{} +var _ ConnPool = &singleConnPool{} + +// singleConnPool is a special case for a single connection. 
+type singleConnPool struct {
+	*grpc.ClientConn
+}
+
+func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn }
+func (p *singleConnPool) Num() int               { return 1 }
+
+type roundRobinConnPool struct {
+	conns []*grpc.ClientConn
+
+	idx uint32 // access via sync/atomic
+}
+
+func (p *roundRobinConnPool) Num() int {
+	return len(p.conns)
+}
+
+func (p *roundRobinConnPool) Conn() *grpc.ClientConn {
+	i := atomic.AddUint32(&p.idx, 1)
+	return p.conns[i%uint32(len(p.conns))]
+}
+
+func (p *roundRobinConnPool) Close() error {
+	var errs multiError
+	for _, conn := range p.conns {
+		if err := conn.Close(); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
+	return p.Conn().Invoke(ctx, method, args, reply, opts...)
+}
+
+func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return p.Conn().NewStream(ctx, desc, method, opts...)
+}
+
+// multiError represents errors from multiple conns in the group.
+//
+// TODO: figure out how and whether this is useful to export. End users should
+// not be depending on the transport/grpc package directly, so there might need
+// to be some service-specific multi-error type.
+type multiError []error + +func (m multiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/test/vendor/google.golang.org/api/transport/http/dial.go b/test/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 0000000000..c2ca3b5f11 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,156 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "errors" + "net/http" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? 
+	if settings.HTTPClient != nil {
+		return settings.HTTPClient, settings.Endpoint, nil
+	}
+	trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings)
+	if err != nil {
+		return nil, "", err
+	}
+	return &http.Client{Transport: trans}, settings.Endpoint, nil
+}
+
+// NewTransport creates an http.RoundTripper for use communicating with a Google
+// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
+func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
+	settings, err := newSettings(opts)
+	if err != nil {
+		return nil, err
+	}
+	if settings.HTTPClient != nil {
+		return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
+	}
+	return newTransport(ctx, base, settings)
+}
+
+func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
+	paramTransport := &parameterTransport{
+		base:          base,
+		userAgent:     settings.UserAgent,
+		quotaProject:  settings.QuotaProject,
+		requestReason: settings.RequestReason,
+	}
+	var trans http.RoundTripper = paramTransport
+	trans = addOCTransport(trans, settings)
+	switch {
+	case settings.NoAuth:
+		// Do nothing.
+ case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + if paramTransport.quotaProject == "" { + paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. 
+func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} + +func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/test/vendor/google.golang.org/api/transport/http/dial_appengine.go b/test/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 0000000000..baee9f27af --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,20 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package http + +import ( + "context" + "net/http" + + "google.golang.org/appengine/urlfetch" +) + +func init() { + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } +} diff --git a/test/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/test/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 0000000000..fb951bb162 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,86 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. 
+package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. 
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/test/vendor/google.golang.org/api/transport/not_go19.go b/test/vendor/google.golang.org/api/transport/not_go19.go new file mode 100644 index 0000000000..657bb6b2e9 --- /dev/null +++ b/test/vendor/google.golang.org/api/transport/not_go19.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package transport + +import ( + "context" + + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.DefaultCredentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.DefaultCredentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/test/vendor/google.golang.org/appengine/appengine.go b/test/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 0000000000..8c9697674f --- /dev/null +++ b/test/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,135 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. 
+// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. + +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// On App Engine Standard it ensures the server has started and is prepared to +// receive requests. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// IsStandard reports whether the App Engine app is running in the standard +// environment. This includes both the first generation runtimes (<= Go 1.9) +// and the second generation runtimes (>= Go 1.11). +func IsStandard() bool { + return internal.IsStandard() +} + +// IsFlex reports whether the App Engine app is running in the flexible environment. 
+func IsFlex() bool { + return internal.IsFlex() +} + +// IsAppEngine reports whether the App Engine app is running on App Engine, in either +// the standard or flexible environment. +func IsAppEngine() bool { + return internal.IsAppEngine() +} + +// IsSecondGen reports whether the App Engine app is running on the second generation +// runtimes (>= Go 1.11). +func IsSecondGen() bool { + return internal.IsSecondGen() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return internal.ReqContext(req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. 
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/test/vendor/google.golang.org/appengine/appengine_vm.go b/test/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 0000000000..f4b645aad3 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works in App Engine "flexible environment". +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/test/vendor/google.golang.org/appengine/errors.go b/test/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 0000000000..16d0772e2a --- /dev/null +++ b/test/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. 
+func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/test/vendor/google.golang.org/appengine/identity.go b/test/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 0000000000..b8dcf8f361 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. 
+func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. 
+func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. +func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
+func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/test/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/test/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 0000000000..9a2ff77ab5 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,611 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} +func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} +} + +type AppIdentityServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} +func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} +} +func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) +} +func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) +} +func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) +} +func (m *AppIdentityServiceError) XXX_Size() int { + return xxx_messageInfo_AppIdentityServiceError.Size(m) +} +func (m *AppIdentityServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} +func (*SignForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} +} +func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) +} +func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) +} +func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppRequest.Merge(dst, src) +} +func (m *SignForAppRequest) XXX_Size() int { + return xxx_messageInfo_SignForAppRequest.Size(m) +} +func (m *SignForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} +func (*SignForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} +} +func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) +} +func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) +} +func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppResponse.Merge(dst, src) +} +func (m *SignForAppResponse) XXX_Size() int { + return xxx_messageInfo_SignForAppResponse.Size(m) +} +func (m *SignForAppResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} +func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} +} +func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) +} +func (m *GetPublicCertificateForAppRequest) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) +} +func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + X509CertificatePem *string 
`protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} +func (*PublicCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} +} +func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) +} +func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) +} +func (dst *PublicCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicCertificate.Merge(dst, src) +} +func (m *PublicCertificate) XXX_Size() int { + return xxx_messageInfo_PublicCertificate.Size(m) +} +func (m *PublicCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_PublicCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} +func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} +} +func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) +} +func (m *GetPublicCertificateForAppResponse) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) +} +func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() 
string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} +func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} +} +func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) +} +func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) +} +func (m *GetServiceAccountNameRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) +} +func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} +func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} +} +func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) +} +func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) +} +func (m *GetServiceAccountNameResponse) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) +} +func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} +func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} +} +func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) +} +func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) +} +func (m *GetAccessTokenRequest) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenRequest.Size(m) +} +func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo + +func (m *GetAccessTokenRequest) GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} +func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} +} +func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) +} +func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) +} +func (m *GetAccessTokenResponse) 
XXX_Size() int { + return xxx_messageInfo_GetAccessTokenResponse.Size(m) +} +func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} +func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo + +type GetDefaultGcsBucketNameResponse struct { + 
DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} +func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { + proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") + proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") + proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") + 
proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") + proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") + proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") + proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") + proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") + proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") + proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") + proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") + proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) +} + +var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ + // 676 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, + 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e, + 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, + 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, + 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, + 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, + 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, + 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, + 0x33, 0xac, 0x47, 0xb1, 
0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, + 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, + 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, + 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, + 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, + 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, + 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, + 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, + 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, + 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, + 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, + 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, + 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, + 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, + 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, + 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, + 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, + 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, + 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, + 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, + 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 
0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, + 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, + 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, + 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, + 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, + 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, + 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, + 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, + 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, + 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, + 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, + 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, + 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, + 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, + 0xf3, 0x04, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/test/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 0000000000..ddfc0c04a1 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,786 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/modules/modules_service.proto + +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} +func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} +} + +type 
ModulesServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} +func (*ModulesServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} +} +func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) +} +func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) +} +func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModulesServiceError.Merge(dst, src) +} +func (m *ModulesServiceError) XXX_Size() int { + return xxx_messageInfo_ModulesServiceError.Size(m) +} +func (m *ModulesServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo + +type GetModulesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} +func (*GetModulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} +} +func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) +} +func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) +} +func (dst *GetModulesRequest) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_GetModulesRequest.Merge(dst, src) +} +func (m *GetModulesRequest) XXX_Size() int { + return xxx_messageInfo_GetModulesRequest.Size(m) +} +func (m *GetModulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} +func (*GetModulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} +} +func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) +} +func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) +} +func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesResponse.Merge(dst, src) +} +func (m *GetModulesResponse) XXX_Size() int { + return xxx_messageInfo_GetModulesResponse.Size(m) +} +func (m *GetModulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = 
GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionsRequest) ProtoMessage() {} +func (*GetVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} +} +func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) +} +func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsRequest.Merge(dst, src) +} +func (m *GetVersionsRequest) XXX_Size() int { + return xxx_messageInfo_GetVersionsRequest.Size(m) +} +func (m *GetVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} +func (*GetVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} +} +func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) +} +func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) +} +func (dst *GetVersionsResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsResponse.Merge(dst, src) +} +func (m *GetVersionsResponse) XXX_Size() int { + return xxx_messageInfo_GetVersionsResponse.Size(m) +} +func (m *GetVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} +func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} +} +func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) +} +func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) +} +func (m *GetDefaultVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionRequest.Size(m) +} +func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type 
GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} +func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} +} +func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) +} +func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) +} +func (m *GetDefaultVersionResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionResponse.Size(m) +} +func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetNumInstancesRequest) ProtoMessage() {} +func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} +} +func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) +} +func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) +} +func (m *GetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesRequest.Size(m) +} +func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} +func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} +} +func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) +} +func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) +} +func (m *GetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesResponse.Size(m) +} +func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} +func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} +} +func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) +} +func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) +} +func (m *SetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesRequest.Size(m) +} +func (m *SetNumInstancesRequest) 
XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} +func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} +} +func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) +} +func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) +} +func (m *SetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesResponse.Size(m) +} +func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" 
json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} +func (*StartModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} +} +func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) +} +func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleRequest.Merge(dst, src) +} +func (m *StartModuleRequest) XXX_Size() int { + return xxx_messageInfo_StartModuleRequest.Size(m) +} +func (m *StartModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} +func (*StartModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} +} +func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) +} +func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleResponse.Merge(dst, src) +} +func (m *StartModuleResponse) XXX_Size() int { + return xxx_messageInfo_StartModuleResponse.Size(m) +} +func (m *StartModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} +func (*StopModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} +} +func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) +} +func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleRequest.Merge(dst, src) +} +func (m *StopModuleRequest) XXX_Size() int { + return xxx_messageInfo_StopModuleRequest.Size(m) +} +func (m *StopModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo + +func (m *StopModuleRequest) GetModule() string { + 
if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} +func (*StopModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} +} +func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) +} +func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleResponse.Merge(dst, src) +} +func (m *StopModuleResponse) XXX_Size() int { + return xxx_messageInfo_StopModuleResponse.Size(m) +} +func (m *StopModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} +func (*GetHostnameRequest) 
Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} +} +func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) +} +func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) +} +func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameRequest.Merge(dst, src) +} +func (m *GetHostnameRequest) XXX_Size() int { + return xxx_messageInfo_GetHostnameRequest.Size(m) +} +func (m *GetHostnameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} +func (*GetHostnameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} +} +func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) +} +func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) +} +func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameResponse.Merge(dst, src) +} +func (m *GetHostnameResponse) XXX_Size() int { + return xxx_messageInfo_GetHostnameResponse.Size(m) +} +func (m *GetHostnameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { + proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") + proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") + proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") + proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") + proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") + proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest") + proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") + proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") + proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") + proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") + proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") + proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") + proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") + proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") + proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") + proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") + 
proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) +} + +var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, + 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, + 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, + 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, + 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, + 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, + 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, + 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, + 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, + 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, + 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, + 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, + 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, + 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, + 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, + 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, + 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 
0x86, 0xbb, 0x0f, 0x41, + 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, + 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, + 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, + 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, + 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, + 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, + 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, + 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, + 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, + 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, + 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/test/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 0000000000..4ec872e460 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,2822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/socket/socket_service.proto + +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} +func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, 
[]int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + 
RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT 
RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + 
RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + 
RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM 
RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: "SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 
34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: 
"SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + "SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + 
"SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} +func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} +func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} +func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} +func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + 
SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: 
"SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, 
int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} +func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} +func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x 
ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} +func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + 
"SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} +func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", 
+ 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} +func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} +func 
(*RemoteSocketServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} +} +func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) +} +func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) +} +func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) +} +func (m *RemoteSocketServiceError) XXX_Size() int { + return xxx_messageInfo_RemoteSocketServiceError.Size(m) +} +func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} +func (*AddressPort) Descriptor() ([]byte, []int) { + return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{1} +} +func (m *AddressPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddressPort.Unmarshal(m, b) +} +func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) +} +func (dst *AddressPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressPort.Merge(dst, src) +} +func (m *AddressPort) XXX_Size() int { + return xxx_messageInfo_AddressPort.Size(m) +} +func (m *AddressPort) XXX_DiscardUnknown() { + xxx_messageInfo_AddressPort.DiscardUnknown(m) +} + +var xxx_messageInfo_AddressPort proto.InternalMessageInfo + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` + ProjectId *int64 
`protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} +func (*CreateSocketRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} +} +func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) +} +func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketRequest.Merge(dst, src) +} +func (m *CreateSocketRequest) XXX_Size() int { + return xxx_messageInfo_CreateSocketRequest.Size(m) +} +func (m *CreateSocketRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && 
m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} +func (*CreateSocketReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} +} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) +} +func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) +} +func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketReply.Merge(dst, 
src) +} +func (m *CreateSocketReply) XXX_Size() int { + return xxx_messageInfo_CreateSocketReply.Size(m) +} +func (m *CreateSocketReply) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} +func (*BindRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} +} +func (m *BindRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindRequest.Unmarshal(m, b) +} +func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) +} +func (dst *BindRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindRequest.Merge(dst, src) +} +func (m *BindRequest) XXX_Size() int { + return xxx_messageInfo_BindRequest.Size(m) +} +func (m *BindRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BindRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BindRequest 
proto.InternalMessageInfo + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} +func (*BindReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} +} +func (m *BindReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindReply.Unmarshal(m, b) +} +func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) +} +func (dst *BindReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindReply.Merge(dst, src) +} +func (m *BindReply) XXX_Size() int { + return xxx_messageInfo_BindReply.Size(m) +} +func (m *BindReply) XXX_DiscardUnknown() { + xxx_messageInfo_BindReply.DiscardUnknown(m) +} + +var xxx_messageInfo_BindReply proto.InternalMessageInfo + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return 
proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} +func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} +} +func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) +} +func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) +} +func (m *GetSocketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketNameRequest.Size(m) +} +func (m *GetSocketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} +func (*GetSocketNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} +} +func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) +} +func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) +} +func (dst 
*GetSocketNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameReply.Merge(dst, src) +} +func (m *GetSocketNameReply) XXX_Size() int { + return xxx_messageInfo_GetSocketNameReply.Size(m) +} +func (m *GetSocketNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} +func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} +} +func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) +} +func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) +} +func (m *GetPeerNameRequest) XXX_Size() int { + return xxx_messageInfo_GetPeerNameRequest.Size(m) +} +func (m *GetPeerNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo + +func (m *GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type 
GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} +func (*GetPeerNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} +} +func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) +} +func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameReply.Merge(dst, src) +} +func (m *GetPeerNameReply) XXX_Size() int { + return xxx_messageInfo_GetPeerNameReply.Size(m) +} +func (m *GetPeerNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func 
(*SocketOption) ProtoMessage() {} +func (*SocketOption) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} +} +func (m *SocketOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SocketOption.Unmarshal(m, b) +} +func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) +} +func (dst *SocketOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_SocketOption.Merge(dst, src) +} +func (m *SocketOption) XXX_Size() int { + return xxx_messageInfo_SocketOption.Size(m) +} +func (m *SocketOption) XXX_DiscardUnknown() { + xxx_messageInfo_SocketOption.DiscardUnknown(m) +} + +var xxx_messageInfo_SocketOption proto.InternalMessageInfo + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} +func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} +} +func (m *SetSocketOptionsRequest) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) +} +func (m *SetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsRequest.Size(m) +} +func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} +func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} +} +func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) +} +func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) +} +func (m *SetSocketOptionsReply) XXX_Size() int { + return 
xxx_messageInfo_SetSocketOptionsReply.Size(m) +} +func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} +func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} +} +func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) +} +func (m *GetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsRequest.Size(m) +} +func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} 
+ +type GetSocketOptionsReply struct { + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} +func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} +} +func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) +} +func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) +} +func (m *GetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsReply.Size(m) +} +func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = 
ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} +} +func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) +} +func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) +} +func (dst *ConnectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectRequest.Merge(dst, src) +} +func (m *ConnectRequest) XXX_Size() int { + return xxx_messageInfo_ConnectRequest.Size(m) +} +func (m *ConnectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} +func (*ConnectReply) Descriptor() ([]byte, []int) { + return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{16} +} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectReply.Unmarshal(m, b) +} +func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) +} +func (dst *ConnectReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectReply.Merge(dst, src) +} +func (m *ConnectReply) XXX_Size() int { + return xxx_messageInfo_ConnectReply.Size(m) +} +func (m *ConnectReply) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectReply proto.InternalMessageInfo + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} +func (*ListenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} +} +func (m *ListenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenRequest.Unmarshal(m, b) +} +func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) +} +func (dst *ListenRequest) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_ListenRequest.Merge(dst, src) +} +func (m *ListenRequest) XXX_Size() int { + return xxx_messageInfo_ListenRequest.Size(m) +} +func (m *ListenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenRequest proto.InternalMessageInfo + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} +func (*ListenReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} +} +func (m *ListenReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenReply.Unmarshal(m, b) +} +func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) +} +func (dst *ListenReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenReply.Merge(dst, src) +} +func (m *ListenReply) XXX_Size() int { + return xxx_messageInfo_ListenReply.Size(m) +} +func (m *ListenReply) XXX_DiscardUnknown() { + xxx_messageInfo_ListenReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenReply proto.InternalMessageInfo + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} +func (*AcceptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} +} +func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) +} +func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) +} +func (dst *AcceptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptRequest.Merge(dst, src) +} +func (m *AcceptRequest) XXX_Size() int { + return xxx_messageInfo_AcceptRequest.Size(m) +} +func (m *AcceptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} +func (*AcceptReply) Descriptor() 
([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} +} +func (m *AcceptReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptReply.Unmarshal(m, b) +} +func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) +} +func (dst *AcceptReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptReply.Merge(dst, src) +} +func (m *AcceptReply) XXX_Size() int { + return xxx_messageInfo_AcceptReply.Size(m) +} +func (m *AcceptReply) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptReply.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptReply proto.InternalMessageInfo + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} +func (*ShutDownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} +} +func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) +} +func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) +} +func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownRequest.Merge(dst, src) +} +func (m *ShutDownRequest) XXX_Size() int { + return xxx_messageInfo_ShutDownRequest.Size(m) +} +func (m *ShutDownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} +func (*ShutDownReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} +} +func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) +} +func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) +} +func (dst *ShutDownReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownReply.Merge(dst, src) +} +func (m *ShutDownReply) XXX_Size() int { + return xxx_messageInfo_ShutDownReply.Size(m) +} +func (m *ShutDownReply) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo + +type CloseRequest struct 
{ + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} +} +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (dst *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(dst, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} +func (*CloseReply) Descriptor() 
([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} +} +func (m *CloseReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseReply.Unmarshal(m, b) +} +func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) +} +func (dst *CloseReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseReply.Merge(dst, src) +} +func (m *CloseReply) XXX_Size() int { + return xxx_messageInfo_CloseReply.Size(m) +} +func (m *CloseReply) XXX_DiscardUnknown() { + xxx_messageInfo_CloseReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseReply proto.InternalMessageInfo + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} +func (*SendRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} +} +func (m *SendRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendRequest.Unmarshal(m, b) +} +func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) +} +func (dst *SendRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendRequest.Merge(dst, src) +} +func (m *SendRequest) XXX_Size() int { + return xxx_messageInfo_SendRequest.Size(m) +} +func (m *SendRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendRequest proto.InternalMessageInfo + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} +func (*SendReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} +} +func (m *SendReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendReply.Unmarshal(m, b) +} +func (m *SendReply) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) +} +func (dst *SendReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendReply.Merge(dst, src) +} +func (m *SendReply) XXX_Size() int { + return xxx_messageInfo_SendReply.Size(m) +} +func (m *SendReply) XXX_DiscardUnknown() { + xxx_messageInfo_SendReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SendReply proto.InternalMessageInfo + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} +func (*ReceiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} +} +func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) +} +func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) +} +func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveRequest.Merge(dst, src) +} +func (m *ReceiveRequest) XXX_Size() int { + return xxx_messageInfo_ReceiveRequest.Size(m) +} +func (m *ReceiveRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} +func (*ReceiveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} +} +func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) +} +func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) +} +func (dst *ReceiveReply) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_ReceiveReply.Merge(dst, src) +} +func (m *ReceiveReply) XXX_Size() int { + return xxx_messageInfo_ReceiveReply.Size(m) +} +func (m *ReceiveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} +func (*PollEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} +} +func (m *PollEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollEvent.Unmarshal(m, b) +} +func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) +} +func (dst *PollEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollEvent.Merge(dst, src) +} +func (m *PollEvent) XXX_Size() 
int { + return xxx_messageInfo_PollEvent.Size(m) +} +func (m *PollEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PollEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PollEvent proto.InternalMessageInfo + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} +func (*PollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} +} +func (m *PollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollRequest.Unmarshal(m, b) +} +func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) +} +func (dst *PollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollRequest.Merge(dst, src) +} +func (m *PollRequest) XXX_Size() int { + return xxx_messageInfo_PollRequest.Size(m) +} +func (m *PollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PollRequest proto.InternalMessageInfo + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() 
[]*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} +func (*PollReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} +} +func (m *PollReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollReply.Unmarshal(m, b) +} +func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) +} +func (dst *PollReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollReply.Merge(dst, src) +} +func (m *PollReply) XXX_Size() int { + return xxx_messageInfo_PollReply.Size(m) +} +func (m *PollReply) XXX_DiscardUnknown() { + xxx_messageInfo_PollReply.DiscardUnknown(m) +} + +var xxx_messageInfo_PollReply proto.InternalMessageInfo + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return 
proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} +func (*ResolveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} +} +func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) +} +func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) +} +func (dst *ResolveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveRequest.Merge(dst, src) +} +func (m *ResolveRequest) XXX_Size() int { + return xxx_messageInfo_ResolveRequest.Size(m) +} +func (m *ResolveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} +func (*ResolveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} +} +func (m *ResolveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveReply.Unmarshal(m, b) +} +func (m 
*ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) +} +func (dst *ResolveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveReply.Merge(dst, src) +} +func (m *ResolveReply) XXX_Size() int { + return xxx_messageInfo_ResolveReply.Size(m) +} +func (m *ResolveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveReply proto.InternalMessageInfo + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { + proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") + proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") + proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") + proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") + proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") + proto.RegisterType((*BindReply)(nil), "appengine.BindReply") + proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") + proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") + proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") + proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") + proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") + proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") + proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") + proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") + 
proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") + proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") + proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") + proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") + proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") + proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") + proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") + proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") + proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") + proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") + proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") + proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") + proto.RegisterType((*SendReply)(nil), "appengine.SendReply") + proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") + proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") + proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") + proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") + proto.RegisterType((*PollReply)(nil), "appengine.PollReply") + proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") + proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) +} + +var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ + // 3088 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, + 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, + 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, + 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 
0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, + 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, + 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, + 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, + 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, + 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, + 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, + 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, + 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, + 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, + 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, + 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, + 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, + 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, + 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, + 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, + 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, + 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, + 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, + 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, + 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 
0x78, 0x2d, 0xd8, 0x5c, + 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, + 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, + 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, + 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, + 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, + 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, + 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, + 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, + 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, + 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, + 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, + 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, + 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, + 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, + 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, + 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, + 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, + 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, + 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, + 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, + 0x90, 0xc0, 
0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, + 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, + 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, + 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, + 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, + 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, + 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, + 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, + 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, + 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, + 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, + 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, + 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, + 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, + 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, + 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, + 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, + 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, + 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, + 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, + 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 
0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, + 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, + 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, + 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, + 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, + 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, + 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, + 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, + 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, + 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, + 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, + 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, + 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, + 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, + 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, + 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, + 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, + 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, + 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, + 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, + 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 
0x0f, 0xc7, + 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, + 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, + 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, + 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, + 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, + 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, + 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, + 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, + 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, + 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, + 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, + 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, + 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, + 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, + 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, + 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, + 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, + 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, + 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, + 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, + 0xd7, 0xd7, 0x25, 0xeb, 
0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, + 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, + 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, + 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, + 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, + 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, + 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, + 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, + 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, + 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, + 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, + 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, + 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, + 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, + 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, + 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, + 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, + 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, + 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, + 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, + 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 
0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, + 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, + 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, + 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, + 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, + 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, + 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, + 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, + 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, + 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, + 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, + 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, + 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, + 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, + 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, + 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, + 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, + 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, + 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, + 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, + 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, + 
0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, + 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, + 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, + 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, + 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, + 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, + 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, + 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, + 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, + 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, + 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, + 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, + 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, + 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, + 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, + 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, + 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, + 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, + 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, + 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, + 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 
0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, + 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, + 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, + 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, + 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, + 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, + 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, + 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, + 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, + 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, + 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, + 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, + 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, + 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, + 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, + 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, + 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, + 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, + 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, + 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, + 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 
0x12, 0x28, 0x55, 0xaf, + 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, + 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, + 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, + 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, + 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, + 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/appengine/namespace.go b/test/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 0000000000..21860ca082 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/test/vendor/google.golang.org/appengine/socket/doc.go b/test/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 0000000000..3de46df826 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/test/vendor/google.golang.org/appengine/socket/socket_classic.go b/test/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 0000000000..0ad50e2d36 --- /dev/null +++ b/test/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. 
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. + if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. 
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case 
pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/test/vendor/google.golang.org/appengine/socket/socket_vm.go b/test/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 0000000000..c804169a1c --- /dev/null +++ b/test/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". 
+// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/test/vendor/google.golang.org/appengine/timeout.go b/test/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 0000000000..05642a992a --- /dev/null +++ b/test/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/test/vendor/google.golang.org/genproto/LICENSE b/test/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 0000000000..bf2f703fff --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/annotations.proto + +package annotations + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab) } + +var fileDescriptor_c591c5aa9fb79aab = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go 
b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go new file mode 100644 index 0000000000..867fc0c3fa --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/client.proto + +package annotations + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +var E_MethodSignature = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", +} + +var E_DefaultHost = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", +} + +var E_OauthScopes = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", +} + +func init() { + proto.RegisterExtension(E_MethodSignature) + proto.RegisterExtension(E_DefaultHost) + proto.RegisterExtension(E_OauthScopes) +} + +func init() { proto.RegisterFile("google/api/client.proto", 
fileDescriptor_78f2c6f7c3a942c1) } + +var fileDescriptor_78f2c6f7c3a942c1 = []byte{ + // 262 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30, + 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8, + 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5, + 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd, + 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b, + 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b, + 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x4f, 0xec, 0x7c, 0x07, 0xd4, 0x60, 0x5d, + 0x05, 0xa3, 0xad, 0xa4, 0xce, 0x43, 0x76, 0xcd, 0x93, 0x62, 0xc0, 0xf8, 0x73, 0xac, 0xbc, 0x38, + 0x32, 0x68, 0xc3, 0xe5, 0xe7, 0x38, 0x3f, 0x9c, 0x4d, 0xd6, 0x67, 0x3d, 0x58, 0x0e, 0xdc, 0x7c, + 0xc5, 0x4e, 0x6a, 0xd8, 0xca, 0xae, 0xa5, 0xaa, 0xc1, 0x40, 0xd9, 0xcd, 0x1f, 0x4f, 0x09, 0xfe, + 0xcd, 0x28, 0x18, 0x44, 0xef, 0xe3, 0x7c, 0x34, 0x9b, 0xac, 0xa7, 0x89, 0x7a, 0xc0, 0x40, 0x7b, + 0x09, 0xca, 0x8e, 0x9a, 0x2a, 0x28, 0x74, 0x10, 0xfe, 0x97, 0x7c, 0x24, 0x49, 0xa4, 0xca, 0x08, + 0x2d, 0x0d, 0x3b, 0x55, 0xb8, 0xe3, 0x3f, 0x4b, 0x2c, 0xa7, 0xab, 0xb8, 0x51, 0xb1, 0x97, 0x14, + 0xa3, 0xd7, 0x45, 0x8a, 0x34, 0xb6, 0xd2, 0x6a, 0x8e, 0x5e, 0x0b, 0x0d, 0x36, 0xbe, 0x10, 0x7d, + 0x24, 0x9d, 0x09, 0x71, 0x5c, 0x69, 0x2d, 0x92, 0x8c, 0xbf, 0xee, 0x7e, 0xdd, 0x5f, 0x07, 0x47, + 0xf7, 0x8b, 0xe2, 0x71, 0x73, 0x1c, 0xa1, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xc2, + 0xcf, 0x71, 0x90, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go new file mode 100644 index 0000000000..31f87dd00d --- /dev/null +++ 
b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/field_behavior.proto + +package annotations + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + FieldBehavior_OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + FieldBehavior_REQUIRED FieldBehavior = 2 + // Denotes a field as output only. 
+ // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + FieldBehavior_OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + FieldBehavior_INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + FieldBehavior_IMMUTABLE FieldBehavior = 5 +) + +var FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", +} + +var FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, +} + +func (x FieldBehavior) String() string { + return proto.EnumName(FieldBehavior_name, int32(x)) +} + +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4648f18fd5079967, []int{0} +} + +var E_FieldBehavior = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", +} + +func init() { + proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) + proto.RegisterExtension(E_FieldBehavior) +} + +func init() { proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_4648f18fd5079967) } + +var fileDescriptor_4648f18fd5079967 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 
0xb3, 0x30, + 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28, + 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda, + 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9, + 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99, + 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00, + 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea, + 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09, + 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53, + 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14, + 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1, + 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd, + 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd, + 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17, + 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6, + 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02, + 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb, + 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5, + 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 0000000000..a63870374d --- 
/dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,633 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Http) Reset() { *m = Http{} } +func (m *Http) String() string { return proto.CompactTextString(m) } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{0} +} + +func (m *Http) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Http.Unmarshal(m, b) +} +func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Http.Marshal(b, m, deterministic) +} +func (m *Http) XXX_Merge(src proto.Message) { + xxx_messageInfo_Http.Merge(m, src) +} +func (m *Http) XXX_Size() int { + return xxx_messageInfo_Http.Size(m) +} +func (m *Http) XXX_DiscardUnknown() { + xxx_messageInfo_Http.DiscardUnknown(m) +} + +var xxx_messageInfo_Http proto.InternalMessageInfo + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Http) GetFullyDecodeReservedExpansion() bool { + if m != nil { + return m.FullyDecodeReservedExpansion + } + return false +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. 
The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. 
This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. 
+// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. 
+// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. 
The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +type HttpRule struct { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. 
+ // + // NOTE: the referred field must be present at the top-level of the request + // message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{1} +} + +func (m *HttpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpRule.Unmarshal(m, b) +} +func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) +} +func (m *HttpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpRule.Merge(m, src) +} +func (m *HttpRule) XXX_Size() int { + return xxx_messageInfo_HttpRule.Size(m) +} +func (m *HttpRule) XXX_DiscardUnknown() { + xxx_messageInfo_HttpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpRule proto.InternalMessageInfo + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +type 
isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetResponseBody() string { + if m 
!= nil { + return m.ResponseBody + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HttpRule) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{2} +} + +func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) +} +func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) +} +func (m *CustomHttpPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomHttpPattern.Merge(m, src) +} +func (m *CustomHttpPattern) XXX_Size() int { + return xxx_messageInfo_CustomHttpPattern.Size(m) +} +func (m *CustomHttpPattern) XXX_DiscardUnknown() { + xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return 
m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9) } + +var fileDescriptor_ff9994be407cdcc9 = []byte{ + // 419 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, + 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37, + 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, + 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, + 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, + 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, + 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, + 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, + 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, + 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, + 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, + 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, + 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, + 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 
0x55, + 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, + 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, + 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, + 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, + 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, + 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, + 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, + 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, + 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, + 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, + 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, + 0x02, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go new file mode 100644 index 0000000000..6aea4d701f --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -0,0 +1,441 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/resource.proto + +package annotations + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A description of the historical or future-looking state of the +// resource pattern. +type ResourceDescriptor_History int32 + +const ( + // The "unset" value. + ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0 + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1 + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 +) + +var ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", +} + +var ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, +} + +func (x ResourceDescriptor_History) String() string { + return proto.EnumName(ResourceDescriptor_History_name, int32(x)) +} + +func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_465e9122405d1bb5, []int{0, 0} +} + +// A simple descriptor of a resource type. +// +// ResourceDescriptor annotates a resource message (either by means of a +// protobuf annotation or use in the service config), and associates the +// resource's schema, the resource type, and the pattern of the resource name. +// +// Example: +// +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. 
+// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// name_descriptor: { +// pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// name_descriptor: +// - pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// name_descriptor: { +// pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// name_descriptor: { +// pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// } +// name_descriptor: { +// pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// } +// name_descriptor: { +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// name_descriptor: +// - pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// - pattern: "folders/{folder}/logs/{log}" +// parent_type: 
"cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// - pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// - pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// +// For flexible resources, the resource name doesn't contain parent names, but +// the resource itself has parents for policy evaluation. +// +// Example: +// +// message Shelf { +// option (google.api.resource) = { +// type: "library.googleapis.com/Shelf" +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// } +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'library.googleapis.com/Shelf' +// name_descriptor: +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +type ResourceDescriptor struct { + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. 
The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; + // + // Examples: + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // The components in braces correspond to the IDs for each resource in the + // hierarchy. It is expected that, if multiple patterns are provided, + // the same component name (e.g. "project") refers to IDs of the same + // type of resource. + Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"` + // Optional. The field on the resource that designates the resource name + // field. If omitted, this is assumed to be "name". + NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"` + // Optional. The historical or future-looking state of the resource pattern. + // + // Example: + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` + // The plural name used in the resource name, such as 'projects' for + // the name of 'projects/{project}'. 
It is the same concept of the `plural` + // field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. + Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceDescriptor) Reset() { *m = ResourceDescriptor{} } +func (m *ResourceDescriptor) String() string { return proto.CompactTextString(m) } +func (*ResourceDescriptor) ProtoMessage() {} +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_465e9122405d1bb5, []int{0} +} + +func (m *ResourceDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceDescriptor.Unmarshal(m, b) +} +func (m *ResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceDescriptor.Marshal(b, m, deterministic) +} +func (m *ResourceDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceDescriptor.Merge(m, src) +} +func (m *ResourceDescriptor) XXX_Size() int { + return xxx_messageInfo_ResourceDescriptor.Size(m) +} +func (m *ResourceDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceDescriptor proto.InternalMessageInfo + +func (m *ResourceDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ResourceDescriptor) GetPattern() []string { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *ResourceDescriptor) GetNameField() string { + if m != nil { + 
return m.NameField + } + return "" +} + +func (m *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if m != nil { + return m.History + } + return ResourceDescriptor_HISTORY_UNSPECIFIED +} + +func (m *ResourceDescriptor) GetPlural() string { + if m != nil { + return m.Plural + } + return "" +} + +func (m *ResourceDescriptor) GetSingular() string { + if m != nil { + return m.Singular + } + return "" +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +type ResourceReference struct { + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. 
+ // + // Example: + // + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceReference) Reset() { *m = ResourceReference{} } +func (m *ResourceReference) String() string { return proto.CompactTextString(m) } +func (*ResourceReference) ProtoMessage() {} +func (*ResourceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_465e9122405d1bb5, []int{1} +} + +func (m *ResourceReference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceReference.Unmarshal(m, b) +} +func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic) +} +func (m *ResourceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceReference.Merge(m, src) +} +func (m *ResourceReference) XXX_Size() int { + return xxx_messageInfo_ResourceReference.Size(m) +} +func (m *ResourceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceReference proto.InternalMessageInfo + +func (m *ResourceReference) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ResourceReference) GetChildType() string { + if m != nil { + return m.ChildType + } + return "" +} + +var E_ResourceReference = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*ResourceReference)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference", + Filename: "google/api/resource.proto", +} + +var E_ResourceDefinition = &proto.ExtensionDesc{ + ExtendedType: 
(*descriptor.FileOptions)(nil), + ExtensionType: ([]*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition", + Filename: "google/api/resource.proto", +} + +var E_Resource = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", +} + +func init() { + proto.RegisterEnum("google.api.ResourceDescriptor_History", ResourceDescriptor_History_name, ResourceDescriptor_History_value) + proto.RegisterType((*ResourceDescriptor)(nil), "google.api.ResourceDescriptor") + proto.RegisterType((*ResourceReference)(nil), "google.api.ResourceReference") + proto.RegisterExtension(E_ResourceReference) + proto.RegisterExtension(E_ResourceDefinition) + proto.RegisterExtension(E_Resource) +} + +func init() { proto.RegisterFile("google/api/resource.proto", fileDescriptor_465e9122405d1bb5) } + +var fileDescriptor_465e9122405d1bb5 = []byte{ + // 490 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xfd, 0x9c, 0xe4, 0xcb, 0xcf, 0xad, 0xa8, 0xda, 0x29, 0x02, 0xb7, 0x22, 0x60, 0x65, 0x81, + 0xb2, 0xb2, 0xa5, 0xb0, 0x0b, 0x1b, 0x52, 0xe2, 0xa4, 0x96, 0xd2, 0xc4, 0x9a, 0x38, 0x8b, 0x02, + 0x92, 0x35, 0x75, 0x26, 0xee, 0x48, 0xee, 0xcc, 0x68, 0xec, 0x2c, 0xf2, 0x30, 0x08, 0x89, 0x67, + 0xe0, 0xe1, 0x58, 0xa2, 0x8c, 0x7f, 0x88, 0x68, 0x84, 0xd8, 0xcd, 0xbd, 0xe7, 0xde, 0x73, 0x8e, + 0xcf, 0x95, 0xe1, 0x32, 0x16, 0x22, 0x4e, 0xa8, 0x43, 0x24, 0x73, 0x14, 0x4d, 0xc5, 0x56, 0x45, + 0xd4, 0x96, 0x4a, 0x64, 0x02, 0x41, 0x0e, 0xd9, 0x44, 0xb2, 0x2b, 0xab, 0x18, 0xd3, 0xc8, 0xfd, + 0x76, 0xe3, 0xac, 0x69, 0x1a, 0x29, 0x26, 0x33, 0xa1, 0xf2, 0xe9, 0xde, 0x8f, 0x1a, 0x20, 0x5c, + 0x10, 0x8c, 0x2b, 0x10, 0x21, 0x68, 0x64, 0x3b, 0x49, 0x4d, 0xc3, 0x32, 
0xfa, 0x1d, 0xac, 0xdf, + 0xc8, 0x84, 0x96, 0x24, 0x59, 0x46, 0x15, 0x37, 0x6b, 0x56, 0xbd, 0xdf, 0xc1, 0x65, 0x89, 0xba, + 0x00, 0x9c, 0x3c, 0xd2, 0x70, 0xc3, 0x68, 0xb2, 0x36, 0xeb, 0x7a, 0xa7, 0xb3, 0xef, 0x4c, 0xf6, + 0x0d, 0xf4, 0x01, 0x5a, 0x0f, 0x2c, 0xcd, 0x84, 0xda, 0x99, 0x0d, 0xcb, 0xe8, 0x9f, 0x0e, 0xde, + 0xda, 0xbf, 0x3d, 0xda, 0x4f, 0xd5, 0xed, 0x9b, 0x7c, 0x1a, 0x97, 0x6b, 0xe8, 0x05, 0x34, 0x65, + 0xb2, 0x55, 0x24, 0x31, 0xff, 0xd7, 0xe4, 0x45, 0x85, 0xae, 0xa0, 0x9d, 0x32, 0x1e, 0x6f, 0x13, + 0xa2, 0xcc, 0xa6, 0x46, 0xaa, 0xba, 0xf7, 0x19, 0x5a, 0x05, 0x0f, 0x7a, 0x09, 0x17, 0x37, 0xde, + 0x32, 0x58, 0xe0, 0xbb, 0x70, 0x35, 0x5f, 0xfa, 0xee, 0x47, 0x6f, 0xe2, 0xb9, 0xe3, 0xb3, 0xff, + 0x50, 0x17, 0x2e, 0x17, 0xd8, 0x9b, 0x7a, 0xf3, 0xd1, 0x6c, 0x76, 0x17, 0x2e, 0xbd, 0xf9, 0x74, + 0xe6, 0x86, 0xfe, 0x28, 0x08, 0x5c, 0x3c, 0x3f, 0x33, 0x90, 0x09, 0xcf, 0x27, 0xab, 0x60, 0x85, + 0xdd, 0xf0, 0x76, 0x35, 0x0b, 0xbc, 0x0a, 0xa9, 0xf5, 0x26, 0x70, 0x5e, 0xfa, 0xc6, 0x74, 0x43, + 0x15, 0xe5, 0x11, 0x3d, 0x1a, 0x5a, 0x17, 0x20, 0x7a, 0x60, 0xc9, 0x3a, 0xd4, 0x48, 0x2d, 0x8f, + 0x46, 0x77, 0x82, 0x9d, 0xa4, 0xc3, 0x04, 0x50, 0x79, 0xbe, 0x50, 0x55, 0x44, 0xdd, 0x32, 0x9f, + 0xf2, 0x6e, 0xb6, 0x0e, 0x72, 0x21, 0x33, 0x26, 0x78, 0x6a, 0x7e, 0x6b, 0x5b, 0x46, 0xff, 0x64, + 0xd0, 0x3d, 0x96, 0x62, 0xe5, 0x06, 0x9f, 0xab, 0x3f, 0x5b, 0x43, 0x0e, 0x17, 0x95, 0xda, 0x9a, + 0x6e, 0x18, 0x67, 0x7b, 0x42, 0xf4, 0xea, 0x88, 0x5c, 0x42, 0x4b, 0xb5, 0xaf, 0x6d, 0xab, 0xde, + 0x3f, 0x19, 0xbc, 0xfe, 0xfb, 0xcd, 0x70, 0xf5, 0x1d, 0xe3, 0x8a, 0x78, 0xf8, 0x05, 0xda, 0x65, + 0x17, 0xbd, 0x79, 0x22, 0x72, 0x4b, 0xd3, 0x94, 0xc4, 0x87, 0x3a, 0xc6, 0x3f, 0xe8, 0x54, 0x8c, + 0xd7, 0x1c, 0x4e, 0x23, 0xf1, 0x78, 0x30, 0x7e, 0xfd, 0xac, 0x9c, 0xf7, 0xf7, 0x1a, 0xbe, 0xf1, + 0x69, 0x54, 0x80, 0xb1, 0x48, 0x08, 0x8f, 0x6d, 0xa1, 0x62, 0x27, 0xa6, 0x5c, 0x3b, 0x70, 0x72, + 0x88, 0x48, 0x96, 0xea, 0xbf, 0x88, 0x70, 0x2e, 0x32, 0xa2, 0xad, 0xbc, 0x3f, 0x78, 0xff, 0x34, + 0x8c, 0xef, 
0xb5, 0xc6, 0x74, 0xe4, 0x7b, 0xf7, 0x4d, 0xbd, 0xf7, 0xee, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x75, 0x12, 0x53, 0xef, 0x7c, 0x03, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 0000000000..39ff34cfb0 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,638 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/distribution.proto + +package distribution + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + timestamp "github.com/golang/protobuf/ptypes/timestamp" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `Distribution` contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those values +// across a set of buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by formulas for buckets of fixed or exponentially increasing +// widths. 
+// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in `bucket_counts` if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. If the distribution does not + // contain a histogram, then omit this field. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // The number of values in each bucket of the histogram, as described in + // `bucket_options`. If the distribution does not have a histogram, then omit + // this field. 
If there is a histogram, then the sum of the values in + // `bucket_counts` must equal the value in the `count` field of the + // distribution. + // + // If present, `bucket_counts` should contain N values, where N is the number + // of buckets specified in `bucket_options`. If you supply fewer than N + // values, the remaining values are assumed to be 0. + // + // The order of the values in `bucket_counts` follows the bucket numbering + // schemes described for the three bucket types. The first value must be the + // count for the underflow bucket (number 0). The next N-2 values are the + // counts for the finite buckets (number 1 through N-2). The N'th value in + // `bucket_counts` is the count for the overflow bucket (number N-1). + BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // Must be in increasing order of `value` field. + Exemplars []*Distribution_Exemplar `protobuf:"bytes,10,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution) Reset() { *m = Distribution{} } +func (m *Distribution) String() string { return proto.CompactTextString(m) } +func (*Distribution) ProtoMessage() {} +func (*Distribution) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0} +} + +func (m *Distribution) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution.Unmarshal(m, b) +} +func (m *Distribution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution.Marshal(b, m, deterministic) +} +func (m *Distribution) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution.Merge(m, src) +} +func (m *Distribution) XXX_Size() int { + return xxx_messageInfo_Distribution.Size(m) +} +func (m *Distribution) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution.DiscardUnknown(m) +} + 
+var xxx_messageInfo_Distribution proto.InternalMessageInfo + +func (m *Distribution) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Distribution) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *Distribution) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *Distribution) GetRange() *Distribution_Range { + if m != nil { + return m.Range + } + return nil +} + +func (m *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *Distribution) GetBucketCounts() []int64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *Distribution) GetExemplars() []*Distribution_Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_Range) Reset() { *m = Distribution_Range{} } +func (m *Distribution_Range) String() string { return proto.CompactTextString(m) } +func (*Distribution_Range) ProtoMessage() {} +func (*Distribution_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 0} +} + +func (m *Distribution_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_Range.Unmarshal(m, b) +} +func (m *Distribution_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_Range.Marshal(b, m, deterministic) +} +func (m *Distribution_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_Range.Merge(m, src) +} +func (m *Distribution_Range) XXX_Size() int { + return xxx_messageInfo_Distribution_Range.Size(m) +} +func (m *Distribution_Range) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_Range proto.InternalMessageInfo + +func (m *Distribution_Range) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *Distribution_Range) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +// `BucketOptions` describes the bucket boundaries used to create a histogram +// for the distribution. The buckets can be in a linear sequence, an +// exponential sequence, or each bucket can be specified explicitly. +// `BucketOptions` does not include the number of values in each bucket. +// +// A bucket has an inclusive lower bound and exclusive upper bound for the +// values that are counted for that bucket. The upper bound of a bucket must +// be strictly greater than the lower bound. 
The sequence of N buckets for a +// distribution consists of an underflow bucket (number 0), zero or more +// finite buckets (number 1 through N - 2) and an overflow bucket (number N - +// 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the +// same as the upper bound of bucket i - 1. The buckets span the whole range +// of finite values: lower bound of the underflow bucket is -infinity and the +// upper bound of the overflow bucket is +infinity. The finite buckets are +// so-called because both bounds are finite. +type Distribution_BucketOptions struct { + // Exactly one of these three fields must be set. + // + // Types that are valid to be assigned to Options: + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions) Reset() { *m = Distribution_BucketOptions{} } +func (m *Distribution_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions) ProtoMessage() {} +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1} +} + +func (m *Distribution_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions.Merge(m, src) +} +func (m *Distribution_BucketOptions) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions.Size(m) +} +func (m 
*Distribution_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions proto.InternalMessageInfo + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return 
x.ExplicitBuckets + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Distribution_BucketOptions) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } +} + +// Specifies a linear sequence of buckets that all have the same width +// (except overflow and underflow). Each bucket represents a constant +// absolute uncertainty on the specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): offset + (width * i). +// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. 
+ Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Linear) Reset() { *m = Distribution_BucketOptions_Linear{} } +func (m *Distribution_BucketOptions_Linear) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 0} +} + +func (m *Distribution_BucketOptions_Linear) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Linear) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions_Linear) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Linear.Merge(m, src) +} +func (m *Distribution_BucketOptions_Linear) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Size(m) +} +func (m *Distribution_BucketOptions_Linear) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Linear.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Linear proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetWidth() float64 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetOffset() float64 { + if m != nil { + return m.Offset + } + return 0 +} + +// Specifies an exponential sequence of buckets that have a width that is +// proportional to the value of the lower bound. 
Each bucket represents a +// constant relative uncertainty on a specific value in the bucket. +// +// There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the +// following boundaries: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. + Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Exponential) Reset() { + *m = Distribution_BucketOptions_Exponential{} +} +func (m *Distribution_BucketOptions_Exponential) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Exponential) ProtoMessage() {} +func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 1} +} + +func (m *Distribution_BucketOptions_Exponential) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Exponential.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Exponential) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Exponential.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions_Exponential) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Exponential.Merge(m, src) +} +func (m *Distribution_BucketOptions_Exponential) XXX_Size() int { + return 
xxx_messageInfo_Distribution_BucketOptions_Exponential.Size(m) +} +func (m *Distribution_BucketOptions_Exponential) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Exponential.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Exponential proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 { + if m != nil { + return m.GrowthFactor + } + return 0 +} + +func (m *Distribution_BucketOptions_Exponential) GetScale() float64 { + if m != nil { + return m.Scale + } + return 0 +} + +// Specifies a set of buckets with arbitrary widths. +// +// There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following +// boundaries: +// +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] +// +// The `bounds` field must contain at least one element. If `bounds` has +// only one element, then there are no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +type Distribution_BucketOptions_Explicit struct { + // The values must be monotonically increasing. 
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Explicit) Reset() { *m = Distribution_BucketOptions_Explicit{} } +func (m *Distribution_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} +func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 2} +} + +func (m *Distribution_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Explicit.Merge(m, src) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Size(m) +} +func (m *Distribution_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket, such as a trace ID that +// was active when a value was added. They may contain further information, +// such as a example values and timestamps, origin, etc. 
+type Distribution_Exemplar struct { + // Value of the exemplar point. This value determines to which bucket the + // exemplar belongs. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. Examples are: + // + // Trace: type.googleapis.com/google.monitoring.v3.SpanContext + // + // Literal string: type.googleapis.com/google.protobuf.StringValue + // + // Labels dropped during aggregation: + // type.googleapis.com/google.monitoring.v3.DroppedLabels + // + // There may be only a single attachment of any given message type in a + // single exemplar, and this is enforced by the system. + Attachments []*any.Any `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_Exemplar) Reset() { *m = Distribution_Exemplar{} } +func (m *Distribution_Exemplar) String() string { return proto.CompactTextString(m) } +func (*Distribution_Exemplar) ProtoMessage() {} +func (*Distribution_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 2} +} + +func (m *Distribution_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_Exemplar.Unmarshal(m, b) +} +func (m *Distribution_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_Exemplar.Marshal(b, m, deterministic) +} +func (m *Distribution_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_Exemplar.Merge(m, src) +} +func (m *Distribution_Exemplar) XXX_Size() int { + return xxx_messageInfo_Distribution_Exemplar.Size(m) +} +func (m *Distribution_Exemplar) XXX_DiscardUnknown() { + 
xxx_messageInfo_Distribution_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_Exemplar proto.InternalMessageInfo + +func (m *Distribution_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Distribution_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *Distribution_Exemplar) GetAttachments() []*any.Any { + if m != nil { + return m.Attachments + } + return nil +} + +func init() { + proto.RegisterType((*Distribution)(nil), "google.api.Distribution") + proto.RegisterType((*Distribution_Range)(nil), "google.api.Distribution.Range") + proto.RegisterType((*Distribution_BucketOptions)(nil), "google.api.Distribution.BucketOptions") + proto.RegisterType((*Distribution_BucketOptions_Linear)(nil), "google.api.Distribution.BucketOptions.Linear") + proto.RegisterType((*Distribution_BucketOptions_Exponential)(nil), "google.api.Distribution.BucketOptions.Exponential") + proto.RegisterType((*Distribution_BucketOptions_Explicit)(nil), "google.api.Distribution.BucketOptions.Explicit") + proto.RegisterType((*Distribution_Exemplar)(nil), "google.api.Distribution.Exemplar") +} + +func init() { proto.RegisterFile("google/api/distribution.proto", fileDescriptor_0835ee0fd90bf943) } + +var fileDescriptor_0835ee0fd90bf943 = []byte{ + // 631 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xed, 0x6a, 0xd4, 0x40, + 0x14, 0x6d, 0x9a, 0xdd, 0x6d, 0x7b, 0xb7, 0x5b, 0xeb, 0x58, 0x25, 0x06, 0xd4, 0xb5, 0x05, 0x59, + 0x50, 0xb3, 0xb0, 0x8a, 0x0a, 0xfe, 0x90, 0x6e, 0x3f, 0xac, 0xa0, 0xb4, 0x8c, 0xe2, 0x0f, 0x11, + 0xc2, 0x6c, 0x76, 0x92, 0x0e, 0x26, 0x33, 0x69, 0x32, 0x69, 0xb7, 0xaf, 0xe1, 0x23, 0xf8, 0x16, + 0xbe, 0x8a, 0x4f, 0x23, 0xf3, 0x91, 0x6e, 0x6a, 0x29, 0xd4, 0x7f, 0xb9, 0xf7, 0x9c, 0x7b, 0xce, + 0xbd, 0x73, 0x67, 0x02, 0x0f, 0x12, 0x21, 0x92, 0x94, 0x0e, 0x49, 0xce, 0x86, 0x53, 0x56, 0xca, + 0x82, 
0x4d, 0x2a, 0xc9, 0x04, 0x0f, 0xf2, 0x42, 0x48, 0x81, 0xc0, 0xc0, 0x01, 0xc9, 0x99, 0x7f, + 0xdf, 0x52, 0x35, 0x32, 0xa9, 0xe2, 0x21, 0xe1, 0xe7, 0x86, 0xe6, 0x3f, 0xfa, 0x17, 0x92, 0x2c, + 0xa3, 0xa5, 0x24, 0x59, 0x6e, 0x08, 0x9b, 0x7f, 0x96, 0x61, 0x75, 0xb7, 0x21, 0x8f, 0x36, 0xa0, + 0x1d, 0x89, 0x8a, 0x4b, 0xcf, 0xe9, 0x3b, 0x03, 0x17, 0x9b, 0x00, 0x21, 0x68, 0x65, 0x94, 0x70, + 0x6f, 0xb1, 0xef, 0x0c, 0x1c, 0xac, 0xbf, 0xd1, 0x6b, 0xf0, 0xca, 0x2a, 0x0b, 0x45, 0x1c, 0x96, + 0x27, 0x15, 0x29, 0xe8, 0x34, 0x9c, 0xd2, 0x53, 0x46, 0x94, 0x8a, 0xe7, 0x6a, 0xde, 0xdd, 0xb2, + 0xca, 0x0e, 0xe3, 0xcf, 0x06, 0xdd, 0xad, 0x41, 0xf4, 0x12, 0xda, 0x05, 0xe1, 0x09, 0xf5, 0x5a, + 0x7d, 0x67, 0xd0, 0x1d, 0x3d, 0x0c, 0xe6, 0xb3, 0x04, 0xcd, 0x5e, 0x02, 0xac, 0x58, 0xd8, 0x90, + 0xd1, 0x27, 0x58, 0x9b, 0x54, 0xd1, 0x0f, 0x2a, 0x43, 0x91, 0x2b, 0xb4, 0xf4, 0x3a, 0xba, 0xfc, + 0xc9, 0xb5, 0xe5, 0x63, 0x4d, 0x3f, 0x34, 0x6c, 0xdc, 0x9b, 0x34, 0x43, 0xb4, 0x05, 0x36, 0x11, + 0xea, 0x09, 0x4b, 0x6f, 0xa9, 0xef, 0x0e, 0x5c, 0xbc, 0x6a, 0x92, 0x3b, 0x3a, 0x87, 0xde, 0xc1, + 0x0a, 0x9d, 0xd1, 0x2c, 0x4f, 0x49, 0x51, 0x7a, 0xd0, 0x77, 0x07, 0xdd, 0xd1, 0xe3, 0x6b, 0xed, + 0xf6, 0x2c, 0x13, 0xcf, 0x6b, 0xfc, 0xa7, 0xd0, 0xd6, 0x43, 0xa0, 0x75, 0x70, 0x33, 0xc6, 0xf5, + 0xa1, 0x3a, 0x58, 0x7d, 0xea, 0x0c, 0x99, 0xd9, 0x13, 0x55, 0x9f, 0xfe, 0xef, 0x16, 0xf4, 0x2e, + 0xf5, 0x8c, 0xbe, 0xc2, 0x5a, 0xca, 0x38, 0x25, 0x45, 0x68, 0xda, 0x2a, 0xb5, 0x40, 0x77, 0xf4, + 0xfc, 0x66, 0x33, 0x07, 0x1f, 0x75, 0xf1, 0xc1, 0x02, 0xee, 0x19, 0x19, 0x83, 0x96, 0x88, 0xc2, + 0x1d, 0x3a, 0xcb, 0x05, 0xa7, 0x5c, 0x32, 0x92, 0x5e, 0x88, 0x2f, 0x6a, 0xf1, 0xd1, 0x0d, 0xc5, + 0xf7, 0xe6, 0x0a, 0x07, 0x0b, 0x18, 0x35, 0x04, 0x6b, 0x9b, 0xef, 0xb0, 0x4e, 0x67, 0x79, 0xca, + 0x22, 0x26, 0x2f, 0x3c, 0x5c, 0xed, 0x31, 0xbc, 0xb9, 0x87, 0x2e, 0x3f, 0x58, 0xc0, 0xb7, 0x6a, + 0x29, 0xab, 0xee, 0x4f, 0xa1, 0x63, 0xe6, 0x43, 0xcf, 0x00, 0xf1, 0x2a, 0x0b, 0x63, 0xc6, 0x99, + 0xa4, 0x97, 0x8e, 0xaa, 0x8d, 0xd7, 0x79, 
0x95, 0xed, 0x6b, 0xa0, 0xee, 0x6a, 0x03, 0xda, 0x67, + 0x6c, 0x2a, 0x8f, 0xed, 0xd1, 0x9b, 0x00, 0xdd, 0x83, 0x8e, 0x88, 0xe3, 0x92, 0x4a, 0x7b, 0x77, + 0x6d, 0xe4, 0x9f, 0x42, 0xb7, 0x31, 0xe8, 0x7f, 0x5a, 0x6d, 0x41, 0x2f, 0x29, 0xc4, 0x99, 0x3c, + 0x0e, 0x63, 0x12, 0x49, 0x51, 0x58, 0xcb, 0x55, 0x93, 0xdc, 0xd7, 0x39, 0xd5, 0x4f, 0x19, 0x91, + 0x94, 0x5a, 0x63, 0x13, 0xf8, 0x9b, 0xb0, 0x5c, 0x0f, 0xaf, 0x7a, 0x9b, 0x88, 0x8a, 0x4f, 0x95, + 0x91, 0xab, 0x7a, 0x33, 0xd1, 0x78, 0x05, 0x96, 0xec, 0x5b, 0xf0, 0x7f, 0x3a, 0x8a, 0x6f, 0xae, + 0x9d, 0x52, 0x3c, 0x25, 0x69, 0x45, 0xed, 0x75, 0x33, 0x01, 0x7a, 0x03, 0x2b, 0x17, 0xaf, 0xdf, + 0xae, 0xda, 0xaf, 0xd7, 0x50, 0xff, 0x1f, 0x82, 0x2f, 0x35, 0x03, 0xcf, 0xc9, 0xe8, 0x15, 0x74, + 0x89, 0x94, 0x24, 0x3a, 0xce, 0x28, 0xd7, 0x2b, 0x54, 0x0f, 0x61, 0xe3, 0x4a, 0xed, 0x36, 0x3f, + 0xc7, 0x4d, 0xe2, 0xf8, 0x04, 0xd6, 0x22, 0x91, 0x35, 0x56, 0x3d, 0xbe, 0xdd, 0xdc, 0xf5, 0x91, + 0x2a, 0x3c, 0x72, 0xbe, 0xed, 0x58, 0x42, 0x22, 0x52, 0xc2, 0x93, 0x40, 0x14, 0xc9, 0x30, 0xa1, + 0x5c, 0xcb, 0x0e, 0x0d, 0x44, 0x72, 0x56, 0x5e, 0xf9, 0x13, 0xbe, 0x6d, 0x06, 0xbf, 0x16, 0x5b, + 0xef, 0xb7, 0x8f, 0x3e, 0x4c, 0x3a, 0xba, 0xec, 0xc5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, + 0xf1, 0xc2, 0x23, 0x3f, 0x05, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 0000000000..5b6c587a96 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/httpbody.proto + +package httpbody + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. +// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) returns +// (google.protobuf.Empty); +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + // The HTTP Content-Type header value specifying the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The HTTP request/response body as raw binary. 
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Application specific response metadata. Must be set in the first response + // for streaming APIs. + Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpBody) Reset() { *m = HttpBody{} } +func (m *HttpBody) String() string { return proto.CompactTextString(m) } +func (*HttpBody) ProtoMessage() {} +func (*HttpBody) Descriptor() ([]byte, []int) { + return fileDescriptor_09ea2ecaa32a0070, []int{0} +} + +func (m *HttpBody) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpBody.Unmarshal(m, b) +} +func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic) +} +func (m *HttpBody) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpBody.Merge(m, src) +} +func (m *HttpBody) XXX_Size() int { + return xxx_messageInfo_HttpBody.Size(m) +} +func (m *HttpBody) XXX_DiscardUnknown() { + xxx_messageInfo_HttpBody.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpBody proto.InternalMessageInfo + +func (m *HttpBody) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *HttpBody) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *HttpBody) GetExtensions() []*any.Any { + if m != nil { + return m.Extensions + } + return nil +} + +func init() { + proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody") +} + +func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070) } + +var fileDescriptor_09ea2ecaa32a0070 = []byte{ + // 229 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30, + 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 
0x4c, 0x99, 0x6c, 0x09, + 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7, + 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf, + 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc, + 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c, + 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e, + 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35, + 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c, + 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b, + 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52, + 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38, + 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec, + 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16, + 0x2b, 0x2d, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 0000000000..8ecced45cd --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,140 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/label.proto + +package label + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. + LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +var LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", +} + +var LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, +} + +func (x LabelDescriptor_ValueType) String() string { + return proto.EnumName(LabelDescriptor_ValueType_name, int32(x)) +} + +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f372a463e25ba151, []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. 
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} } +func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) } +func (*LabelDescriptor) ProtoMessage() {} +func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_f372a463e25ba151, []int{0} +} + +func (m *LabelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelDescriptor.Unmarshal(m, b) +} +func (m *LabelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelDescriptor.Marshal(b, m, deterministic) +} +func (m *LabelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelDescriptor.Merge(m, src) +} +func (m *LabelDescriptor) XXX_Size() int { + return xxx_messageInfo_LabelDescriptor.Size(m) +} +func (m *LabelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_LabelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelDescriptor proto.InternalMessageInfo + +func (m *LabelDescriptor) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return LabelDescriptor_STRING +} + +func (m *LabelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value) + proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor") +} + +func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor_f372a463e25ba151) } + +var fileDescriptor_f372a463e25ba151 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 
0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7, + 0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c, + 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 0xa9, 0x0e, 0xa9, 0x2c, 0x48, + 0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24, + 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05, + 0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08, + 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1, + 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3, + 0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9, + 0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go new file mode 100644 index 0000000000..208ff134bf --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/launch_stage.proto + +package api + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](http://cloud.google.com/terms/launch-stages). +type LaunchStage int32 + +const ( + // Do not use this default value. + LaunchStage_LAUNCH_STAGE_UNSPECIFIED LaunchStage = 0 + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + LaunchStage_EARLY_ACCESS LaunchStage = 1 + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects whitelisted. Alpha releases don’t have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. 
+ LaunchStage_ALPHA LaunchStage = 2 + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + LaunchStage_BETA LaunchStage = 3 + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + LaunchStage_GA LaunchStage = 4 + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the “Deprecation Policy” section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage_DEPRECATED LaunchStage = 5 +) + +var LaunchStage_name = map[int32]string{ + 0: "LAUNCH_STAGE_UNSPECIFIED", + 1: "EARLY_ACCESS", + 2: "ALPHA", + 3: "BETA", + 4: "GA", + 5: "DEPRECATED", +} + +var LaunchStage_value = map[string]int32{ + "LAUNCH_STAGE_UNSPECIFIED": 0, + "EARLY_ACCESS": 1, + "ALPHA": 2, + "BETA": 3, + "GA": 4, + "DEPRECATED": 5, +} + +func (x LaunchStage) String() string { + return proto.EnumName(LaunchStage_name, int32(x)) +} + +func (LaunchStage) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6b5f68b6c1cefff8, []int{0} +} + +func init() { + proto.RegisterEnum("google.api.LaunchStage", LaunchStage_name, LaunchStage_value) +} + +func init() { proto.RegisterFile("google/api/launch_stage.proto", fileDescriptor_6b5f68b6c1cefff8) } + +var fileDescriptor_6b5f68b6c1cefff8 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0xc1, 0x4a, 0xc3, 0x40, + 0x14, 0x45, 0x6d, 0x4c, 0x8b, 0x3e, 0xa5, 0x3c, 0x66, 0xe5, 0x42, 0x7f, 0x40, 0x30, 0x59, 0xb8, + 0x74, 0xf5, 0x32, 0x79, 0xa6, 0x81, 0x50, 
0x86, 0x4e, 0xba, 0xb0, 0x9b, 0x30, 0x96, 0x30, 0x8e, + 0xc4, 0xcc, 0xd0, 0xd6, 0x1f, 0xf2, 0x4b, 0x25, 0x89, 0x60, 0xd7, 0xe7, 0xc0, 0x3d, 0x17, 0x1e, + 0xac, 0xf7, 0xb6, 0x6b, 0x53, 0x13, 0x5c, 0xda, 0x99, 0xef, 0x7e, 0xff, 0xd1, 0x1c, 0x4f, 0xc6, + 0xb6, 0x49, 0x38, 0xf8, 0x93, 0x17, 0x30, 0xe1, 0xc4, 0x04, 0xf7, 0xf8, 0x09, 0x37, 0xd5, 0x68, + 0xe8, 0x41, 0x10, 0xf7, 0x70, 0x57, 0xd1, 0x76, 0x2d, 0x57, 0x8d, 0xae, 0xa9, 0xe0, 0x66, 0xbb, + 0xd6, 0x8a, 0x65, 0xf9, 0x5a, 0x72, 0x8e, 0x17, 0x02, 0xe1, 0x96, 0x69, 0x53, 0xbd, 0x35, 0x24, + 0x25, 0x6b, 0x8d, 0x33, 0x71, 0x0d, 0x73, 0xaa, 0xd4, 0x8a, 0x30, 0x12, 0x57, 0x10, 0x67, 0x5c, + 0x13, 0x5e, 0x8a, 0x05, 0x44, 0x05, 0x61, 0x2c, 0x96, 0x00, 0x39, 0xab, 0x0d, 0x4b, 0xaa, 0x39, + 0xc7, 0x79, 0xb6, 0x83, 0xe5, 0xde, 0x7f, 0x25, 0xff, 0xeb, 0x19, 0x9e, 0x6d, 0xab, 0xa1, 0x4d, + 0xcd, 0x76, 0x4f, 0x7f, 0xdc, 0xfa, 0xce, 0xf4, 0x36, 0xf1, 0x07, 0x9b, 0xda, 0xb6, 0x1f, 0xcb, + 0xd3, 0x09, 0x99, 0xe0, 0x8e, 0xc3, 0xb7, 0x17, 0x13, 0xdc, 0x4f, 0x14, 0x17, 0xa4, 0xca, 0xf7, + 0xc5, 0x28, 0x3c, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xd5, 0x39, 0x1a, 0xfb, 0x00, 0x00, + 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 0000000000..2d10759f74 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,535 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/metric.proto + +package metric + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The kind of measurement. It describes how the data is reported. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. + MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +var MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", +} + +var MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, +} + +func (x MetricDescriptor_MetricKind) String() string { + return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x)) +} + +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. 
+ MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +var MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", +} + +var MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, +} + +func (x MetricDescriptor_ValueType) String() string { + return proto.EnumName(MetricDescriptor_ValueType_name, int32(x)) +} + +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined metric types have the DNS name + // `custom.googleapis.com` or `external.googleapis.com`. Metric types should + // use a natural hierarchical grouping. 
For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The units in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit` + // defines the representation of the stored metric values. + // + // Different systems may scale the values to be more easily displayed (so a + // value of `0.02KBy` _might_ be displayed as `20By`, and a value of + // `3523KBy` _might_ be displayed as `3.5MBy`). However, if the `unit` is + // `KBy`, then the value of the metric is always in thousands of bytes, no + // matter how it may be displayed.. 
+ // + // If you want a custom metric to record the exact number of CPU-seconds used + // by a job, you can create an `INT64 CUMULATIVE` metric whose `unit` is + // `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the job uses 12,005 + // CPU-seconds, then the value is written as `12005`. + // + // Alternatively, if you want a custome metric to record data in a more + // granular way, you can create a `DOUBLE CUMULATIVE` metric whose `unit` is + // `ks{CPU}`, and then write the value `12.005` (which is `12005/1000`), + // or use `Kis{CPU}` and write `11.723` (which is `12005/1024`). + // + // The supported units are a subset of [The Unified Code for Units of + // Measure](http://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10^3) + // * `M` mega (10^6) + // * `G` giga (10^9) + // * `T` tera (10^12) + // * `P` peta (10^15) + // * `E` exa (10^18) + // * `Z` zetta (10^21) + // * `Y` yotta (10^24) + // + // * `m` milli (10^-3) + // * `u` micro (10^-6) + // * `n` nano (10^-9) + // * `p` pico (10^-12) + // * `f` femto (10^-15) + // * `a` atto (10^-18) + // * `z` zepto (10^-21) + // * `y` yocto (10^-24) + // + // * `Ki` kibi (2^10) + // * `Mi` mebi (2^20) + // * `Gi` gibi (2^30) + // * `Ti` tebi (2^40) + // * `Pi` pebi (2^50) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // * `/` division or ratio (as an infix operator). For examples, + // `kBy/{email}` or `MiBy/10ms` (although you should almost never + // have `/s` in a metric `unit`; rates should always be computed at + // query time from the underlying cumulative or delta value). + // * `.` multiplication or composition (as an infix operator). For + // examples, `GBy.d` or `k{watt}.h`. + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." 
Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // * `Annotation` is just a comment if it follows a `UNIT`. If the annotation + // is used alone, then the unit is equivalent to `1`. For examples, + // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. + // * `NAME` is a sequence of non-blank printable ASCII characters not + // containing `{` or `}`. + // * `1` represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such + // as in `1/s`. It is typically used when none of the basic units are + // appropriate. For example, "new users per day" can be represented as + // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). + // * `%` represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of 0..100, + // and a metric value `3` means "3 percent"). + // * `10^2.%` indicates a metric contains a ratio, typically in the range + // 0..1, that will be multiplied by 100 and displayed as a percentage + // (so a metric value `0.03` means "3 percent"). + // + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. 
+ DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. Metadata which can be used to guide usage of the metric. + Metadata *MetricDescriptor_MetricDescriptorMetadata `protobuf:"bytes,10,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Optional. The launch stage of the metric definition. + LaunchStage api.LaunchStage `protobuf:"varint,12,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MetricDescriptor) GetMetricKind() 
MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MetricDescriptor) GetMetadata() *MetricDescriptor_MetricDescriptorMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *MetricDescriptor) GetLaunchStage() api.LaunchStage { + if m != nil { + return m.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// Additional annotations that can be used to guide the usage of a metric. +type MetricDescriptor_MetricDescriptorMetadata struct { + // Deprecated. Please use the MetricDescriptor.launch_stage instead. + // The launch stage of the metric definition. + LaunchStage api.LaunchStage `protobuf:"varint,1,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` // Deprecated: Do not use. + // The sampling period of metric data points. For metrics which are written + // periodically, consecutive data points are stored at this time interval, + // excluding data loss due to errors. Metrics with a higher granularity have + // a smaller sampling period. + SamplePeriod *duration.Duration `protobuf:"bytes,2,opt,name=sample_period,json=samplePeriod,proto3" json:"sample_period,omitempty"` + // The delay of data points caused by ingestion. Data points older than this + // age are guaranteed to be ingested and available to be read, excluding + // data loss due to errors. 
+ IngestDelay *duration.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor_MetricDescriptorMetadata) Reset() { + *m = MetricDescriptor_MetricDescriptorMetadata{} +} +func (m *MetricDescriptor_MetricDescriptorMetadata) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor_MetricDescriptorMetadata) ProtoMessage() {} +func (*MetricDescriptor_MetricDescriptorMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0, 0} +} + +func (m *MetricDescriptor_MetricDescriptorMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata.Unmarshal(m, b) +} +func (m *MetricDescriptor_MetricDescriptorMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor_MetricDescriptorMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata.Merge(m, src) +} +func (m *MetricDescriptor_MetricDescriptorMetadata) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata.Size(m) +} +func (m *MetricDescriptor_MetricDescriptorMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor_MetricDescriptorMetadata proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *MetricDescriptor_MetricDescriptorMetadata) GetLaunchStage() api.LaunchStage { + if m != nil { + return m.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (m *MetricDescriptor_MetricDescriptorMetadata) GetSamplePeriod() *duration.Duration { + if m != nil { + return m.SamplePeriod + } + return nil +} + +func (m *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *duration.Duration { + if m != nil { + return m.IngestDelay + } + return nil +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. + // For example, `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{1} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Metric) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value) + proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value) + proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor") + proto.RegisterType((*MetricDescriptor_MetricDescriptorMetadata)(nil), "google.api.MetricDescriptor.MetricDescriptorMetadata") + proto.RegisterType((*Metric)(nil), "google.api.Metric") + proto.RegisterMapType((map[string]string)(nil), "google.api.Metric.LabelsEntry") +} + +func init() { proto.RegisterFile("google/api/metric.proto", 
fileDescriptor_927eaac1a24f8abb) } + +var fileDescriptor_927eaac1a24f8abb = []byte{ + // 661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0x26, 0xe9, 0xcf, 0xd6, 0x93, 0x32, 0x45, 0x16, 0xda, 0x42, 0x27, 0xa6, 0xd2, 0x0b, 0xe8, + 0x55, 0x2b, 0x6d, 0x30, 0x60, 0xa0, 0x49, 0xed, 0x12, 0x4a, 0xb4, 0x36, 0x2d, 0x59, 0x32, 0x69, + 0xdc, 0x44, 0x5e, 0x63, 0x42, 0xb4, 0x34, 0x09, 0x49, 0x3a, 0xa9, 0x4f, 0xc0, 0x25, 0xef, 0xc0, + 0x53, 0xf1, 0x38, 0xc8, 0x76, 0xda, 0x66, 0x45, 0x54, 0x5c, 0xe5, 0xf8, 0xfb, 0xbe, 0xf3, 0xd9, + 0xc7, 0x3e, 0x27, 0x70, 0xe0, 0x45, 0x91, 0x17, 0x90, 0x2e, 0x8e, 0xfd, 0xee, 0x8c, 0x64, 0x89, + 0x3f, 0xed, 0xc4, 0x49, 0x94, 0x45, 0x08, 0x38, 0xd1, 0xc1, 0xb1, 0xdf, 0xd8, 0x2f, 0x88, 0x02, + 0x7c, 0x4b, 0x02, 0xae, 0x69, 0x3c, 0x7b, 0x80, 0xcf, 0xc3, 0xe9, 0x37, 0x27, 0xcd, 0xb0, 0x47, + 0x72, 0xfa, 0x28, 0xa7, 0xd9, 0xea, 0x76, 0xfe, 0xb5, 0xeb, 0xce, 0x13, 0x9c, 0xf9, 0x51, 0xc8, + 0xf9, 0xd6, 0x8f, 0x1d, 0x90, 0x47, 0x6c, 0x4f, 0x95, 0xa4, 0xd3, 0xc4, 0x8f, 0xb3, 0x28, 0x41, + 0x08, 0xca, 0x21, 0x9e, 0x11, 0x45, 0x68, 0x0a, 0xed, 0x9a, 0xc9, 0x62, 0x8a, 0x65, 0x8b, 0x98, + 0x28, 0xbb, 0x1c, 0xa3, 0x31, 0x3a, 0x81, 0x2a, 0x3b, 0x4a, 0xaa, 0x88, 0xcd, 0x52, 0x5b, 0x3a, + 0x3e, 0xec, 0xac, 0x0f, 0xdc, 0x19, 0x52, 0x66, 0x6d, 0x6a, 0xe6, 0x52, 0xf4, 0x09, 0x24, 0x5e, + 0xa4, 0x73, 0xe7, 0x87, 0xae, 0x52, 0x6a, 0x0a, 0xed, 0xbd, 0xe3, 0x97, 0xc5, 0xcc, 0xcd, 0xf3, + 0xe4, 0xc0, 0xa5, 0x1f, 0xba, 0x26, 0xcc, 0x56, 0x31, 0xd2, 0x00, 0xee, 0x71, 0x30, 0x27, 0x0e, + 0x3b, 0x58, 0x99, 0x19, 0xbd, 0xd8, 0x6a, 0x74, 0x4d, 0xe5, 0xd6, 0x22, 0x26, 0x66, 0xed, 0x7e, + 0x19, 0xd2, 0xca, 0xe6, 0xa1, 0x9f, 0x29, 0x15, 0x5e, 0x19, 0x8d, 0x51, 0x13, 0x24, 0x37, 0x4f, + 0xf3, 0xa3, 0x50, 0xa9, 0x32, 0xaa, 0x08, 0xa1, 0xe7, 0x50, 0x77, 0xfd, 0x34, 0x0e, 0xf0, 0xc2, + 0x61, 0x77, 0xb5, 0x93, 0x4b, 0x38, 0x66, 0xd0, 0x2b, 0xfb, 0x0c, 0xbb, 0x33, 0x92, 0x61, 0x17, + 
0x67, 0x58, 0x81, 0xa6, 0xd0, 0x96, 0x8e, 0x5f, 0xff, 0x47, 0x99, 0x6b, 0x60, 0x94, 0x27, 0x9b, + 0x2b, 0x1b, 0x74, 0x06, 0xf5, 0xe2, 0x23, 0x2b, 0x75, 0x56, 0xf4, 0xc1, 0xc3, 0x7b, 0xa7, 0xfc, + 0x15, 0xa5, 0x4d, 0x29, 0x58, 0x2f, 0x1a, 0xbf, 0x05, 0x50, 0xfe, 0xb5, 0x05, 0x3a, 0xdf, 0x30, + 0x16, 0xb6, 0x1a, 0xf7, 0x45, 0x45, 0x78, 0x60, 0x8e, 0xce, 0xe1, 0x71, 0x8a, 0x67, 0x71, 0x40, + 0x9c, 0x98, 0x24, 0x7e, 0xe4, 0x2a, 0x22, 0x2b, 0xf8, 0xe9, 0xd2, 0x60, 0xd9, 0x7f, 0x1d, 0x35, + 0xef, 0x3f, 0xb3, 0xce, 0xf5, 0x13, 0x26, 0x47, 0x1f, 0xa0, 0xee, 0x87, 0x1e, 0x49, 0x33, 0xc7, + 0x25, 0x01, 0x5e, 0xb0, 0xb6, 0xd8, 0x9a, 0x2e, 0x71, 0xb9, 0x4a, 0xd5, 0xad, 0x31, 0xc0, 0xba, + 0x47, 0xd0, 0x21, 0x1c, 0x8c, 0x34, 0xcb, 0xd4, 0x2f, 0x9c, 0x4b, 0xdd, 0x50, 0x1d, 0xdb, 0xb8, + 0x9a, 0x68, 0x17, 0xfa, 0x47, 0x5d, 0x53, 0xe5, 0x47, 0xa8, 0x06, 0x95, 0x41, 0xcf, 0x1e, 0x68, + 0xb2, 0x40, 0x43, 0x55, 0x1b, 0x5a, 0x3d, 0x59, 0x44, 0x7b, 0x00, 0x17, 0xf6, 0xc8, 0x1e, 0xf6, + 0x2c, 0xfd, 0x5a, 0x93, 0x4b, 0xad, 0xef, 0x50, 0x5b, 0xf5, 0x0a, 0x6a, 0xc0, 0xfe, 0x75, 0x6f, + 0x68, 0x6b, 0x8e, 0x75, 0x33, 0xd1, 0x36, 0xec, 0x76, 0xa1, 0xdc, 0x1f, 0x8f, 0x87, 0xdc, 0x4d, + 0x37, 0xac, 0xd3, 0x57, 0xb2, 0x88, 0x00, 0xaa, 0xea, 0xd8, 0xee, 0x0f, 0x35, 0xb9, 0x44, 0xe3, + 0x2b, 0xcb, 0xd4, 0x8d, 0x81, 0x5c, 0x46, 0x32, 0xd4, 0x55, 0x9d, 0xae, 0xfa, 0xb6, 0xa5, 0x8f, + 0x0d, 0xb9, 0x42, 0x93, 0x46, 0x63, 0x43, 0xbb, 0x91, 0xab, 0xad, 0x9f, 0x02, 0x54, 0x79, 0x11, + 0xab, 0x59, 0x2b, 0x15, 0x66, 0xed, 0x74, 0x63, 0xd6, 0x8e, 0xfe, 0x6e, 0x25, 0x3e, 0x72, 0xa9, + 0x16, 0x66, 0xc9, 0x62, 0x39, 0x6e, 0x8d, 0x77, 0x20, 0x15, 0x60, 0x24, 0x43, 0xe9, 0x8e, 0x2c, + 0xf2, 0xc9, 0xa6, 0x21, 0x7a, 0x02, 0x15, 0x36, 0x0b, 0xec, 0xc5, 0x6a, 0x26, 0x5f, 0x9c, 0x89, + 0x6f, 0x85, 0xbe, 0x03, 0x7b, 0xd3, 0x68, 0x56, 0xd8, 0xa7, 0x2f, 0xf1, 0x8d, 0x26, 0xf4, 0x35, + 0x26, 0xc2, 0x97, 0x37, 0x39, 0xe5, 0x45, 0x01, 0x0e, 0xbd, 0x4e, 0x94, 0x78, 0x5d, 0x8f, 0x84, + 0xec, 0xad, 0xba, 0x9c, 0xc2, 0xb1, 
0x9f, 0x16, 0xfe, 0x6b, 0xef, 0xf9, 0xe7, 0x97, 0x58, 0x1e, + 0xf4, 0x26, 0xfa, 0x6d, 0x95, 0x49, 0x4f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x9a, 0x6a, + 0xfb, 0x01, 0x05, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/test/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 0000000000..d2ffbd4e0a --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,306 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/monitored_resource.proto + +package monitoredres + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + _struct "github.com/golang/protobuf/ptypes/struct" + api "google.golang.org/genproto/googleapis/api" + label "google.golang.org/genproto/googleapis/api/label" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a +// type name and a set of labels. For example, the monitored resource +// descriptor for Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. 
+type MonitoredResourceDescriptor struct { + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // The maximum length of this value is 256 characters. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // Optional. The launch stage of the monitored resource definition. 
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} } +func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceDescriptor) ProtoMessage() {} +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd8bd738b08f2bf, []int{0} +} + +func (m *MonitoredResourceDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceDescriptor.Unmarshal(m, b) +} +func (m *MonitoredResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceDescriptor.Marshal(b, m, deterministic) +} +func (m *MonitoredResourceDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceDescriptor.Merge(m, src) +} +func (m *MonitoredResourceDescriptor) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceDescriptor.Size(m) +} +func (m *MonitoredResourceDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResourceDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceDescriptor proto.InternalMessageInfo + +func (m *MonitoredResourceDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return 
m.Labels + } + return nil +} + +func (m *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { + if m != nil { + return m.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's +// schema. Information in the `labels` field identifies the actual resource and +// its attributes according to the schema. For example, a particular Compute +// Engine VM instance could be represented by the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels +// `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +type MonitoredResource struct { + // Required. The monitored resource type. This field must match + // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For + // example, the type of a Compute Engine VM instance is `gce_instance`. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Required. Values for all of the labels listed in the associated monitored + // resource descriptor. For example, Compute Engine VM instances use the + // labels `"project_id"`, `"instance_id"`, and `"zone"`. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResource) Reset() { *m = MonitoredResource{} } +func (m *MonitoredResource) String() string { return proto.CompactTextString(m) } +func (*MonitoredResource) ProtoMessage() {} +func (*MonitoredResource) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd8bd738b08f2bf, []int{1} +} + +func (m *MonitoredResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResource.Unmarshal(m, b) +} +func (m *MonitoredResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResource.Marshal(b, m, deterministic) +} +func (m *MonitoredResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResource.Merge(m, src) +} +func (m *MonitoredResource) XXX_Size() int { + return xxx_messageInfo_MonitoredResource.Size(m) +} +func (m *MonitoredResource) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResource.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResource proto.InternalMessageInfo + +func (m *MonitoredResource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object. +// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to +// uniquely identify a monitored resource instance. There is some other useful +// auxiliary metadata. Monitoring and Logging use an ingestion +// pipeline to extract metadata for cloud resources of all types, and store +// the metadata in this message. 
+type MonitoredResourceMetadata struct { + // Output only. Values for predefined system metadata labels. + // System labels are a kind of metadata extracted by Google, including + // "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. + UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceMetadata) Reset() { *m = MonitoredResourceMetadata{} } +func (m *MonitoredResourceMetadata) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceMetadata) ProtoMessage() {} +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd8bd738b08f2bf, []int{2} +} + +func (m *MonitoredResourceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceMetadata.Unmarshal(m, b) +} +func (m *MonitoredResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceMetadata.Marshal(b, m, deterministic) +} +func (m *MonitoredResourceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceMetadata.Merge(m, src) +} +func (m *MonitoredResourceMetadata) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceMetadata.Size(m) +} +func (m *MonitoredResourceMetadata) XXX_DiscardUnknown() { + 
xxx_messageInfo_MonitoredResourceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceMetadata proto.InternalMessageInfo + +func (m *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct { + if m != nil { + return m.SystemLabels + } + return nil +} + +func (m *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func init() { + proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor") + proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResource.LabelsEntry") + proto.RegisterType((*MonitoredResourceMetadata)(nil), "google.api.MonitoredResourceMetadata") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResourceMetadata.UserLabelsEntry") +} + +func init() { + proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor_6cd8bd738b08f2bf) +} + +var fileDescriptor_6cd8bd738b08f2bf = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x8b, 0xd4, 0x40, + 0x10, 0xa5, 0x67, 0x66, 0x57, 0xac, 0x8c, 0xab, 0x36, 0xb2, 0xc6, 0xac, 0x42, 0x1c, 0x2f, 0xe3, + 0x25, 0x81, 0x5d, 0x04, 0x5d, 0xf5, 0xb0, 0xab, 0x22, 0x82, 0x2b, 0x43, 0x16, 0x3d, 0x78, 0x09, + 0x3d, 0x49, 0x1b, 0x83, 0x49, 0x3a, 0x74, 0x77, 0x84, 0xfc, 0x1d, 0xc1, 0xff, 0xe7, 0x51, 0xf0, + 0x22, 0xfd, 0x91, 0x49, 0x62, 0x44, 0xf0, 0x56, 0xf5, 0xde, 0xab, 0xaa, 0xf7, 0xd2, 0x04, 0x1e, + 0x64, 0x8c, 0x65, 0x05, 0x0d, 0x49, 0x9d, 0x87, 0x25, 0xab, 0x72, 0xc9, 0x38, 0x4d, 0x63, 0x4e, + 0x05, 0x6b, 0x78, 0x42, 0x83, 0x9a, 0x33, 0xc9, 0x30, 0x18, 0x51, 0x40, 0xea, 0xdc, 0x3b, 0x1c, + 0x0c, 0x14, 0x64, 0x4b, 0x0b, 0xa3, 0xf1, 0xee, 0x8d, 0xf0, 0xa6, 0x4a, 0x3e, 0xc7, 0x42, 0x92, + 0xcc, 0xae, 0xf0, 0xee, 0x5a, 0x5a, 0x77, 0xdb, 0xe6, 0x53, 0x28, 0x24, 0x6f, 0x12, 0x69, 0xd8, + 
0xd5, 0x2f, 0x04, 0x47, 0x17, 0xdd, 0xf5, 0xc8, 0x1e, 0x7f, 0x49, 0x45, 0xc2, 0xf3, 0x5a, 0x32, + 0x8e, 0x31, 0x2c, 0x2a, 0x52, 0x52, 0x77, 0xcf, 0x47, 0xeb, 0xab, 0x91, 0xae, 0x15, 0x26, 0xdb, + 0x9a, 0xba, 0xc8, 0x60, 0xaa, 0xc6, 0xf7, 0x61, 0x99, 0xe6, 0xa2, 0x2e, 0x48, 0x1b, 0x6b, 0xfd, + 0x4c, 0x73, 0x8e, 0xc5, 0xde, 0xa9, 0x31, 0x1f, 0x9c, 0xd4, 0x2e, 0xce, 0x59, 0xe5, 0xce, 0xad, + 0xa2, 0x87, 0xf0, 0x09, 0xec, 0xeb, 0x60, 0xc2, 0x5d, 0xf8, 0xf3, 0xb5, 0x73, 0x7c, 0x14, 0xf4, + 0xf1, 0x83, 0xb7, 0x8a, 0xe9, 0x9d, 0x45, 0x56, 0x8a, 0x4f, 0x61, 0x39, 0x4c, 0xed, 0x5e, 0xf1, + 0xd1, 0xfa, 0xe0, 0xf8, 0xf6, 0x78, 0x54, 0xf1, 0x97, 0x8a, 0x8e, 0x9c, 0xa2, 0x6f, 0x56, 0xdf, + 0x11, 0xdc, 0x9c, 0xa4, 0xff, 0x6b, 0xbe, 0xb3, 0x9d, 0xb5, 0x99, 0xb6, 0xf6, 0x70, 0xb8, 0x7f, + 0xb2, 0xc2, 0x98, 0x15, 0xaf, 0x2a, 0xc9, 0xdb, 0xce, 0xa8, 0xf7, 0x04, 0x9c, 0x01, 0x8c, 0x6f, + 0xc0, 0xfc, 0x0b, 0x6d, 0xed, 0x11, 0x55, 0xe2, 0x5b, 0xb0, 0xf7, 0x95, 0x14, 0x4d, 0xf7, 0xf1, + 0x4c, 0x73, 0x3a, 0x7b, 0x8c, 0x56, 0x3f, 0x10, 0xdc, 0x99, 0x1c, 0xb9, 0xa0, 0x92, 0xa4, 0x44, + 0x12, 0xfc, 0x0c, 0xae, 0x89, 0x56, 0x48, 0x5a, 0xc6, 0xd6, 0xa2, 0xda, 0xe9, 0xf4, 0x9f, 0xa0, + 0x7b, 0xf9, 0xe0, 0x52, 0xbf, 0x7c, 0xb4, 0x34, 0x6a, 0x63, 0x06, 0x7f, 0x00, 0xa7, 0x11, 0x94, + 0xc7, 0xa3, 0x78, 0x8f, 0xfe, 0x19, 0xaf, 0xbb, 0x1c, 0xbc, 0x17, 0x94, 0x0f, 0xa3, 0x42, 0xb3, + 0x03, 0xbc, 0xe7, 0x70, 0xfd, 0x0f, 0xfa, 0x7f, 0x22, 0x9f, 0xb7, 0x70, 0x90, 0xb0, 0x72, 0x60, + 0xe3, 0xfc, 0x70, 0xe2, 0x63, 0xa3, 0x82, 0x6d, 0xd0, 0xc7, 0x17, 0x56, 0x95, 0xb1, 0x82, 0x54, + 0x59, 0xc0, 0x78, 0x16, 0x66, 0xb4, 0xd2, 0xb1, 0x43, 0x43, 0x91, 0x3a, 0x17, 0xe3, 0x3f, 0x8d, + 0x53, 0xf1, 0x74, 0xd8, 0xfc, 0x44, 0xe8, 0xdb, 0x6c, 0xf1, 0xfa, 0x6c, 0xf3, 0x66, 0xbb, 0xaf, + 0x27, 0x4f, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xa6, 0xca, 0xf1, 0xa2, 0x03, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go 
b/test/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go new file mode 100644 index 0000000000..0b2647dab3 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go @@ -0,0 +1,9166 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/container/v1/cluster_service.proto + +package container + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Possible values for Effect in taint. 
+type NodeTaint_Effect int32 + +const ( + // Not set + NodeTaint_EFFECT_UNSPECIFIED NodeTaint_Effect = 0 + // NoSchedule + NodeTaint_NO_SCHEDULE NodeTaint_Effect = 1 + // PreferNoSchedule + NodeTaint_PREFER_NO_SCHEDULE NodeTaint_Effect = 2 + // NoExecute + NodeTaint_NO_EXECUTE NodeTaint_Effect = 3 +) + +var NodeTaint_Effect_name = map[int32]string{ + 0: "EFFECT_UNSPECIFIED", + 1: "NO_SCHEDULE", + 2: "PREFER_NO_SCHEDULE", + 3: "NO_EXECUTE", +} + +var NodeTaint_Effect_value = map[string]int32{ + "EFFECT_UNSPECIFIED": 0, + "NO_SCHEDULE": 1, + "PREFER_NO_SCHEDULE": 2, + "NO_EXECUTE": 3, +} + +func (x NodeTaint_Effect) String() string { + return proto.EnumName(NodeTaint_Effect_name, int32(x)) +} + +func (NodeTaint_Effect) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{2, 0} +} + +// Allowed Network Policy providers. +type NetworkPolicy_Provider int32 + +const ( + // Not set + NetworkPolicy_PROVIDER_UNSPECIFIED NetworkPolicy_Provider = 0 + // Tigera (Calico Felix). + NetworkPolicy_CALICO NetworkPolicy_Provider = 1 +) + +var NetworkPolicy_Provider_name = map[int32]string{ + 0: "PROVIDER_UNSPECIFIED", + 1: "CALICO", +} + +var NetworkPolicy_Provider_value = map[string]int32{ + "PROVIDER_UNSPECIFIED": 0, + "CALICO": 1, +} + +func (x NetworkPolicy_Provider) String() string { + return proto.EnumName(NetworkPolicy_Provider_name, int32(x)) +} + +func (NetworkPolicy_Provider) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{15, 0} +} + +// The current status of the cluster. +type Cluster_Status int32 + +const ( + // Not set. + Cluster_STATUS_UNSPECIFIED Cluster_Status = 0 + // The PROVISIONING state indicates the cluster is being created. + Cluster_PROVISIONING Cluster_Status = 1 + // The RUNNING state indicates the cluster has been created and is fully + // usable. 
+ Cluster_RUNNING Cluster_Status = 2 + // The RECONCILING state indicates that some work is actively being done on + // the cluster, such as upgrading the master or node software. Details can + // be found in the `statusMessage` field. + Cluster_RECONCILING Cluster_Status = 3 + // The STOPPING state indicates the cluster is being deleted. + Cluster_STOPPING Cluster_Status = 4 + // The ERROR state indicates the cluster may be unusable. Details + // can be found in the `statusMessage` field. + Cluster_ERROR Cluster_Status = 5 + // The DEGRADED state indicates the cluster requires user action to restore + // full functionality. Details can be found in the `statusMessage` field. + Cluster_DEGRADED Cluster_Status = 6 +) + +var Cluster_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PROVISIONING", + 2: "RUNNING", + 3: "RECONCILING", + 4: "STOPPING", + 5: "ERROR", + 6: "DEGRADED", +} + +var Cluster_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PROVISIONING": 1, + "RUNNING": 2, + "RECONCILING": 3, + "STOPPING": 4, + "ERROR": 5, + "DEGRADED": 6, +} + +func (x Cluster_Status) String() string { + return proto.EnumName(Cluster_Status_name, int32(x)) +} + +func (Cluster_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{18, 0} +} + +// Current status of the operation. +type Operation_Status int32 + +const ( + // Not set. + Operation_STATUS_UNSPECIFIED Operation_Status = 0 + // The operation has been created. + Operation_PENDING Operation_Status = 1 + // The operation is currently running. + Operation_RUNNING Operation_Status = 2 + // The operation is done, either cancelled or completed. + Operation_DONE Operation_Status = 3 + // The operation is aborting. 
+ Operation_ABORTING Operation_Status = 4 +) + +var Operation_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", + 4: "ABORTING", +} + +var Operation_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, + "ABORTING": 4, +} + +func (x Operation_Status) String() string { + return proto.EnumName(Operation_Status_name, int32(x)) +} + +func (Operation_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{20, 0} +} + +// Operation type. +type Operation_Type int32 + +const ( + // Not set. + Operation_TYPE_UNSPECIFIED Operation_Type = 0 + // Cluster create. + Operation_CREATE_CLUSTER Operation_Type = 1 + // Cluster delete. + Operation_DELETE_CLUSTER Operation_Type = 2 + // A master upgrade. + Operation_UPGRADE_MASTER Operation_Type = 3 + // A node upgrade. + Operation_UPGRADE_NODES Operation_Type = 4 + // Cluster repair. + Operation_REPAIR_CLUSTER Operation_Type = 5 + // Cluster update. + Operation_UPDATE_CLUSTER Operation_Type = 6 + // Node pool create. + Operation_CREATE_NODE_POOL Operation_Type = 7 + // Node pool delete. + Operation_DELETE_NODE_POOL Operation_Type = 8 + // Set node pool management. + Operation_SET_NODE_POOL_MANAGEMENT Operation_Type = 9 + // Automatic node pool repair. + Operation_AUTO_REPAIR_NODES Operation_Type = 10 + // Automatic node upgrade. + Operation_AUTO_UPGRADE_NODES Operation_Type = 11 + // Set labels. + Operation_SET_LABELS Operation_Type = 12 + // Set/generate master auth materials + Operation_SET_MASTER_AUTH Operation_Type = 13 + // Set node pool size. + Operation_SET_NODE_POOL_SIZE Operation_Type = 14 + // Updates network policy for a cluster. + Operation_SET_NETWORK_POLICY Operation_Type = 15 + // Set the maintenance policy. 
+ Operation_SET_MAINTENANCE_POLICY Operation_Type = 16 +) + +var Operation_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CREATE_CLUSTER", + 2: "DELETE_CLUSTER", + 3: "UPGRADE_MASTER", + 4: "UPGRADE_NODES", + 5: "REPAIR_CLUSTER", + 6: "UPDATE_CLUSTER", + 7: "CREATE_NODE_POOL", + 8: "DELETE_NODE_POOL", + 9: "SET_NODE_POOL_MANAGEMENT", + 10: "AUTO_REPAIR_NODES", + 11: "AUTO_UPGRADE_NODES", + 12: "SET_LABELS", + 13: "SET_MASTER_AUTH", + 14: "SET_NODE_POOL_SIZE", + 15: "SET_NETWORK_POLICY", + 16: "SET_MAINTENANCE_POLICY", +} + +var Operation_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CREATE_CLUSTER": 1, + "DELETE_CLUSTER": 2, + "UPGRADE_MASTER": 3, + "UPGRADE_NODES": 4, + "REPAIR_CLUSTER": 5, + "UPDATE_CLUSTER": 6, + "CREATE_NODE_POOL": 7, + "DELETE_NODE_POOL": 8, + "SET_NODE_POOL_MANAGEMENT": 9, + "AUTO_REPAIR_NODES": 10, + "AUTO_UPGRADE_NODES": 11, + "SET_LABELS": 12, + "SET_MASTER_AUTH": 13, + "SET_NODE_POOL_SIZE": 14, + "SET_NETWORK_POLICY": 15, + "SET_MAINTENANCE_POLICY": 16, +} + +func (x Operation_Type) String() string { + return proto.EnumName(Operation_Type_name, int32(x)) +} + +func (Operation_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{20, 1} +} + +// Operation type: what type update to perform. +type SetMasterAuthRequest_Action int32 + +const ( + // Operation is unknown and will error out. + SetMasterAuthRequest_UNKNOWN SetMasterAuthRequest_Action = 0 + // Set the password to a user generated value. + SetMasterAuthRequest_SET_PASSWORD SetMasterAuthRequest_Action = 1 + // Generate a new password and set it to that. + SetMasterAuthRequest_GENERATE_PASSWORD SetMasterAuthRequest_Action = 2 + // Set the username. If an empty username is provided, basic authentication + // is disabled for the cluster. If a non-empty username is provided, basic + // authentication is enabled, with either a provided password or a generated + // one. 
+ SetMasterAuthRequest_SET_USERNAME SetMasterAuthRequest_Action = 3 +) + +var SetMasterAuthRequest_Action_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SET_PASSWORD", + 2: "GENERATE_PASSWORD", + 3: "SET_USERNAME", +} + +var SetMasterAuthRequest_Action_value = map[string]int32{ + "UNKNOWN": 0, + "SET_PASSWORD": 1, + "GENERATE_PASSWORD": 2, + "SET_USERNAME": 3, +} + +func (x SetMasterAuthRequest_Action) String() string { + return proto.EnumName(SetMasterAuthRequest_Action_name, int32(x)) +} + +func (SetMasterAuthRequest_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{31, 0} +} + +// The current status of the node pool instance. +type NodePool_Status int32 + +const ( + // Not set. + NodePool_STATUS_UNSPECIFIED NodePool_Status = 0 + // The PROVISIONING state indicates the node pool is being created. + NodePool_PROVISIONING NodePool_Status = 1 + // The RUNNING state indicates the node pool has been created + // and is fully usable. + NodePool_RUNNING NodePool_Status = 2 + // The RUNNING_WITH_ERROR state indicates the node pool has been created + // and is partially usable. Some error state has occurred and some + // functionality may be impaired. Customer may need to reissue a request + // or trigger a new update. + NodePool_RUNNING_WITH_ERROR NodePool_Status = 3 + // The RECONCILING state indicates that some work is actively being done on + // the node pool, such as upgrading node software. Details can + // be found in the `statusMessage` field. + NodePool_RECONCILING NodePool_Status = 4 + // The STOPPING state indicates the node pool is being deleted. + NodePool_STOPPING NodePool_Status = 5 + // The ERROR state indicates the node pool may be unusable. Details + // can be found in the `statusMessage` field. 
+ NodePool_ERROR NodePool_Status = 6 +) + +var NodePool_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "PROVISIONING", + 2: "RUNNING", + 3: "RUNNING_WITH_ERROR", + 4: "RECONCILING", + 5: "STOPPING", + 6: "ERROR", +} + +var NodePool_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "PROVISIONING": 1, + "RUNNING": 2, + "RUNNING_WITH_ERROR": 3, + "RECONCILING": 4, + "STOPPING": 5, + "ERROR": 6, +} + +func (x NodePool_Status) String() string { + return proto.EnumName(NodePool_Status_name, int32(x)) +} + +func (NodePool_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{45, 0} +} + +// Code for each condition +type StatusCondition_Code int32 + +const ( + // UNKNOWN indicates a generic condition. + StatusCondition_UNKNOWN StatusCondition_Code = 0 + // GCE_STOCKOUT indicates a Google Compute Engine stockout. + StatusCondition_GCE_STOCKOUT StatusCondition_Code = 1 + // GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot + // service account. + StatusCondition_GKE_SERVICE_ACCOUNT_DELETED StatusCondition_Code = 2 + // Google Compute Engine quota was exceeded. + StatusCondition_GCE_QUOTA_EXCEEDED StatusCondition_Code = 3 + // Cluster state was manually changed by an SRE due to a system logic error. + StatusCondition_SET_BY_OPERATOR StatusCondition_Code = 4 + // Unable to perform an encrypt operation against the CloudKMS key used for + // etcd level encryption. 
+ // More codes TBA + StatusCondition_CLOUD_KMS_KEY_ERROR StatusCondition_Code = 7 +) + +var StatusCondition_Code_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GCE_STOCKOUT", + 2: "GKE_SERVICE_ACCOUNT_DELETED", + 3: "GCE_QUOTA_EXCEEDED", + 4: "SET_BY_OPERATOR", + 7: "CLOUD_KMS_KEY_ERROR", +} + +var StatusCondition_Code_value = map[string]int32{ + "UNKNOWN": 0, + "GCE_STOCKOUT": 1, + "GKE_SERVICE_ACCOUNT_DELETED": 2, + "GCE_QUOTA_EXCEEDED": 3, + "SET_BY_OPERATOR": 4, + "CLOUD_KMS_KEY_ERROR": 7, +} + +func (x StatusCondition_Code) String() string { + return proto.EnumName(StatusCondition_Code_name, int32(x)) +} + +func (StatusCondition_Code) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{68, 0} +} + +// State of etcd encryption. +type DatabaseEncryption_State int32 + +const ( + // Should never be set + DatabaseEncryption_UNKNOWN DatabaseEncryption_State = 0 + // Secrets in etcd are encrypted. + DatabaseEncryption_ENCRYPTED DatabaseEncryption_State = 1 + // Secrets in etcd are stored in plain text (at etcd level) - this is + // unrelated to GCE level full disk encryption. + DatabaseEncryption_DECRYPTED DatabaseEncryption_State = 2 +) + +var DatabaseEncryption_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ENCRYPTED", + 2: "DECRYPTED", +} + +var DatabaseEncryption_State_value = map[string]int32{ + "UNKNOWN": 0, + "ENCRYPTED": 1, + "DECRYPTED": 2, +} + +func (x DatabaseEncryption_State) String() string { + return proto.EnumName(DatabaseEncryption_State_name, int32(x)) +} + +func (DatabaseEncryption_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{72, 0} +} + +// Status shows the current usage of a secondary IP range. +type UsableSubnetworkSecondaryRange_Status int32 + +const ( + // UNKNOWN is the zero value of the Status enum. It's not a valid status. 
+ UsableSubnetworkSecondaryRange_UNKNOWN UsableSubnetworkSecondaryRange_Status = 0 + // UNUSED denotes that this range is unclaimed by any cluster. + UsableSubnetworkSecondaryRange_UNUSED UsableSubnetworkSecondaryRange_Status = 1 + // IN_USE_SERVICE denotes that this range is claimed by a cluster for + // services. It cannot be used for other clusters. + UsableSubnetworkSecondaryRange_IN_USE_SERVICE UsableSubnetworkSecondaryRange_Status = 2 + // IN_USE_SHAREABLE_POD denotes this range was created by the network admin + // and is currently claimed by a cluster for pods. It can only be used by + // other clusters as a pod range. + UsableSubnetworkSecondaryRange_IN_USE_SHAREABLE_POD UsableSubnetworkSecondaryRange_Status = 3 + // IN_USE_MANAGED_POD denotes this range was created by GKE and is claimed + // for pods. It cannot be used for other clusters. + UsableSubnetworkSecondaryRange_IN_USE_MANAGED_POD UsableSubnetworkSecondaryRange_Status = 4 +) + +var UsableSubnetworkSecondaryRange_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNUSED", + 2: "IN_USE_SERVICE", + 3: "IN_USE_SHAREABLE_POD", + 4: "IN_USE_MANAGED_POD", +} + +var UsableSubnetworkSecondaryRange_Status_value = map[string]int32{ + "UNKNOWN": 0, + "UNUSED": 1, + "IN_USE_SERVICE": 2, + "IN_USE_SHAREABLE_POD": 3, + "IN_USE_MANAGED_POD": 4, +} + +func (x UsableSubnetworkSecondaryRange_Status) String() string { + return proto.EnumName(UsableSubnetworkSecondaryRange_Status_name, int32(x)) +} + +func (UsableSubnetworkSecondaryRange_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{75, 0} +} + +// Parameters that describe the nodes in a cluster. +type NodeConfig struct { + // The name of a Google Compute Engine [machine + // type](/compute/docs/machine-types) (e.g. + // `n1-standard-1`). + // + // If unspecified, the default machine type is + // `n1-standard-1`. 
+ MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machine_type,omitempty"` + // Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. + // + // If unspecified, the default disk size is 100GB. + DiskSizeGb int32 `protobuf:"varint,2,opt,name=disk_size_gb,json=diskSizeGb,proto3" json:"disk_size_gb,omitempty"` + // The set of Google API scopes to be made available on all of the + // node VMs under the "default" service account. + // + // The following scopes are recommended, but not required, and by default are + // not included: + // + // * `https://www.googleapis.com/auth/compute` is required for mounting + // persistent storage on your nodes. + // * `https://www.googleapis.com/auth/devstorage.read_only` is required for + // communicating with **gcr.io** + // (the [Google Container Registry](/container-registry/)). + // + // If unspecified, no scopes are added, unless Cloud Logging or Cloud + // Monitoring are enabled, in which case their required scopes will be added. + OauthScopes []string `protobuf:"bytes,3,rep,name=oauth_scopes,json=oauthScopes,proto3" json:"oauth_scopes,omitempty"` + // The Google Cloud Platform Service Account to be used by the node VMs. If + // no Service Account is specified, the "default" service account is used. + ServiceAccount string `protobuf:"bytes,9,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"` + // The metadata key/value pairs assigned to instances in the cluster. + // + // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes + // in length. These are reflected as part of a URL in the metadata server. 
+ // Additionally, to avoid ambiguity, keys must not conflict with any other + // metadata keys for the project or be one of the reserved keys: + // "cluster-location" + // "cluster-name" + // "cluster-uid" + // "configure-sh" + // "containerd-configure-sh" + // "enable-os-login" + // "gci-update-strategy" + // "gci-ensure-gke-docker" + // "instance-template" + // "kube-env" + // "startup-script" + // "user-data" + // "disable-address-manager" + // "windows-startup-script-ps1" + // "common-psm1" + // "k8s-node-setup-psm1" + // "install-ssh-psm1" + // "user-profile-psm1" + // "serial-port-logging-enable" + // + // Values are free-form strings, and only have meaning as interpreted by + // the image running in the instance. The only restriction placed on them is + // that each value's size must be less than or equal to 32 KB. + // + // The total size of all keys and values must be less than 512 KB. + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The image type to use for this node. Note that for a given image type, + // the latest version of it will be used. + ImageType string `protobuf:"bytes,5,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"` + // The map of Kubernetes labels (key/value pairs) to be applied to each node. + // These will added in addition to any default label(s) that + // Kubernetes may apply to the node. + // In case of conflict in label keys, the applied set may differ depending on + // the Kubernetes version -- it's best to assume the behavior is undefined + // and conflicts should be avoided. 
+ // For more information, including usage and the valid values, see: + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of local SSD disks to be attached to the node. + // + // The limit for this value is dependent upon the maximum number of + // disks available on a machine per zone. See: + // https://cloud.google.com/compute/docs/disks/local-ssd + // for more information. + LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount,proto3" json:"local_ssd_count,omitempty"` + // The list of instance tags applied to all nodes. Tags are used to identify + // valid sources or targets for network firewalls and are specified by + // the client during cluster or node pool creation. Each tag within the list + // must comply with RFC1035. + Tags []string `protobuf:"bytes,8,rep,name=tags,proto3" json:"tags,omitempty"` + // Whether the nodes are created as preemptible VM instances. See: + // https://cloud.google.com/compute/docs/instances/preemptible for more + // information about preemptible VM instances. + Preemptible bool `protobuf:"varint,10,opt,name=preemptible,proto3" json:"preemptible,omitempty"` + // A list of hardware accelerators to be attached to each node. + // See https://cloud.google.com/compute/docs/gpus for more information about + // support for GPUs. + Accelerators []*AcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators,proto3" json:"accelerators,omitempty"` + // Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd') + // + // If unspecified, the default disk type is 'pd-standard' + DiskType string `protobuf:"bytes,12,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` + // Minimum CPU platform to be used by this instance. 
The instance may be + // scheduled on the specified or newer CPU platform. Applicable values are the + // friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" or + // minCpuPlatform: "Intel Sandy Bridge". For more + // information, read [how to specify min CPU + // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) + MinCpuPlatform string `protobuf:"bytes,13,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"` + // List of kubernetes taints to be applied to each node. + // + // For more information, including usage and the valid values, see: + // https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + Taints []*NodeTaint `protobuf:"bytes,15,rep,name=taints,proto3" json:"taints,omitempty"` + // Shielded Instance options. + ShieldedInstanceConfig *ShieldedInstanceConfig `protobuf:"bytes,20,opt,name=shielded_instance_config,json=shieldedInstanceConfig,proto3" json:"shielded_instance_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeConfig) Reset() { *m = NodeConfig{} } +func (m *NodeConfig) String() string { return proto.CompactTextString(m) } +func (*NodeConfig) ProtoMessage() {} +func (*NodeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{0} +} + +func (m *NodeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeConfig.Unmarshal(m, b) +} +func (m *NodeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeConfig.Marshal(b, m, deterministic) +} +func (m *NodeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfig.Merge(m, src) +} +func (m *NodeConfig) XXX_Size() int { + return xxx_messageInfo_NodeConfig.Size(m) +} +func (m *NodeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeConfig 
proto.InternalMessageInfo + +func (m *NodeConfig) GetMachineType() string { + if m != nil { + return m.MachineType + } + return "" +} + +func (m *NodeConfig) GetDiskSizeGb() int32 { + if m != nil { + return m.DiskSizeGb + } + return 0 +} + +func (m *NodeConfig) GetOauthScopes() []string { + if m != nil { + return m.OauthScopes + } + return nil +} + +func (m *NodeConfig) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *NodeConfig) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *NodeConfig) GetImageType() string { + if m != nil { + return m.ImageType + } + return "" +} + +func (m *NodeConfig) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NodeConfig) GetLocalSsdCount() int32 { + if m != nil { + return m.LocalSsdCount + } + return 0 +} + +func (m *NodeConfig) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *NodeConfig) GetPreemptible() bool { + if m != nil { + return m.Preemptible + } + return false +} + +func (m *NodeConfig) GetAccelerators() []*AcceleratorConfig { + if m != nil { + return m.Accelerators + } + return nil +} + +func (m *NodeConfig) GetDiskType() string { + if m != nil { + return m.DiskType + } + return "" +} + +func (m *NodeConfig) GetMinCpuPlatform() string { + if m != nil { + return m.MinCpuPlatform + } + return "" +} + +func (m *NodeConfig) GetTaints() []*NodeTaint { + if m != nil { + return m.Taints + } + return nil +} + +func (m *NodeConfig) GetShieldedInstanceConfig() *ShieldedInstanceConfig { + if m != nil { + return m.ShieldedInstanceConfig + } + return nil +} + +// A set of Shielded Instance options. +type ShieldedInstanceConfig struct { + // Defines whether the instance has Secure Boot enabled. 
+ // + // Secure Boot helps ensure that the system only runs authentic software by + // verifying the digital signature of all boot components, and halting the + // boot process if signature verification fails. + EnableSecureBoot bool `protobuf:"varint,1,opt,name=enable_secure_boot,json=enableSecureBoot,proto3" json:"enable_secure_boot,omitempty"` + // Defines whether the instance has integrity monitoring enabled. + // + // Enables monitoring and attestation of the boot integrity of the instance. + // The attestation is performed against the integrity policy baseline. This + // baseline is initially derived from the implicitly trusted boot image when + // the instance is created. + EnableIntegrityMonitoring bool `protobuf:"varint,2,opt,name=enable_integrity_monitoring,json=enableIntegrityMonitoring,proto3" json:"enable_integrity_monitoring,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShieldedInstanceConfig) Reset() { *m = ShieldedInstanceConfig{} } +func (m *ShieldedInstanceConfig) String() string { return proto.CompactTextString(m) } +func (*ShieldedInstanceConfig) ProtoMessage() {} +func (*ShieldedInstanceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{1} +} + +func (m *ShieldedInstanceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShieldedInstanceConfig.Unmarshal(m, b) +} +func (m *ShieldedInstanceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShieldedInstanceConfig.Marshal(b, m, deterministic) +} +func (m *ShieldedInstanceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShieldedInstanceConfig.Merge(m, src) +} +func (m *ShieldedInstanceConfig) XXX_Size() int { + return xxx_messageInfo_ShieldedInstanceConfig.Size(m) +} +func (m *ShieldedInstanceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ShieldedInstanceConfig.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ShieldedInstanceConfig proto.InternalMessageInfo + +func (m *ShieldedInstanceConfig) GetEnableSecureBoot() bool { + if m != nil { + return m.EnableSecureBoot + } + return false +} + +func (m *ShieldedInstanceConfig) GetEnableIntegrityMonitoring() bool { + if m != nil { + return m.EnableIntegrityMonitoring + } + return false +} + +// Kubernetes taint is comprised of three fields: key, value, and effect. Effect +// can only be one of three types: NoSchedule, PreferNoSchedule or NoExecute. +// +// For more information, including usage and the valid values, see: +// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +type NodeTaint struct { + // Key for taint. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Value for taint. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Effect for taint. + Effect NodeTaint_Effect `protobuf:"varint,3,opt,name=effect,proto3,enum=google.container.v1.NodeTaint_Effect" json:"effect,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeTaint) Reset() { *m = NodeTaint{} } +func (m *NodeTaint) String() string { return proto.CompactTextString(m) } +func (*NodeTaint) ProtoMessage() {} +func (*NodeTaint) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{2} +} + +func (m *NodeTaint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeTaint.Unmarshal(m, b) +} +func (m *NodeTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeTaint.Marshal(b, m, deterministic) +} +func (m *NodeTaint) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeTaint.Merge(m, src) +} +func (m *NodeTaint) XXX_Size() int { + return xxx_messageInfo_NodeTaint.Size(m) +} +func (m *NodeTaint) XXX_DiscardUnknown() { + xxx_messageInfo_NodeTaint.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeTaint 
proto.InternalMessageInfo + +func (m *NodeTaint) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *NodeTaint) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *NodeTaint) GetEffect() NodeTaint_Effect { + if m != nil { + return m.Effect + } + return NodeTaint_EFFECT_UNSPECIFIED +} + +// The authentication information for accessing the master endpoint. +// Authentication can be done using HTTP basic auth or using client +// certificates. +type MasterAuth struct { + // The username to use for HTTP basic authentication to the master endpoint. + // For clusters v1.6.0 and later, basic authentication can be disabled by + // leaving username unspecified (or setting it to the empty string). + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to use for HTTP basic authentication to the master endpoint. + // Because the master endpoint is open to the Internet, you should create a + // strong password. If a password is provided for cluster creation, username + // must be non-empty. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // Configuration for client certificate authentication on the cluster. For + // clusters before v1.12, if no configuration is specified, a client + // certificate is issued. + ClientCertificateConfig *ClientCertificateConfig `protobuf:"bytes,3,opt,name=client_certificate_config,json=clientCertificateConfig,proto3" json:"client_certificate_config,omitempty"` + // [Output only] Base64-encoded public certificate that is the root of + // trust for the cluster. + ClusterCaCertificate string `protobuf:"bytes,100,opt,name=cluster_ca_certificate,json=clusterCaCertificate,proto3" json:"cluster_ca_certificate,omitempty"` + // [Output only] Base64-encoded public certificate used by clients to + // authenticate to the cluster endpoint. 
+ ClientCertificate string `protobuf:"bytes,101,opt,name=client_certificate,json=clientCertificate,proto3" json:"client_certificate,omitempty"` + // [Output only] Base64-encoded private key used by clients to authenticate + // to the cluster endpoint. + ClientKey string `protobuf:"bytes,102,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MasterAuth) Reset() { *m = MasterAuth{} } +func (m *MasterAuth) String() string { return proto.CompactTextString(m) } +func (*MasterAuth) ProtoMessage() {} +func (*MasterAuth) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{3} +} + +func (m *MasterAuth) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MasterAuth.Unmarshal(m, b) +} +func (m *MasterAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MasterAuth.Marshal(b, m, deterministic) +} +func (m *MasterAuth) XXX_Merge(src proto.Message) { + xxx_messageInfo_MasterAuth.Merge(m, src) +} +func (m *MasterAuth) XXX_Size() int { + return xxx_messageInfo_MasterAuth.Size(m) +} +func (m *MasterAuth) XXX_DiscardUnknown() { + xxx_messageInfo_MasterAuth.DiscardUnknown(m) +} + +var xxx_messageInfo_MasterAuth proto.InternalMessageInfo + +func (m *MasterAuth) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *MasterAuth) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *MasterAuth) GetClientCertificateConfig() *ClientCertificateConfig { + if m != nil { + return m.ClientCertificateConfig + } + return nil +} + +func (m *MasterAuth) GetClusterCaCertificate() string { + if m != nil { + return m.ClusterCaCertificate + } + return "" +} + +func (m *MasterAuth) GetClientCertificate() string { + if m != nil { + return m.ClientCertificate + } + return "" +} + +func (m *MasterAuth) GetClientKey() 
string { + if m != nil { + return m.ClientKey + } + return "" +} + +// Configuration for client certificates on the cluster. +type ClientCertificateConfig struct { + // Issue a client certificate. + IssueClientCertificate bool `protobuf:"varint,1,opt,name=issue_client_certificate,json=issueClientCertificate,proto3" json:"issue_client_certificate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientCertificateConfig) Reset() { *m = ClientCertificateConfig{} } +func (m *ClientCertificateConfig) String() string { return proto.CompactTextString(m) } +func (*ClientCertificateConfig) ProtoMessage() {} +func (*ClientCertificateConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{4} +} + +func (m *ClientCertificateConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientCertificateConfig.Unmarshal(m, b) +} +func (m *ClientCertificateConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientCertificateConfig.Marshal(b, m, deterministic) +} +func (m *ClientCertificateConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientCertificateConfig.Merge(m, src) +} +func (m *ClientCertificateConfig) XXX_Size() int { + return xxx_messageInfo_ClientCertificateConfig.Size(m) +} +func (m *ClientCertificateConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientCertificateConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientCertificateConfig proto.InternalMessageInfo + +func (m *ClientCertificateConfig) GetIssueClientCertificate() bool { + if m != nil { + return m.IssueClientCertificate + } + return false +} + +// Configuration for the addons that can be automatically spun up in the +// cluster, enabling additional functionality. 
+type AddonsConfig struct { + // Configuration for the HTTP (L7) load balancing controller addon, which + // makes it easy to set up HTTP load balancers for services in a cluster. + HttpLoadBalancing *HttpLoadBalancing `protobuf:"bytes,1,opt,name=http_load_balancing,json=httpLoadBalancing,proto3" json:"http_load_balancing,omitempty"` + // Configuration for the horizontal pod autoscaling feature, which + // increases or decreases the number of replica pods a replication controller + // has based on the resource usage of the existing pods. + HorizontalPodAutoscaling *HorizontalPodAutoscaling `protobuf:"bytes,2,opt,name=horizontal_pod_autoscaling,json=horizontalPodAutoscaling,proto3" json:"horizontal_pod_autoscaling,omitempty"` + // Configuration for the Kubernetes Dashboard. + // This addon is deprecated, and will be disabled in 1.15. It is recommended + // to use the Cloud Console to manage and monitor your Kubernetes clusters, + // workloads and applications. For more information, see: + // https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards + KubernetesDashboard *KubernetesDashboard `protobuf:"bytes,3,opt,name=kubernetes_dashboard,json=kubernetesDashboard,proto3" json:"kubernetes_dashboard,omitempty"` // Deprecated: Do not use. + // Configuration for NetworkPolicy. This only tracks whether the addon + // is enabled or not on the Master, it does not track whether network policy + // is enabled for the nodes. + NetworkPolicyConfig *NetworkPolicyConfig `protobuf:"bytes,4,opt,name=network_policy_config,json=networkPolicyConfig,proto3" json:"network_policy_config,omitempty"` + // Configuration for the Cloud Run addon, which allows the user to use a + // managed Knative service. 
+ CloudRunConfig *CloudRunConfig `protobuf:"bytes,7,opt,name=cloud_run_config,json=cloudRunConfig,proto3" json:"cloud_run_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddonsConfig) Reset() { *m = AddonsConfig{} } +func (m *AddonsConfig) String() string { return proto.CompactTextString(m) } +func (*AddonsConfig) ProtoMessage() {} +func (*AddonsConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{5} +} + +func (m *AddonsConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddonsConfig.Unmarshal(m, b) +} +func (m *AddonsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddonsConfig.Marshal(b, m, deterministic) +} +func (m *AddonsConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddonsConfig.Merge(m, src) +} +func (m *AddonsConfig) XXX_Size() int { + return xxx_messageInfo_AddonsConfig.Size(m) +} +func (m *AddonsConfig) XXX_DiscardUnknown() { + xxx_messageInfo_AddonsConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_AddonsConfig proto.InternalMessageInfo + +func (m *AddonsConfig) GetHttpLoadBalancing() *HttpLoadBalancing { + if m != nil { + return m.HttpLoadBalancing + } + return nil +} + +func (m *AddonsConfig) GetHorizontalPodAutoscaling() *HorizontalPodAutoscaling { + if m != nil { + return m.HorizontalPodAutoscaling + } + return nil +} + +// Deprecated: Do not use. 
+func (m *AddonsConfig) GetKubernetesDashboard() *KubernetesDashboard { + if m != nil { + return m.KubernetesDashboard + } + return nil +} + +func (m *AddonsConfig) GetNetworkPolicyConfig() *NetworkPolicyConfig { + if m != nil { + return m.NetworkPolicyConfig + } + return nil +} + +func (m *AddonsConfig) GetCloudRunConfig() *CloudRunConfig { + if m != nil { + return m.CloudRunConfig + } + return nil +} + +// Configuration options for the HTTP (L7) load balancing controller addon, +// which makes it easy to set up HTTP load balancers for services in a cluster. +type HttpLoadBalancing struct { + // Whether the HTTP Load Balancing controller is enabled in the cluster. + // When enabled, it runs a small pod in the cluster that manages the load + // balancers. + Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpLoadBalancing) Reset() { *m = HttpLoadBalancing{} } +func (m *HttpLoadBalancing) String() string { return proto.CompactTextString(m) } +func (*HttpLoadBalancing) ProtoMessage() {} +func (*HttpLoadBalancing) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{6} +} + +func (m *HttpLoadBalancing) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpLoadBalancing.Unmarshal(m, b) +} +func (m *HttpLoadBalancing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpLoadBalancing.Marshal(b, m, deterministic) +} +func (m *HttpLoadBalancing) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpLoadBalancing.Merge(m, src) +} +func (m *HttpLoadBalancing) XXX_Size() int { + return xxx_messageInfo_HttpLoadBalancing.Size(m) +} +func (m *HttpLoadBalancing) XXX_DiscardUnknown() { + xxx_messageInfo_HttpLoadBalancing.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpLoadBalancing proto.InternalMessageInfo + +func (m *HttpLoadBalancing) 
GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration options for the horizontal pod autoscaling feature, which +// increases or decreases the number of replica pods a replication controller +// has based on the resource usage of the existing pods. +type HorizontalPodAutoscaling struct { + // Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. + // When enabled, it ensures that a Heapster pod is running in the cluster, + // which is also used by the Cloud Monitoring service. + Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HorizontalPodAutoscaling) Reset() { *m = HorizontalPodAutoscaling{} } +func (m *HorizontalPodAutoscaling) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscaling) ProtoMessage() {} +func (*HorizontalPodAutoscaling) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{7} +} + +func (m *HorizontalPodAutoscaling) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HorizontalPodAutoscaling.Unmarshal(m, b) +} +func (m *HorizontalPodAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HorizontalPodAutoscaling.Marshal(b, m, deterministic) +} +func (m *HorizontalPodAutoscaling) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaling.Merge(m, src) +} +func (m *HorizontalPodAutoscaling) XXX_Size() int { + return xxx_messageInfo_HorizontalPodAutoscaling.Size(m) +} +func (m *HorizontalPodAutoscaling) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaling.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaling proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaling) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration for the 
Kubernetes Dashboard. +type KubernetesDashboard struct { + // Whether the Kubernetes Dashboard is enabled for this cluster. + Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} } +func (m *KubernetesDashboard) String() string { return proto.CompactTextString(m) } +func (*KubernetesDashboard) ProtoMessage() {} +func (*KubernetesDashboard) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{8} +} + +func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KubernetesDashboard.Unmarshal(m, b) +} +func (m *KubernetesDashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KubernetesDashboard.Marshal(b, m, deterministic) +} +func (m *KubernetesDashboard) XXX_Merge(src proto.Message) { + xxx_messageInfo_KubernetesDashboard.Merge(m, src) +} +func (m *KubernetesDashboard) XXX_Size() int { + return xxx_messageInfo_KubernetesDashboard.Size(m) +} +func (m *KubernetesDashboard) XXX_DiscardUnknown() { + xxx_messageInfo_KubernetesDashboard.DiscardUnknown(m) +} + +var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo + +func (m *KubernetesDashboard) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration for NetworkPolicy. This only tracks whether the addon +// is enabled or not on the Master, it does not track whether network policy +// is enabled for the nodes. +type NetworkPolicyConfig struct { + // Whether NetworkPolicy is enabled for this cluster. 
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkPolicyConfig) Reset() { *m = NetworkPolicyConfig{} } +func (m *NetworkPolicyConfig) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyConfig) ProtoMessage() {} +func (*NetworkPolicyConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{9} +} + +func (m *NetworkPolicyConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkPolicyConfig.Unmarshal(m, b) +} +func (m *NetworkPolicyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkPolicyConfig.Marshal(b, m, deterministic) +} +func (m *NetworkPolicyConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyConfig.Merge(m, src) +} +func (m *NetworkPolicyConfig) XXX_Size() int { + return xxx_messageInfo_NetworkPolicyConfig.Size(m) +} +func (m *NetworkPolicyConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyConfig proto.InternalMessageInfo + +func (m *NetworkPolicyConfig) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration options for private clusters. +type PrivateClusterConfig struct { + // Whether nodes have internal IP addresses only. If enabled, all nodes are + // given only RFC 1918 private addresses and communicate with the master via + // private networking. + EnablePrivateNodes bool `protobuf:"varint,1,opt,name=enable_private_nodes,json=enablePrivateNodes,proto3" json:"enable_private_nodes,omitempty"` + // Whether the master's internal IP address is used as the cluster endpoint. 
+ EnablePrivateEndpoint bool `protobuf:"varint,2,opt,name=enable_private_endpoint,json=enablePrivateEndpoint,proto3" json:"enable_private_endpoint,omitempty"` + // The IP range in CIDR notation to use for the hosted master network. This + // range will be used for assigning internal IP addresses to the master or + // set of masters, as well as the ILB VIP. This range must not overlap with + // any other ranges in use within the cluster's network. + MasterIpv4CidrBlock string `protobuf:"bytes,3,opt,name=master_ipv4_cidr_block,json=masterIpv4CidrBlock,proto3" json:"master_ipv4_cidr_block,omitempty"` + // Output only. The internal IP address of this cluster's master endpoint. + PrivateEndpoint string `protobuf:"bytes,4,opt,name=private_endpoint,json=privateEndpoint,proto3" json:"private_endpoint,omitempty"` + // Output only. The external IP address of this cluster's master endpoint. + PublicEndpoint string `protobuf:"bytes,5,opt,name=public_endpoint,json=publicEndpoint,proto3" json:"public_endpoint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrivateClusterConfig) Reset() { *m = PrivateClusterConfig{} } +func (m *PrivateClusterConfig) String() string { return proto.CompactTextString(m) } +func (*PrivateClusterConfig) ProtoMessage() {} +func (*PrivateClusterConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{10} +} + +func (m *PrivateClusterConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrivateClusterConfig.Unmarshal(m, b) +} +func (m *PrivateClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrivateClusterConfig.Marshal(b, m, deterministic) +} +func (m *PrivateClusterConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrivateClusterConfig.Merge(m, src) +} +func (m *PrivateClusterConfig) XXX_Size() int { + return xxx_messageInfo_PrivateClusterConfig.Size(m) +} +func (m 
*PrivateClusterConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrivateClusterConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrivateClusterConfig proto.InternalMessageInfo + +func (m *PrivateClusterConfig) GetEnablePrivateNodes() bool { + if m != nil { + return m.EnablePrivateNodes + } + return false +} + +func (m *PrivateClusterConfig) GetEnablePrivateEndpoint() bool { + if m != nil { + return m.EnablePrivateEndpoint + } + return false +} + +func (m *PrivateClusterConfig) GetMasterIpv4CidrBlock() string { + if m != nil { + return m.MasterIpv4CidrBlock + } + return "" +} + +func (m *PrivateClusterConfig) GetPrivateEndpoint() string { + if m != nil { + return m.PrivateEndpoint + } + return "" +} + +func (m *PrivateClusterConfig) GetPublicEndpoint() string { + if m != nil { + return m.PublicEndpoint + } + return "" +} + +// Configuration for returning group information from authenticators. +type AuthenticatorGroupsConfig struct { + // Whether this cluster should return group membership lookups + // during authentication using a group of security groups. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // The name of the security group-of-groups to be used. Only relevant + // if enabled = true. 
+ SecurityGroup string `protobuf:"bytes,2,opt,name=security_group,json=securityGroup,proto3" json:"security_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthenticatorGroupsConfig) Reset() { *m = AuthenticatorGroupsConfig{} } +func (m *AuthenticatorGroupsConfig) String() string { return proto.CompactTextString(m) } +func (*AuthenticatorGroupsConfig) ProtoMessage() {} +func (*AuthenticatorGroupsConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{11} +} + +func (m *AuthenticatorGroupsConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AuthenticatorGroupsConfig.Unmarshal(m, b) +} +func (m *AuthenticatorGroupsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AuthenticatorGroupsConfig.Marshal(b, m, deterministic) +} +func (m *AuthenticatorGroupsConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthenticatorGroupsConfig.Merge(m, src) +} +func (m *AuthenticatorGroupsConfig) XXX_Size() int { + return xxx_messageInfo_AuthenticatorGroupsConfig.Size(m) +} +func (m *AuthenticatorGroupsConfig) XXX_DiscardUnknown() { + xxx_messageInfo_AuthenticatorGroupsConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthenticatorGroupsConfig proto.InternalMessageInfo + +func (m *AuthenticatorGroupsConfig) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *AuthenticatorGroupsConfig) GetSecurityGroup() string { + if m != nil { + return m.SecurityGroup + } + return "" +} + +// Configuration options for the Cloud Run feature. +type CloudRunConfig struct { + // Whether Cloud Run addon is enabled for this cluster. 
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloudRunConfig) Reset() { *m = CloudRunConfig{} } +func (m *CloudRunConfig) String() string { return proto.CompactTextString(m) } +func (*CloudRunConfig) ProtoMessage() {} +func (*CloudRunConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{12} +} + +func (m *CloudRunConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloudRunConfig.Unmarshal(m, b) +} +func (m *CloudRunConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloudRunConfig.Marshal(b, m, deterministic) +} +func (m *CloudRunConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudRunConfig.Merge(m, src) +} +func (m *CloudRunConfig) XXX_Size() int { + return xxx_messageInfo_CloudRunConfig.Size(m) +} +func (m *CloudRunConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CloudRunConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudRunConfig proto.InternalMessageInfo + +func (m *CloudRunConfig) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// Configuration options for the master authorized networks feature. Enabled +// master authorized networks will disallow all external traffic to access +// Kubernetes master through HTTPS except traffic from the given CIDR blocks, +// Google Compute Engine Public IPs and Google Prod IPs. +type MasterAuthorizedNetworksConfig struct { + // Whether or not master authorized networks is enabled. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // cidr_blocks define up to 50 external networks that could access + // Kubernetes master through HTTPS. 
+ CidrBlocks []*MasterAuthorizedNetworksConfig_CidrBlock `protobuf:"bytes,2,rep,name=cidr_blocks,json=cidrBlocks,proto3" json:"cidr_blocks,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MasterAuthorizedNetworksConfig) Reset() { *m = MasterAuthorizedNetworksConfig{} } +func (m *MasterAuthorizedNetworksConfig) String() string { return proto.CompactTextString(m) } +func (*MasterAuthorizedNetworksConfig) ProtoMessage() {} +func (*MasterAuthorizedNetworksConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{13} +} + +func (m *MasterAuthorizedNetworksConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MasterAuthorizedNetworksConfig.Unmarshal(m, b) +} +func (m *MasterAuthorizedNetworksConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MasterAuthorizedNetworksConfig.Marshal(b, m, deterministic) +} +func (m *MasterAuthorizedNetworksConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_MasterAuthorizedNetworksConfig.Merge(m, src) +} +func (m *MasterAuthorizedNetworksConfig) XXX_Size() int { + return xxx_messageInfo_MasterAuthorizedNetworksConfig.Size(m) +} +func (m *MasterAuthorizedNetworksConfig) XXX_DiscardUnknown() { + xxx_messageInfo_MasterAuthorizedNetworksConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_MasterAuthorizedNetworksConfig proto.InternalMessageInfo + +func (m *MasterAuthorizedNetworksConfig) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *MasterAuthorizedNetworksConfig) GetCidrBlocks() []*MasterAuthorizedNetworksConfig_CidrBlock { + if m != nil { + return m.CidrBlocks + } + return nil +} + +// CidrBlock contains an optional name and one CIDR block. +type MasterAuthorizedNetworksConfig_CidrBlock struct { + // display_name is an optional field for users to identify CIDR blocks. 
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // cidr_block must be specified in CIDR notation. + CidrBlock string `protobuf:"bytes,2,opt,name=cidr_block,json=cidrBlock,proto3" json:"cidr_block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) Reset() { + *m = MasterAuthorizedNetworksConfig_CidrBlock{} +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) String() string { return proto.CompactTextString(m) } +func (*MasterAuthorizedNetworksConfig_CidrBlock) ProtoMessage() {} +func (*MasterAuthorizedNetworksConfig_CidrBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{13, 0} +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Unmarshal(m, b) +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Marshal(b, m, deterministic) +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Merge(m, src) +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Size() int { + return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Size(m) +} +func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_DiscardUnknown() { + xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock proto.InternalMessageInfo + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetCidrBlock() string { + if m != nil { + return 
m.CidrBlock + } + return "" +} + +// Configuration for the legacy Attribute Based Access Control authorization +// mode. +type LegacyAbac struct { + // Whether the ABAC authorizer is enabled for this cluster. When enabled, + // identities in the system, including service accounts, nodes, and + // controllers, will have statically granted permissions beyond those + // provided by the RBAC configuration or IAM. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LegacyAbac) Reset() { *m = LegacyAbac{} } +func (m *LegacyAbac) String() string { return proto.CompactTextString(m) } +func (*LegacyAbac) ProtoMessage() {} +func (*LegacyAbac) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{14} +} + +func (m *LegacyAbac) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LegacyAbac.Unmarshal(m, b) +} +func (m *LegacyAbac) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LegacyAbac.Marshal(b, m, deterministic) +} +func (m *LegacyAbac) XXX_Merge(src proto.Message) { + xxx_messageInfo_LegacyAbac.Merge(m, src) +} +func (m *LegacyAbac) XXX_Size() int { + return xxx_messageInfo_LegacyAbac.Size(m) +} +func (m *LegacyAbac) XXX_DiscardUnknown() { + xxx_messageInfo_LegacyAbac.DiscardUnknown(m) +} + +var xxx_messageInfo_LegacyAbac proto.InternalMessageInfo + +func (m *LegacyAbac) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// Configuration options for the NetworkPolicy feature. +// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/ +type NetworkPolicy struct { + // The selected network policy provider. 
+ Provider NetworkPolicy_Provider `protobuf:"varint,1,opt,name=provider,proto3,enum=google.container.v1.NetworkPolicy_Provider" json:"provider,omitempty"` + // Whether network policy is enabled on the cluster. + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{15} +} + +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkPolicy.Unmarshal(m, b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkPolicy.Marshal(b, m, deterministic) +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return xxx_messageInfo_NetworkPolicy.Size(m) +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicy) GetProvider() NetworkPolicy_Provider { + if m != nil { + return m.Provider + } + return NetworkPolicy_PROVIDER_UNSPECIFIED +} + +func (m *NetworkPolicy) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// Configuration for Binary Authorization. +type BinaryAuthorization struct { + // Enable Binary Authorization for this cluster. If enabled, all container + // images will be validated by Binary Authorization. 
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BinaryAuthorization) Reset() { *m = BinaryAuthorization{} } +func (m *BinaryAuthorization) String() string { return proto.CompactTextString(m) } +func (*BinaryAuthorization) ProtoMessage() {} +func (*BinaryAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{16} +} + +func (m *BinaryAuthorization) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BinaryAuthorization.Unmarshal(m, b) +} +func (m *BinaryAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BinaryAuthorization.Marshal(b, m, deterministic) +} +func (m *BinaryAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryAuthorization.Merge(m, src) +} +func (m *BinaryAuthorization) XXX_Size() int { + return xxx_messageInfo_BinaryAuthorization.Size(m) +} +func (m *BinaryAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryAuthorization.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryAuthorization proto.InternalMessageInfo + +func (m *BinaryAuthorization) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// Configuration for controlling how IPs are allocated in the cluster. +type IPAllocationPolicy struct { + // Whether alias IPs will be used for pod IPs in the cluster. + UseIpAliases bool `protobuf:"varint,1,opt,name=use_ip_aliases,json=useIpAliases,proto3" json:"use_ip_aliases,omitempty"` + // Whether a new subnetwork will be created automatically for the cluster. + // + // This field is only applicable when `use_ip_aliases` is true. + CreateSubnetwork bool `protobuf:"varint,2,opt,name=create_subnetwork,json=createSubnetwork,proto3" json:"create_subnetwork,omitempty"` + // A custom subnetwork name to be used if `create_subnetwork` is true. 
If + // this field is empty, then an automatic name will be chosen for the new + // subnetwork. + SubnetworkName string `protobuf:"bytes,3,opt,name=subnetwork_name,json=subnetworkName,proto3" json:"subnetwork_name,omitempty"` + // This field is deprecated, use cluster_ipv4_cidr_block. + ClusterIpv4Cidr string `protobuf:"bytes,4,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"` // Deprecated: Do not use. + // This field is deprecated, use node_ipv4_cidr_block. + NodeIpv4Cidr string `protobuf:"bytes,5,opt,name=node_ipv4_cidr,json=nodeIpv4Cidr,proto3" json:"node_ipv4_cidr,omitempty"` // Deprecated: Do not use. + // This field is deprecated, use services_ipv4_cidr_block. + ServicesIpv4Cidr string `protobuf:"bytes,6,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"` // Deprecated: Do not use. + // The name of the secondary range to be used for the cluster CIDR + // block. The secondary range will be used for pod IP + // addresses. This must be an existing secondary range associated + // with the cluster subnetwork. + // + // This field is only applicable with use_ip_aliases is true and + // create_subnetwork is false. + ClusterSecondaryRangeName string `protobuf:"bytes,7,opt,name=cluster_secondary_range_name,json=clusterSecondaryRangeName,proto3" json:"cluster_secondary_range_name,omitempty"` + // The name of the secondary range to be used as for the services + // CIDR block. The secondary range will be used for service + // ClusterIPs. This must be an existing secondary range associated + // with the cluster subnetwork. + // + // This field is only applicable with use_ip_aliases is true and + // create_subnetwork is false. + ServicesSecondaryRangeName string `protobuf:"bytes,8,opt,name=services_secondary_range_name,json=servicesSecondaryRangeName,proto3" json:"services_secondary_range_name,omitempty"` + // The IP address range for the cluster pod IPs. 
If this field is set, then + // `cluster.cluster_ipv4_cidr` must be left blank. + // + // This field is only applicable when `use_ip_aliases` is true. + // + // Set to blank to have a range chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. + // + // Set to a + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + ClusterIpv4CidrBlock string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr_block,json=clusterIpv4CidrBlock,proto3" json:"cluster_ipv4_cidr_block,omitempty"` + // The IP address range of the instance IPs in this cluster. + // + // This is applicable only if `create_subnetwork` is true. + // + // Set to blank to have a range chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. + // + // Set to a + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + NodeIpv4CidrBlock string `protobuf:"bytes,10,opt,name=node_ipv4_cidr_block,json=nodeIpv4CidrBlock,proto3" json:"node_ipv4_cidr_block,omitempty"` + // The IP address range of the services IPs in this cluster. If blank, a range + // will be automatically chosen with the default size. + // + // This field is only applicable when `use_ip_aliases` is true. + // + // Set to blank to have a range chosen with the default size. + // + // Set to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. + // + // Set to a + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. 
+ // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + ServicesIpv4CidrBlock string `protobuf:"bytes,11,opt,name=services_ipv4_cidr_block,json=servicesIpv4CidrBlock,proto3" json:"services_ipv4_cidr_block,omitempty"` + // The IP address range of the Cloud TPUs in this cluster. If unspecified, a + // range will be automatically chosen with the default size. + // + // This field is only applicable when `use_ip_aliases` is true. + // + // If unspecified, the range will use the default size. + // + // Set to /netmask (e.g. `/14`) to have a range chosen with a specific + // netmask. + // + // Set to a + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g. + // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range + // to use. + TpuIpv4CidrBlock string `protobuf:"bytes,13,opt,name=tpu_ipv4_cidr_block,json=tpuIpv4CidrBlock,proto3" json:"tpu_ipv4_cidr_block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IPAllocationPolicy) Reset() { *m = IPAllocationPolicy{} } +func (m *IPAllocationPolicy) String() string { return proto.CompactTextString(m) } +func (*IPAllocationPolicy) ProtoMessage() {} +func (*IPAllocationPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{17} +} + +func (m *IPAllocationPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IPAllocationPolicy.Unmarshal(m, b) +} +func (m *IPAllocationPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IPAllocationPolicy.Marshal(b, m, deterministic) +} +func (m *IPAllocationPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAllocationPolicy.Merge(m, src) +} +func (m *IPAllocationPolicy) XXX_Size() int { + return xxx_messageInfo_IPAllocationPolicy.Size(m) +} +func (m *IPAllocationPolicy) 
XXX_DiscardUnknown() { + xxx_messageInfo_IPAllocationPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAllocationPolicy proto.InternalMessageInfo + +func (m *IPAllocationPolicy) GetUseIpAliases() bool { + if m != nil { + return m.UseIpAliases + } + return false +} + +func (m *IPAllocationPolicy) GetCreateSubnetwork() bool { + if m != nil { + return m.CreateSubnetwork + } + return false +} + +func (m *IPAllocationPolicy) GetSubnetworkName() string { + if m != nil { + return m.SubnetworkName + } + return "" +} + +// Deprecated: Do not use. +func (m *IPAllocationPolicy) GetClusterIpv4Cidr() string { + if m != nil { + return m.ClusterIpv4Cidr + } + return "" +} + +// Deprecated: Do not use. +func (m *IPAllocationPolicy) GetNodeIpv4Cidr() string { + if m != nil { + return m.NodeIpv4Cidr + } + return "" +} + +// Deprecated: Do not use. +func (m *IPAllocationPolicy) GetServicesIpv4Cidr() string { + if m != nil { + return m.ServicesIpv4Cidr + } + return "" +} + +func (m *IPAllocationPolicy) GetClusterSecondaryRangeName() string { + if m != nil { + return m.ClusterSecondaryRangeName + } + return "" +} + +func (m *IPAllocationPolicy) GetServicesSecondaryRangeName() string { + if m != nil { + return m.ServicesSecondaryRangeName + } + return "" +} + +func (m *IPAllocationPolicy) GetClusterIpv4CidrBlock() string { + if m != nil { + return m.ClusterIpv4CidrBlock + } + return "" +} + +func (m *IPAllocationPolicy) GetNodeIpv4CidrBlock() string { + if m != nil { + return m.NodeIpv4CidrBlock + } + return "" +} + +func (m *IPAllocationPolicy) GetServicesIpv4CidrBlock() string { + if m != nil { + return m.ServicesIpv4CidrBlock + } + return "" +} + +func (m *IPAllocationPolicy) GetTpuIpv4CidrBlock() string { + if m != nil { + return m.TpuIpv4CidrBlock + } + return "" +} + +// A Google Kubernetes Engine cluster. +type Cluster struct { + // The name of this cluster. The name must be unique within this project + // and location (e.g. 
zone or region), and can be up to 40 characters with + // the following restrictions: + // + // * Lowercase letters, numbers, and hyphens only. + // * Must start with a letter. + // * Must end with a number or a letter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // An optional description of this cluster. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The number of nodes to create in this cluster. You must ensure that your + // Compute Engine resource quota + // is sufficient for this number of instances. You must also have available + // firewall and routes quota. + // For requests, this field should only be used in lieu of a + // "node_pool" object, since this configuration (along with the + // "node_config") will be used to create a "NodePool" object with an + // auto-generated name. Do not use this and a node_pool at the same time. + // + // This field is deprecated, use node_pool.initial_node_count instead. + InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"` // Deprecated: Do not use. + // Parameters used in creating the cluster's nodes. + // For requests, this field should only be used in lieu of a + // "node_pool" object, since this configuration (along with the + // "initial_node_count") will be used to create a "NodePool" object with an + // auto-generated name. Do not use this and a node_pool at the same time. + // For responses, this field will be populated with the node configuration of + // the first node pool. (For configuration of each node pool, see + // `node_pool.config`) + // + // If unspecified, the defaults are used. + // This field is deprecated, use node_pool.config instead. + NodeConfig *NodeConfig `protobuf:"bytes,4,opt,name=node_config,json=nodeConfig,proto3" json:"node_config,omitempty"` // Deprecated: Do not use. 
+ // The authentication information for accessing the master endpoint. + // If unspecified, the defaults are used: + // For clusters before v1.12, if master_auth is unspecified, `username` will + // be set to "admin", a random password will be generated, and a client + // certificate will be issued. + MasterAuth *MasterAuth `protobuf:"bytes,5,opt,name=master_auth,json=masterAuth,proto3" json:"master_auth,omitempty"` + // The logging service the cluster should use to write logs. + // Currently available options: + // + // * "logging.googleapis.com/kubernetes" - the Google Cloud Logging + // service with Kubernetes-native resource model + // * `logging.googleapis.com` - the Google Cloud Logging service. + // * `none` - no logs will be exported from the cluster. + // * if left as an empty string,`logging.googleapis.com` will be used. + LoggingService string `protobuf:"bytes,6,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"` + // The monitoring service the cluster should use to write metrics. + // Currently available options: + // + // * `monitoring.googleapis.com` - the Google Cloud Monitoring service. + // * `none` - no metrics will be exported from the cluster. + // * if left as an empty string, `monitoring.googleapis.com` will be used. + MonitoringService string `protobuf:"bytes,7,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"` + // The name of the Google Compute Engine + // [network](/compute/docs/networks-and-firewalls#networks) to which the + // cluster is connected. If left unspecified, the `default` network + // will be used. + Network string `protobuf:"bytes,8,opt,name=network,proto3" json:"network,omitempty"` + // The IP address range of the container pods in this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`). Leave blank to have + // one automatically chosen or specify a `/14` block in `10.0.0.0/8`. 
+ ClusterIpv4Cidr string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"` + // Configurations for the various addons available to run in the cluster. + AddonsConfig *AddonsConfig `protobuf:"bytes,10,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"` + // The name of the Google Compute Engine + // [subnetwork](/compute/docs/subnetworks) to which the + // cluster is connected. + Subnetwork string `protobuf:"bytes,11,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` + // The node pools associated with this cluster. + // This field should not be set if "node_config" or "initial_node_count" are + // specified. + NodePools []*NodePool `protobuf:"bytes,12,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"` + // The list of Google Compute Engine + // [zones](/compute/docs/zones#available) in which the cluster's nodes + // should be located. + Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"` + // Kubernetes alpha features are enabled on this cluster. This includes alpha + // API groups (e.g. v1alpha1) and features that may not be production ready in + // the kubernetes version of the master and nodes. + // The cluster has no SLA for uptime and master/node upgrades are disabled. + // Alpha enabled clusters are automatically deleted thirty days after + // creation. + EnableKubernetesAlpha bool `protobuf:"varint,14,opt,name=enable_kubernetes_alpha,json=enableKubernetesAlpha,proto3" json:"enable_kubernetes_alpha,omitempty"` + // The resource labels for the cluster to use to annotate any related + // Google Compute Engine resources. + ResourceLabels map[string]string `protobuf:"bytes,15,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The fingerprint of the set of labels for this cluster. 
+ LabelFingerprint string `protobuf:"bytes,16,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"` + // Configuration for the legacy ABAC authorization mode. + LegacyAbac *LegacyAbac `protobuf:"bytes,18,opt,name=legacy_abac,json=legacyAbac,proto3" json:"legacy_abac,omitempty"` + // Configuration options for the NetworkPolicy feature. + NetworkPolicy *NetworkPolicy `protobuf:"bytes,19,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"` + // Configuration for cluster IP allocation. + IpAllocationPolicy *IPAllocationPolicy `protobuf:"bytes,20,opt,name=ip_allocation_policy,json=ipAllocationPolicy,proto3" json:"ip_allocation_policy,omitempty"` + // The configuration options for master authorized networks feature. + MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,22,opt,name=master_authorized_networks_config,json=masterAuthorizedNetworksConfig,proto3" json:"master_authorized_networks_config,omitempty"` + // Configure the maintenance policy for this cluster. + MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,23,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"` + // Configuration for Binary Authorization. + BinaryAuthorization *BinaryAuthorization `protobuf:"bytes,24,opt,name=binary_authorization,json=binaryAuthorization,proto3" json:"binary_authorization,omitempty"` + // Cluster-level autoscaling configuration. + Autoscaling *ClusterAutoscaling `protobuf:"bytes,26,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` + // Configuration for cluster networking. + NetworkConfig *NetworkConfig `protobuf:"bytes,27,opt,name=network_config,json=networkConfig,proto3" json:"network_config,omitempty"` + // The default constraint on the maximum number of pods that can be run + // simultaneously on a node in the node pool of this cluster. Only honored + // if cluster created with IP Alias support. 
+ DefaultMaxPodsConstraint *MaxPodsConstraint `protobuf:"bytes,30,opt,name=default_max_pods_constraint,json=defaultMaxPodsConstraint,proto3" json:"default_max_pods_constraint,omitempty"` + // Configuration for exporting resource usages. Resource usage export is + // disabled when this config is unspecified. + ResourceUsageExportConfig *ResourceUsageExportConfig `protobuf:"bytes,33,opt,name=resource_usage_export_config,json=resourceUsageExportConfig,proto3" json:"resource_usage_export_config,omitempty"` + // Configuration controlling RBAC group membership information. + AuthenticatorGroupsConfig *AuthenticatorGroupsConfig `protobuf:"bytes,34,opt,name=authenticator_groups_config,json=authenticatorGroupsConfig,proto3" json:"authenticator_groups_config,omitempty"` + // Configuration for private cluster. + PrivateClusterConfig *PrivateClusterConfig `protobuf:"bytes,37,opt,name=private_cluster_config,json=privateClusterConfig,proto3" json:"private_cluster_config,omitempty"` + // Configuration of etcd encryption. + DatabaseEncryption *DatabaseEncryption `protobuf:"bytes,38,opt,name=database_encryption,json=databaseEncryption,proto3" json:"database_encryption,omitempty"` + // Cluster-level Vertical Pod Autoscaling configuration. + VerticalPodAutoscaling *VerticalPodAutoscaling `protobuf:"bytes,39,opt,name=vertical_pod_autoscaling,json=verticalPodAutoscaling,proto3" json:"vertical_pod_autoscaling,omitempty"` + // [Output only] Server-defined URL for the resource. + SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` + // [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field is deprecated, use location instead. + Zone string `protobuf:"bytes,101,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // [Output only] The IP address of this cluster's master endpoint. 
+ // The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. + // + // See the `masterAuth` property of this resource for username and + // password information. + Endpoint string `protobuf:"bytes,102,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // The initial Kubernetes version for this cluster. Valid versions are those + // found in validMasterVersions returned by getServerConfig. The version can + // be upgraded over time; such upgrades are reflected in + // currentMasterVersion and currentNodeVersion. + // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "","-": picks the default Kubernetes version + InitialClusterVersion string `protobuf:"bytes,103,opt,name=initial_cluster_version,json=initialClusterVersion,proto3" json:"initial_cluster_version,omitempty"` + // [Output only] The current software version of the master endpoint. + CurrentMasterVersion string `protobuf:"bytes,104,opt,name=current_master_version,json=currentMasterVersion,proto3" json:"current_master_version,omitempty"` + // [Output only] Deprecated, use + // [NodePools.version](/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools) + // instead. The current version of the node software components. If they are + // currently at multiple versions because they're in the process of being + // upgraded, this reflects the minimum version of all nodes. + CurrentNodeVersion string `protobuf:"bytes,105,opt,name=current_node_version,json=currentNodeVersion,proto3" json:"current_node_version,omitempty"` // Deprecated: Do not use. 
+ // [Output only] The time the cluster was created, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreateTime string `protobuf:"bytes,106,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // [Output only] The current status of this cluster. + Status Cluster_Status `protobuf:"varint,107,opt,name=status,proto3,enum=google.container.v1.Cluster_Status" json:"status,omitempty"` + // [Output only] Additional information about the current status of this + // cluster, if available. + StatusMessage string `protobuf:"bytes,108,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // [Output only] The size of the address space on each node for hosting + // containers. This is provisioned from within the `container_ipv4_cidr` + // range. This field will only be set when cluster is in route-based network + // mode. + NodeIpv4CidrSize int32 `protobuf:"varint,109,opt,name=node_ipv4_cidr_size,json=nodeIpv4CidrSize,proto3" json:"node_ipv4_cidr_size,omitempty"` + // [Output only] The IP address range of the Kubernetes services in + // this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `1.2.3.4/29`). Service addresses are + // typically put in the last `/16` from the container CIDR. + ServicesIpv4Cidr string `protobuf:"bytes,110,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"` + // Deprecated. Use node_pools.instance_group_urls. + InstanceGroupUrls []string `protobuf:"bytes,111,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"` // Deprecated: Do not use. + // [Output only] The number of nodes currently in the cluster. Deprecated. + // Call Kubernetes API directly to retrieve node information. 
+ CurrentNodeCount int32 `protobuf:"varint,112,opt,name=current_node_count,json=currentNodeCount,proto3" json:"current_node_count,omitempty"` // Deprecated: Do not use. + // [Output only] The time the cluster will be automatically + // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + ExpireTime string `protobuf:"bytes,113,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + // [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/regions-zones/regions-zones#available) or + // [region](/compute/docs/regions-zones/regions-zones#available) in which + // the cluster resides. + Location string `protobuf:"bytes,114,opt,name=location,proto3" json:"location,omitempty"` + // Enable the ability to use Cloud TPUs in this cluster. + EnableTpu bool `protobuf:"varint,115,opt,name=enable_tpu,json=enableTpu,proto3" json:"enable_tpu,omitempty"` + // [Output only] The IP address range of the Cloud TPUs in this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `1.2.3.4/29`). + TpuIpv4CidrBlock string `protobuf:"bytes,116,opt,name=tpu_ipv4_cidr_block,json=tpuIpv4CidrBlock,proto3" json:"tpu_ipv4_cidr_block,omitempty"` + // Which conditions caused the current cluster state. 
+ Conditions []*StatusCondition `protobuf:"bytes,118,rep,name=conditions,proto3" json:"conditions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{18} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Cluster) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// Deprecated: Do not use. +func (m *Cluster) GetInitialNodeCount() int32 { + if m != nil { + return m.InitialNodeCount + } + return 0 +} + +// Deprecated: Do not use. 
+func (m *Cluster) GetNodeConfig() *NodeConfig { + if m != nil { + return m.NodeConfig + } + return nil +} + +func (m *Cluster) GetMasterAuth() *MasterAuth { + if m != nil { + return m.MasterAuth + } + return nil +} + +func (m *Cluster) GetLoggingService() string { + if m != nil { + return m.LoggingService + } + return "" +} + +func (m *Cluster) GetMonitoringService() string { + if m != nil { + return m.MonitoringService + } + return "" +} + +func (m *Cluster) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *Cluster) GetClusterIpv4Cidr() string { + if m != nil { + return m.ClusterIpv4Cidr + } + return "" +} + +func (m *Cluster) GetAddonsConfig() *AddonsConfig { + if m != nil { + return m.AddonsConfig + } + return nil +} + +func (m *Cluster) GetSubnetwork() string { + if m != nil { + return m.Subnetwork + } + return "" +} + +func (m *Cluster) GetNodePools() []*NodePool { + if m != nil { + return m.NodePools + } + return nil +} + +func (m *Cluster) GetLocations() []string { + if m != nil { + return m.Locations + } + return nil +} + +func (m *Cluster) GetEnableKubernetesAlpha() bool { + if m != nil { + return m.EnableKubernetesAlpha + } + return false +} + +func (m *Cluster) GetResourceLabels() map[string]string { + if m != nil { + return m.ResourceLabels + } + return nil +} + +func (m *Cluster) GetLabelFingerprint() string { + if m != nil { + return m.LabelFingerprint + } + return "" +} + +func (m *Cluster) GetLegacyAbac() *LegacyAbac { + if m != nil { + return m.LegacyAbac + } + return nil +} + +func (m *Cluster) GetNetworkPolicy() *NetworkPolicy { + if m != nil { + return m.NetworkPolicy + } + return nil +} + +func (m *Cluster) GetIpAllocationPolicy() *IPAllocationPolicy { + if m != nil { + return m.IpAllocationPolicy + } + return nil +} + +func (m *Cluster) GetMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { + if m != nil { + return m.MasterAuthorizedNetworksConfig + } + return nil +} + +func (m *Cluster) 
GetMaintenancePolicy() *MaintenancePolicy { + if m != nil { + return m.MaintenancePolicy + } + return nil +} + +func (m *Cluster) GetBinaryAuthorization() *BinaryAuthorization { + if m != nil { + return m.BinaryAuthorization + } + return nil +} + +func (m *Cluster) GetAutoscaling() *ClusterAutoscaling { + if m != nil { + return m.Autoscaling + } + return nil +} + +func (m *Cluster) GetNetworkConfig() *NetworkConfig { + if m != nil { + return m.NetworkConfig + } + return nil +} + +func (m *Cluster) GetDefaultMaxPodsConstraint() *MaxPodsConstraint { + if m != nil { + return m.DefaultMaxPodsConstraint + } + return nil +} + +func (m *Cluster) GetResourceUsageExportConfig() *ResourceUsageExportConfig { + if m != nil { + return m.ResourceUsageExportConfig + } + return nil +} + +func (m *Cluster) GetAuthenticatorGroupsConfig() *AuthenticatorGroupsConfig { + if m != nil { + return m.AuthenticatorGroupsConfig + } + return nil +} + +func (m *Cluster) GetPrivateClusterConfig() *PrivateClusterConfig { + if m != nil { + return m.PrivateClusterConfig + } + return nil +} + +func (m *Cluster) GetDatabaseEncryption() *DatabaseEncryption { + if m != nil { + return m.DatabaseEncryption + } + return nil +} + +func (m *Cluster) GetVerticalPodAutoscaling() *VerticalPodAutoscaling { + if m != nil { + return m.VerticalPodAutoscaling + } + return nil +} + +func (m *Cluster) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +// Deprecated: Do not use. +func (m *Cluster) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *Cluster) GetEndpoint() string { + if m != nil { + return m.Endpoint + } + return "" +} + +func (m *Cluster) GetInitialClusterVersion() string { + if m != nil { + return m.InitialClusterVersion + } + return "" +} + +func (m *Cluster) GetCurrentMasterVersion() string { + if m != nil { + return m.CurrentMasterVersion + } + return "" +} + +// Deprecated: Do not use. 
+func (m *Cluster) GetCurrentNodeVersion() string { + if m != nil { + return m.CurrentNodeVersion + } + return "" +} + +func (m *Cluster) GetCreateTime() string { + if m != nil { + return m.CreateTime + } + return "" +} + +func (m *Cluster) GetStatus() Cluster_Status { + if m != nil { + return m.Status + } + return Cluster_STATUS_UNSPECIFIED +} + +func (m *Cluster) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Cluster) GetNodeIpv4CidrSize() int32 { + if m != nil { + return m.NodeIpv4CidrSize + } + return 0 +} + +func (m *Cluster) GetServicesIpv4Cidr() string { + if m != nil { + return m.ServicesIpv4Cidr + } + return "" +} + +// Deprecated: Do not use. +func (m *Cluster) GetInstanceGroupUrls() []string { + if m != nil { + return m.InstanceGroupUrls + } + return nil +} + +// Deprecated: Do not use. +func (m *Cluster) GetCurrentNodeCount() int32 { + if m != nil { + return m.CurrentNodeCount + } + return 0 +} + +func (m *Cluster) GetExpireTime() string { + if m != nil { + return m.ExpireTime + } + return "" +} + +func (m *Cluster) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *Cluster) GetEnableTpu() bool { + if m != nil { + return m.EnableTpu + } + return false +} + +func (m *Cluster) GetTpuIpv4CidrBlock() string { + if m != nil { + return m.TpuIpv4CidrBlock + } + return "" +} + +func (m *Cluster) GetConditions() []*StatusCondition { + if m != nil { + return m.Conditions + } + return nil +} + +// ClusterUpdate describes an update to the cluster. Exactly one update can +// be applied to a cluster with each request, so at most one field can be +// provided. +type ClusterUpdate struct { + // The Kubernetes version to change the nodes to (typically an + // upgrade). 
+ // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version + DesiredNodeVersion string `protobuf:"bytes,4,opt,name=desired_node_version,json=desiredNodeVersion,proto3" json:"desired_node_version,omitempty"` + // The monitoring service the cluster should use to write metrics. + // Currently available options: + // + // * "monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring + // service with Kubernetes-native resource model + // * "monitoring.googleapis.com" - the Google Cloud Monitoring service + // * "none" - no metrics will be exported from the cluster + DesiredMonitoringService string `protobuf:"bytes,5,opt,name=desired_monitoring_service,json=desiredMonitoringService,proto3" json:"desired_monitoring_service,omitempty"` + // Configurations for the various addons available to run in the cluster. + DesiredAddonsConfig *AddonsConfig `protobuf:"bytes,6,opt,name=desired_addons_config,json=desiredAddonsConfig,proto3" json:"desired_addons_config,omitempty"` + // The node pool to be upgraded. This field is mandatory if + // "desired_node_version", "desired_image_family" or + // "desired_node_pool_autoscaling" is specified and there is more than one + // node pool on the cluster. + DesiredNodePoolId string `protobuf:"bytes,7,opt,name=desired_node_pool_id,json=desiredNodePoolId,proto3" json:"desired_node_pool_id,omitempty"` + // The desired image type for the node pool. + // NOTE: Set the "desired_node_pool" field as well. 
+ DesiredImageType string `protobuf:"bytes,8,opt,name=desired_image_type,json=desiredImageType,proto3" json:"desired_image_type,omitempty"` + // Configuration of etcd encryption. + DesiredDatabaseEncryption *DatabaseEncryption `protobuf:"bytes,46,opt,name=desired_database_encryption,json=desiredDatabaseEncryption,proto3" json:"desired_database_encryption,omitempty"` + // Autoscaler configuration for the node pool specified in + // desired_node_pool_id. If there is only one pool in the + // cluster and desired_node_pool_id is not provided then + // the change applies to that single node pool. + DesiredNodePoolAutoscaling *NodePoolAutoscaling `protobuf:"bytes,9,opt,name=desired_node_pool_autoscaling,json=desiredNodePoolAutoscaling,proto3" json:"desired_node_pool_autoscaling,omitempty"` + // The desired list of Google Compute Engine + // [zones](/compute/docs/zones#available) in which the cluster's nodes + // should be located. Changing the locations a cluster is in will result + // in nodes being either created or removed from the cluster, depending on + // whether locations are being added or removed. + // + // This list must always include the cluster's primary zone. + DesiredLocations []string `protobuf:"bytes,10,rep,name=desired_locations,json=desiredLocations,proto3" json:"desired_locations,omitempty"` + // The desired configuration options for master authorized networks feature. + DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,12,opt,name=desired_master_authorized_networks_config,json=desiredMasterAuthorizedNetworksConfig,proto3" json:"desired_master_authorized_networks_config,omitempty"` + // Cluster-level autoscaling configuration. + DesiredClusterAutoscaling *ClusterAutoscaling `protobuf:"bytes,15,opt,name=desired_cluster_autoscaling,json=desiredClusterAutoscaling,proto3" json:"desired_cluster_autoscaling,omitempty"` + // The desired configuration options for the Binary Authorization feature. 
+ DesiredBinaryAuthorization *BinaryAuthorization `protobuf:"bytes,16,opt,name=desired_binary_authorization,json=desiredBinaryAuthorization,proto3" json:"desired_binary_authorization,omitempty"` + // The logging service the cluster should use to write logs. + // Currently available options: + // + // * "logging.googleapis.com/kubernetes" - the Google Cloud Logging + // service with Kubernetes-native resource model + // * "logging.googleapis.com" - the Google Cloud Logging service + // * "none" - no logs will be exported from the cluster + DesiredLoggingService string `protobuf:"bytes,19,opt,name=desired_logging_service,json=desiredLoggingService,proto3" json:"desired_logging_service,omitempty"` + // The desired configuration for exporting resource usage. + DesiredResourceUsageExportConfig *ResourceUsageExportConfig `protobuf:"bytes,21,opt,name=desired_resource_usage_export_config,json=desiredResourceUsageExportConfig,proto3" json:"desired_resource_usage_export_config,omitempty"` + // Cluster-level Vertical Pod Autoscaling configuration. + DesiredVerticalPodAutoscaling *VerticalPodAutoscaling `protobuf:"bytes,22,opt,name=desired_vertical_pod_autoscaling,json=desiredVerticalPodAutoscaling,proto3" json:"desired_vertical_pod_autoscaling,omitempty"` + // The desired config of Intra-node visibility. + DesiredIntraNodeVisibilityConfig *IntraNodeVisibilityConfig `protobuf:"bytes,26,opt,name=desired_intra_node_visibility_config,json=desiredIntraNodeVisibilityConfig,proto3" json:"desired_intra_node_visibility_config,omitempty"` + // The Kubernetes version to change the master to. 
+ // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version + DesiredMasterVersion string `protobuf:"bytes,100,opt,name=desired_master_version,json=desiredMasterVersion,proto3" json:"desired_master_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterUpdate) Reset() { *m = ClusterUpdate{} } +func (m *ClusterUpdate) String() string { return proto.CompactTextString(m) } +func (*ClusterUpdate) ProtoMessage() {} +func (*ClusterUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{19} +} + +func (m *ClusterUpdate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterUpdate.Unmarshal(m, b) +} +func (m *ClusterUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterUpdate.Marshal(b, m, deterministic) +} +func (m *ClusterUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterUpdate.Merge(m, src) +} +func (m *ClusterUpdate) XXX_Size() int { + return xxx_messageInfo_ClusterUpdate.Size(m) +} +func (m *ClusterUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterUpdate proto.InternalMessageInfo + +func (m *ClusterUpdate) GetDesiredNodeVersion() string { + if m != nil { + return m.DesiredNodeVersion + } + return "" +} + +func (m *ClusterUpdate) GetDesiredMonitoringService() string { + if m != nil { + return m.DesiredMonitoringService + } + return "" +} + +func (m *ClusterUpdate) GetDesiredAddonsConfig() *AddonsConfig { + if m != 
nil { + return m.DesiredAddonsConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredNodePoolId() string { + if m != nil { + return m.DesiredNodePoolId + } + return "" +} + +func (m *ClusterUpdate) GetDesiredImageType() string { + if m != nil { + return m.DesiredImageType + } + return "" +} + +func (m *ClusterUpdate) GetDesiredDatabaseEncryption() *DatabaseEncryption { + if m != nil { + return m.DesiredDatabaseEncryption + } + return nil +} + +func (m *ClusterUpdate) GetDesiredNodePoolAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.DesiredNodePoolAutoscaling + } + return nil +} + +func (m *ClusterUpdate) GetDesiredLocations() []string { + if m != nil { + return m.DesiredLocations + } + return nil +} + +func (m *ClusterUpdate) GetDesiredMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig { + if m != nil { + return m.DesiredMasterAuthorizedNetworksConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredClusterAutoscaling() *ClusterAutoscaling { + if m != nil { + return m.DesiredClusterAutoscaling + } + return nil +} + +func (m *ClusterUpdate) GetDesiredBinaryAuthorization() *BinaryAuthorization { + if m != nil { + return m.DesiredBinaryAuthorization + } + return nil +} + +func (m *ClusterUpdate) GetDesiredLoggingService() string { + if m != nil { + return m.DesiredLoggingService + } + return "" +} + +func (m *ClusterUpdate) GetDesiredResourceUsageExportConfig() *ResourceUsageExportConfig { + if m != nil { + return m.DesiredResourceUsageExportConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredVerticalPodAutoscaling() *VerticalPodAutoscaling { + if m != nil { + return m.DesiredVerticalPodAutoscaling + } + return nil +} + +func (m *ClusterUpdate) GetDesiredIntraNodeVisibilityConfig() *IntraNodeVisibilityConfig { + if m != nil { + return m.DesiredIntraNodeVisibilityConfig + } + return nil +} + +func (m *ClusterUpdate) GetDesiredMasterVersion() string { + if m != nil { + return m.DesiredMasterVersion + } + return 
"" +} + +// This operation resource represents operations that may have happened or are +// happening on the cluster. All fields are output only. +type Operation struct { + // The server-assigned ID for the operation. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation + // is taking place. + // This field is deprecated, use location instead. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // The operation type. + OperationType Operation_Type `protobuf:"varint,3,opt,name=operation_type,json=operationType,proto3,enum=google.container.v1.Operation_Type" json:"operation_type,omitempty"` + // The current status of the operation. + Status Operation_Status `protobuf:"varint,4,opt,name=status,proto3,enum=google.container.v1.Operation_Status" json:"status,omitempty"` + // Detailed operation progress, if available. + Detail string `protobuf:"bytes,8,opt,name=detail,proto3" json:"detail,omitempty"` + // If an error has occurred, a textual description of the error. + StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // Server-defined URL for the resource. + SelfLink string `protobuf:"bytes,6,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` + // Server-defined URL for the target of the operation. + TargetLink string `protobuf:"bytes,7,opt,name=target_link,json=targetLink,proto3" json:"target_link,omitempty"` + // [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/regions-zones/regions-zones#available) or + // [region](/compute/docs/regions-zones/regions-zones#available) in which + // the cluster resides. 
+ Location string `protobuf:"bytes,9,opt,name=location,proto3" json:"location,omitempty"` + // [Output only] The time the operation started, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + StartTime string `protobuf:"bytes,10,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // [Output only] The time the operation completed, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + EndTime string `protobuf:"bytes,11,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Which conditions caused the current cluster state. + ClusterConditions []*StatusCondition `protobuf:"bytes,13,rep,name=cluster_conditions,json=clusterConditions,proto3" json:"cluster_conditions,omitempty"` + // Which conditions caused the current node pool state. + NodepoolConditions []*StatusCondition `protobuf:"bytes,14,rep,name=nodepool_conditions,json=nodepoolConditions,proto3" json:"nodepool_conditions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{20} +} + +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (m *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(m, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) +} + +var xxx_messageInfo_Operation proto.InternalMessageInfo + +func (m *Operation) GetName() string { + if m 
!= nil { + return m.Name + } + return "" +} + +// Deprecated: Do not use. +func (m *Operation) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *Operation) GetOperationType() Operation_Type { + if m != nil { + return m.OperationType + } + return Operation_TYPE_UNSPECIFIED +} + +func (m *Operation) GetStatus() Operation_Status { + if m != nil { + return m.Status + } + return Operation_STATUS_UNSPECIFIED +} + +func (m *Operation) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Operation) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Operation) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +func (m *Operation) GetTargetLink() string { + if m != nil { + return m.TargetLink + } + return "" +} + +func (m *Operation) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *Operation) GetStartTime() string { + if m != nil { + return m.StartTime + } + return "" +} + +func (m *Operation) GetEndTime() string { + if m != nil { + return m.EndTime + } + return "" +} + +func (m *Operation) GetClusterConditions() []*StatusCondition { + if m != nil { + return m.ClusterConditions + } + return nil +} + +func (m *Operation) GetNodepoolConditions() []*StatusCondition { + if m != nil { + return m.NodepoolConditions + } + return nil +} + +// CreateClusterRequest creates a cluster. +type CreateClusterRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the parent field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. 
+ // This field has been deprecated and replaced by the parent field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Required. A [cluster + // resource](/container-engine/reference/rest/v1/projects.zones.clusters) + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` + // The parent (project and location) where the cluster will be created. + // Specified in the format 'projects/*/locations/*'. + Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{21} +} + +func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b) +} +func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic) +} +func (m *CreateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateClusterRequest.Merge(m, src) +} +func (m *CreateClusterRequest) XXX_Size() int { + return xxx_messageInfo_CreateClusterRequest.Size(m) +} +func (m *CreateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *CreateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. 
+func (m *CreateClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *CreateClusterRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +// GetClusterRequest gets the settings of a cluster. +type GetClusterRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to retrieve. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster) of the cluster to retrieve. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{22} +} + +func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b) +} +func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic) +} +func (m *GetClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterRequest.Merge(m, src) +} +func (m *GetClusterRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterRequest.Size(m) +} +func (m *GetClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *GetClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *GetClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *GetClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *GetClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// UpdateClusterRequest updates the settings of a cluster. +type UpdateClusterRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. 
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. A description of the update. + Update *ClusterUpdate `protobuf:"bytes,4,opt,name=update,proto3" json:"update,omitempty"` + // The name (project, location, cluster) of the cluster to update. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{23} +} + +func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b) +} +func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic) +} +func (m *UpdateClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateClusterRequest.Merge(m, src) +} +func (m *UpdateClusterRequest) XXX_Size() int { + return 
xxx_messageInfo_UpdateClusterRequest.Size(m) +} +func (m *UpdateClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *UpdateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateClusterRequest) GetUpdate() *ClusterUpdate { + if m != nil { + return m.Update + } + return nil +} + +func (m *UpdateClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// UpdateNodePoolRequests update a node pool's image and/or version. +type UpdateNodePoolRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to upgrade. 
+ // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // Required. The Kubernetes version to change the nodes to (typically an + // upgrade). + // + // Users may specify either explicit versions offered by Kubernetes Engine or + // version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version + NodeVersion string `protobuf:"bytes,5,opt,name=node_version,json=nodeVersion,proto3" json:"node_version,omitempty"` + // Required. The desired image type for the node pool. + ImageType string `protobuf:"bytes,6,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"` + // The name (project, location, cluster, node pool) of the node pool to + // update. Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNodePoolRequest) Reset() { *m = UpdateNodePoolRequest{} } +func (m *UpdateNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNodePoolRequest) ProtoMessage() {} +func (*UpdateNodePoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{24} +} + +func (m *UpdateNodePoolRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNodePoolRequest.Unmarshal(m, b) +} +func (m *UpdateNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNodePoolRequest.Marshal(b, m, deterministic) +} +func (m *UpdateNodePoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateNodePoolRequest.Merge(m, src) +} +func (m *UpdateNodePoolRequest) XXX_Size() int { + return xxx_messageInfo_UpdateNodePoolRequest.Size(m) +} +func (m *UpdateNodePoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNodePoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNodePoolRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *UpdateNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. 
+func (m *UpdateNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *UpdateNodePoolRequest) GetNodeVersion() string { + if m != nil { + return m.NodeVersion + } + return "" +} + +func (m *UpdateNodePoolRequest) GetImageType() string { + if m != nil { + return m.ImageType + } + return "" +} + +func (m *UpdateNodePoolRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool. +type SetNodePoolAutoscalingRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to upgrade. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // Required. Autoscaling configuration for the node pool. 
+ Autoscaling *NodePoolAutoscaling `protobuf:"bytes,5,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` + // The name (project, location, cluster, node pool) of the node pool to set + // autoscaler settings. Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNodePoolAutoscalingRequest) Reset() { *m = SetNodePoolAutoscalingRequest{} } +func (m *SetNodePoolAutoscalingRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolAutoscalingRequest) ProtoMessage() {} +func (*SetNodePoolAutoscalingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{25} +} + +func (m *SetNodePoolAutoscalingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNodePoolAutoscalingRequest.Unmarshal(m, b) +} +func (m *SetNodePoolAutoscalingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNodePoolAutoscalingRequest.Marshal(b, m, deterministic) +} +func (m *SetNodePoolAutoscalingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNodePoolAutoscalingRequest.Merge(m, src) +} +func (m *SetNodePoolAutoscalingRequest) XXX_Size() int { + return xxx_messageInfo_SetNodePoolAutoscalingRequest.Size(m) +} +func (m *SetNodePoolAutoscalingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNodePoolAutoscalingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNodePoolAutoscalingRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetNodePoolAutoscalingRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolAutoscalingRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetNodePoolAutoscalingRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolAutoscalingRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolAutoscalingRequest) GetAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.Autoscaling + } + return nil +} + +func (m *SetNodePoolAutoscalingRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetLoggingServiceRequest sets the logging service of a cluster. +type SetLoggingServiceRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The logging service the cluster should use to write metrics. + // Currently available options: + // + // * "logging.googleapis.com" - the Google Cloud Logging service + // * "none" - no metrics will be exported from the cluster + LoggingService string `protobuf:"bytes,4,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"` + // The name (project, location, cluster) of the cluster to set logging. 
+ // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetLoggingServiceRequest) Reset() { *m = SetLoggingServiceRequest{} } +func (m *SetLoggingServiceRequest) String() string { return proto.CompactTextString(m) } +func (*SetLoggingServiceRequest) ProtoMessage() {} +func (*SetLoggingServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{26} +} + +func (m *SetLoggingServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetLoggingServiceRequest.Unmarshal(m, b) +} +func (m *SetLoggingServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetLoggingServiceRequest.Marshal(b, m, deterministic) +} +func (m *SetLoggingServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetLoggingServiceRequest.Merge(m, src) +} +func (m *SetLoggingServiceRequest) XXX_Size() int { + return xxx_messageInfo_SetLoggingServiceRequest.Size(m) +} +func (m *SetLoggingServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetLoggingServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetLoggingServiceRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetLoggingServiceRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetLoggingServiceRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetLoggingServiceRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLoggingServiceRequest) GetLoggingService() string { + if m != nil { + return m.LoggingService + } + return "" +} + +func (m *SetLoggingServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetMonitoringServiceRequest sets the monitoring service of a cluster. +type SetMonitoringServiceRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The monitoring service the cluster should use to write metrics. + // Currently available options: + // + // * "monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring + // service with Kubernetes-native resource model + // * "monitoring.googleapis.com" - the Google Cloud Monitoring service + // * "none" - no metrics will be exported from the cluster + MonitoringService string `protobuf:"bytes,4,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"` + // The name (project, location, cluster) of the cluster to set monitoring. 
+ // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMonitoringServiceRequest) Reset() { *m = SetMonitoringServiceRequest{} } +func (m *SetMonitoringServiceRequest) String() string { return proto.CompactTextString(m) } +func (*SetMonitoringServiceRequest) ProtoMessage() {} +func (*SetMonitoringServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{27} +} + +func (m *SetMonitoringServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMonitoringServiceRequest.Unmarshal(m, b) +} +func (m *SetMonitoringServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMonitoringServiceRequest.Marshal(b, m, deterministic) +} +func (m *SetMonitoringServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMonitoringServiceRequest.Merge(m, src) +} +func (m *SetMonitoringServiceRequest) XXX_Size() int { + return xxx_messageInfo_SetMonitoringServiceRequest.Size(m) +} +func (m *SetMonitoringServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMonitoringServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMonitoringServiceRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetMonitoringServiceRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetMonitoringServiceRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetMonitoringServiceRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetMonitoringServiceRequest) GetMonitoringService() string { + if m != nil { + return m.MonitoringService + } + return "" +} + +func (m *SetMonitoringServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetAddonsConfigRequest sets the addons associated with the cluster. +type SetAddonsConfigRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The desired configurations for the various addons available to run in the + // cluster. + AddonsConfig *AddonsConfig `protobuf:"bytes,4,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"` + // The name (project, location, cluster) of the cluster to set addons. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetAddonsConfigRequest) Reset() { *m = SetAddonsConfigRequest{} } +func (m *SetAddonsConfigRequest) String() string { return proto.CompactTextString(m) } +func (*SetAddonsConfigRequest) ProtoMessage() {} +func (*SetAddonsConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{28} +} + +func (m *SetAddonsConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetAddonsConfigRequest.Unmarshal(m, b) +} +func (m *SetAddonsConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetAddonsConfigRequest.Marshal(b, m, deterministic) +} +func (m *SetAddonsConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetAddonsConfigRequest.Merge(m, src) +} +func (m *SetAddonsConfigRequest) XXX_Size() int { + return xxx_messageInfo_SetAddonsConfigRequest.Size(m) +} +func (m *SetAddonsConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetAddonsConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetAddonsConfigRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetAddonsConfigRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetAddonsConfigRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *SetAddonsConfigRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetAddonsConfigRequest) GetAddonsConfig() *AddonsConfig { + if m != nil { + return m.AddonsConfig + } + return nil +} + +func (m *SetAddonsConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetLocationsRequest sets the locations of the cluster. 
+type SetLocationsRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The desired list of Google Compute Engine + // [zones](/compute/docs/zones#available) in which the cluster's nodes + // should be located. Changing the locations a cluster is in will result + // in nodes being either created or removed from the cluster, depending on + // whether locations are being added or removed. + // + // This list must always include the cluster's primary zone. + Locations []string `protobuf:"bytes,4,rep,name=locations,proto3" json:"locations,omitempty"` + // The name (project, location, cluster) of the cluster to set locations. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetLocationsRequest) Reset() { *m = SetLocationsRequest{} } +func (m *SetLocationsRequest) String() string { return proto.CompactTextString(m) } +func (*SetLocationsRequest) ProtoMessage() {} +func (*SetLocationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{29} +} + +func (m *SetLocationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetLocationsRequest.Unmarshal(m, b) +} +func (m *SetLocationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetLocationsRequest.Marshal(b, m, deterministic) +} +func (m *SetLocationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetLocationsRequest.Merge(m, src) +} +func (m *SetLocationsRequest) XXX_Size() int { + return xxx_messageInfo_SetLocationsRequest.Size(m) +} +func (m *SetLocationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetLocationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetLocationsRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetLocationsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetLocationsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *SetLocationsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLocationsRequest) GetLocations() []string { + if m != nil { + return m.Locations + } + return nil +} + +func (m *SetLocationsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// UpdateMasterRequest updates the master of the cluster. +type UpdateMasterRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The Kubernetes version to change the master to. + // + // Users may specify either explicit versions offered by Kubernetes Engine or + // version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version + MasterVersion string `protobuf:"bytes,4,opt,name=master_version,json=masterVersion,proto3" json:"master_version,omitempty"` + // The name (project, location, cluster) of the cluster to update. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateMasterRequest) Reset() { *m = UpdateMasterRequest{} } +func (m *UpdateMasterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateMasterRequest) ProtoMessage() {} +func (*UpdateMasterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{30} +} + +func (m *UpdateMasterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateMasterRequest.Unmarshal(m, b) +} +func (m *UpdateMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateMasterRequest.Marshal(b, m, deterministic) +} +func (m *UpdateMasterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateMasterRequest.Merge(m, src) +} +func (m *UpdateMasterRequest) XXX_Size() int { + return xxx_messageInfo_UpdateMasterRequest.Size(m) +} +func (m *UpdateMasterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateMasterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateMasterRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *UpdateMasterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateMasterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *UpdateMasterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *UpdateMasterRequest) GetMasterVersion() string { + if m != nil { + return m.MasterVersion + } + return "" +} + +func (m *UpdateMasterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetMasterAuthRequest updates the admin password of a cluster. +type SetMasterAuthRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The exact form of action to be taken on the master auth. + Action SetMasterAuthRequest_Action `protobuf:"varint,4,opt,name=action,proto3,enum=google.container.v1.SetMasterAuthRequest_Action" json:"action,omitempty"` + // Required. A description of the update. + Update *MasterAuth `protobuf:"bytes,5,opt,name=update,proto3" json:"update,omitempty"` + // The name (project, location, cluster) of the cluster to set auth. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMasterAuthRequest) Reset() { *m = SetMasterAuthRequest{} } +func (m *SetMasterAuthRequest) String() string { return proto.CompactTextString(m) } +func (*SetMasterAuthRequest) ProtoMessage() {} +func (*SetMasterAuthRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{31} +} + +func (m *SetMasterAuthRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMasterAuthRequest.Unmarshal(m, b) +} +func (m *SetMasterAuthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMasterAuthRequest.Marshal(b, m, deterministic) +} +func (m *SetMasterAuthRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMasterAuthRequest.Merge(m, src) +} +func (m *SetMasterAuthRequest) XXX_Size() int { + return xxx_messageInfo_SetMasterAuthRequest.Size(m) +} +func (m *SetMasterAuthRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMasterAuthRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMasterAuthRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetMasterAuthRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetMasterAuthRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetMasterAuthRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetMasterAuthRequest) GetAction() SetMasterAuthRequest_Action { + if m != nil { + return m.Action + } + return SetMasterAuthRequest_UNKNOWN +} + +func (m *SetMasterAuthRequest) GetUpdate() *MasterAuth { + if m != nil { + return m.Update + } + return nil +} + +func (m *SetMasterAuthRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// DeleteClusterRequest deletes a cluster. +type DeleteClusterRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to delete. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster) of the cluster to delete. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{32} +} + +func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b) +} +func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic) +} +func (m *DeleteClusterRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteClusterRequest.Merge(m, src) +} +func (m *DeleteClusterRequest) XXX_Size() int { + return xxx_messageInfo_DeleteClusterRequest.Size(m) +} +func (m *DeleteClusterRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *DeleteClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *DeleteClusterRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *DeleteClusterRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *DeleteClusterRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ListClustersRequest lists clusters. +type ListClustersRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). 
+ // This field has been deprecated and replaced by the parent field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides, or "-" for all zones. + // This field has been deprecated and replaced by the parent field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // The parent (project and location) where the clusters will be listed. + // Specified in the format 'projects/*/locations/*'. + // Location "-" matches all zones and all regions. + Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{33} +} + +func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b) +} +func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic) +} +func (m *ListClustersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersRequest.Merge(m, src) +} +func (m *ListClustersRequest) XXX_Size() int { + return xxx_messageInfo_ListClustersRequest.Size(m) +} +func (m *ListClustersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *ListClustersRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *ListClustersRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *ListClustersRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +// ListClustersResponse is the result of ListClustersRequest. +type ListClustersResponse struct { + // A list of clusters in the project in the specified zone, or + // across all ones. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + // If any zones are listed here, the list of clusters returned + // may be missing those zones. + MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{34} +} + +func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b) +} +func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic) +} +func (m *ListClustersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListClustersResponse.Merge(m, src) +} +func (m *ListClustersResponse) XXX_Size() int { + return xxx_messageInfo_ListClustersResponse.Size(m) +} +func (m *ListClustersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListClustersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListClustersResponse 
proto.InternalMessageInfo + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetMissingZones() []string { + if m != nil { + return m.MissingZones + } + return nil +} + +// GetOperationRequest gets a single operation. +type GetOperationRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The server-assigned `name` of the operation. + // This field has been deprecated and replaced by the name field. + OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, operation id) of the operation to get. + // Specified in the format 'projects/*/locations/*/operations/*'. 
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} } +func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) } +func (*GetOperationRequest) ProtoMessage() {} +func (*GetOperationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{35} +} + +func (m *GetOperationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetOperationRequest.Unmarshal(m, b) +} +func (m *GetOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetOperationRequest.Marshal(b, m, deterministic) +} +func (m *GetOperationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOperationRequest.Merge(m, src) +} +func (m *GetOperationRequest) XXX_Size() int { + return xxx_messageInfo_GetOperationRequest.Size(m) +} +func (m *GetOperationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetOperationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOperationRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *GetOperationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *GetOperationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *GetOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *GetOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ListOperationsRequest lists operations. +type ListOperationsRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). 
+ // This field has been deprecated and replaced by the parent field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) to return operations for, or `-` for + // all zones. This field has been deprecated and replaced by the parent field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // The parent (project and location) where the operations will be listed. + // Specified in the format 'projects/*/locations/*'. + // Location "-" matches all zones and all regions. + Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListOperationsRequest) Reset() { *m = ListOperationsRequest{} } +func (m *ListOperationsRequest) String() string { return proto.CompactTextString(m) } +func (*ListOperationsRequest) ProtoMessage() {} +func (*ListOperationsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{36} +} + +func (m *ListOperationsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListOperationsRequest.Unmarshal(m, b) +} +func (m *ListOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListOperationsRequest.Marshal(b, m, deterministic) +} +func (m *ListOperationsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOperationsRequest.Merge(m, src) +} +func (m *ListOperationsRequest) XXX_Size() int { + return xxx_messageInfo_ListOperationsRequest.Size(m) +} +func (m *ListOperationsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListOperationsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOperationsRequest proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *ListOperationsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *ListOperationsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *ListOperationsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +// CancelOperationRequest cancels a single operation. +type CancelOperationRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The server-assigned `name` of the operation. + // This field has been deprecated and replaced by the name field. + OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, operation id) of the operation to cancel. + // Specified in the format 'projects/*/locations/*/operations/*'. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} } +func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) } +func (*CancelOperationRequest) ProtoMessage() {} +func (*CancelOperationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{37} +} + +func (m *CancelOperationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CancelOperationRequest.Unmarshal(m, b) +} +func (m *CancelOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CancelOperationRequest.Marshal(b, m, deterministic) +} +func (m *CancelOperationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CancelOperationRequest.Merge(m, src) +} +func (m *CancelOperationRequest) XXX_Size() int { + return xxx_messageInfo_CancelOperationRequest.Size(m) +} +func (m *CancelOperationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CancelOperationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CancelOperationRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *CancelOperationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *CancelOperationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *CancelOperationRequest) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *CancelOperationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ListOperationsResponse is the result of ListOperationsRequest. +type ListOperationsResponse struct { + // A list of operations in the project in the specified zone. 
+ Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` + // If any zones are listed here, the list of operations returned + // may be missing the operations from those zones. + MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListOperationsResponse) Reset() { *m = ListOperationsResponse{} } +func (m *ListOperationsResponse) String() string { return proto.CompactTextString(m) } +func (*ListOperationsResponse) ProtoMessage() {} +func (*ListOperationsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{38} +} + +func (m *ListOperationsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListOperationsResponse.Unmarshal(m, b) +} +func (m *ListOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListOperationsResponse.Marshal(b, m, deterministic) +} +func (m *ListOperationsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOperationsResponse.Merge(m, src) +} +func (m *ListOperationsResponse) XXX_Size() int { + return xxx_messageInfo_ListOperationsResponse.Size(m) +} +func (m *ListOperationsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListOperationsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOperationsResponse proto.InternalMessageInfo + +func (m *ListOperationsResponse) GetOperations() []*Operation { + if m != nil { + return m.Operations + } + return nil +} + +func (m *ListOperationsResponse) GetMissingZones() []string { + if m != nil { + return m.MissingZones + } + return nil +} + +// Gets the current Kubernetes Engine service configuration. +type GetServerConfigRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) to return operations for. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // The name (project and location) of the server config to get, + // specified in the format 'projects/*/locations/*'. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServerConfigRequest) Reset() { *m = GetServerConfigRequest{} } +func (m *GetServerConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerConfigRequest) ProtoMessage() {} +func (*GetServerConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{39} +} + +func (m *GetServerConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServerConfigRequest.Unmarshal(m, b) +} +func (m *GetServerConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServerConfigRequest.Marshal(b, m, deterministic) +} +func (m *GetServerConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServerConfigRequest.Merge(m, src) +} +func (m *GetServerConfigRequest) XXX_Size() int { + return xxx_messageInfo_GetServerConfigRequest.Size(m) +} +func (m *GetServerConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServerConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServerConfigRequest proto.InternalMessageInfo 
+ +// Deprecated: Do not use. +func (m *GetServerConfigRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *GetServerConfigRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *GetServerConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Kubernetes Engine service configuration. +type ServerConfig struct { + // Version of Kubernetes the service deploys by default. + DefaultClusterVersion string `protobuf:"bytes,1,opt,name=default_cluster_version,json=defaultClusterVersion,proto3" json:"default_cluster_version,omitempty"` + // List of valid node upgrade target versions. + ValidNodeVersions []string `protobuf:"bytes,3,rep,name=valid_node_versions,json=validNodeVersions,proto3" json:"valid_node_versions,omitempty"` + // Default image type. + DefaultImageType string `protobuf:"bytes,4,opt,name=default_image_type,json=defaultImageType,proto3" json:"default_image_type,omitempty"` + // List of valid image types. + ValidImageTypes []string `protobuf:"bytes,5,rep,name=valid_image_types,json=validImageTypes,proto3" json:"valid_image_types,omitempty"` + // List of valid master versions. 
+ ValidMasterVersions []string `protobuf:"bytes,6,rep,name=valid_master_versions,json=validMasterVersions,proto3" json:"valid_master_versions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} +func (*ServerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{40} +} + +func (m *ServerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerConfig.Unmarshal(m, b) +} +func (m *ServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerConfig.Marshal(b, m, deterministic) +} +func (m *ServerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerConfig.Merge(m, src) +} +func (m *ServerConfig) XXX_Size() int { + return xxx_messageInfo_ServerConfig.Size(m) +} +func (m *ServerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ServerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerConfig proto.InternalMessageInfo + +func (m *ServerConfig) GetDefaultClusterVersion() string { + if m != nil { + return m.DefaultClusterVersion + } + return "" +} + +func (m *ServerConfig) GetValidNodeVersions() []string { + if m != nil { + return m.ValidNodeVersions + } + return nil +} + +func (m *ServerConfig) GetDefaultImageType() string { + if m != nil { + return m.DefaultImageType + } + return "" +} + +func (m *ServerConfig) GetValidImageTypes() []string { + if m != nil { + return m.ValidImageTypes + } + return nil +} + +func (m *ServerConfig) GetValidMasterVersions() []string { + if m != nil { + return m.ValidMasterVersions + } + return nil +} + +// CreateNodePoolRequest creates a node pool for a cluster. +type CreateNodePoolRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the parent field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the parent field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the parent field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The node pool to create. + NodePool *NodePool `protobuf:"bytes,4,opt,name=node_pool,json=nodePool,proto3" json:"node_pool,omitempty"` + // The parent (project, location, cluster id) where the node pool will be + // created. Specified in the format + // 'projects/*/locations/*/clusters/*'. 
+ Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNodePoolRequest) Reset() { *m = CreateNodePoolRequest{} } +func (m *CreateNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNodePoolRequest) ProtoMessage() {} +func (*CreateNodePoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{41} +} + +func (m *CreateNodePoolRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNodePoolRequest.Unmarshal(m, b) +} +func (m *CreateNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNodePoolRequest.Marshal(b, m, deterministic) +} +func (m *CreateNodePoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNodePoolRequest.Merge(m, src) +} +func (m *CreateNodePoolRequest) XXX_Size() int { + return xxx_messageInfo_CreateNodePoolRequest.Size(m) +} +func (m *CreateNodePoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNodePoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNodePoolRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *CreateNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *CreateNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *CreateNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CreateNodePoolRequest) GetNodePool() *NodePool { + if m != nil { + return m.NodePool + } + return nil +} + +func (m *CreateNodePoolRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +// DeleteNodePoolRequest deletes a node pool for a cluster. 
+type DeleteNodePoolRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to delete. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster, node pool id) of the node pool to + // delete. Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNodePoolRequest) Reset() { *m = DeleteNodePoolRequest{} } +func (m *DeleteNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNodePoolRequest) ProtoMessage() {} +func (*DeleteNodePoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{42} +} + +func (m *DeleteNodePoolRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNodePoolRequest.Unmarshal(m, b) +} +func (m *DeleteNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNodePoolRequest.Marshal(b, m, deterministic) +} +func (m *DeleteNodePoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNodePoolRequest.Merge(m, src) +} +func (m *DeleteNodePoolRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNodePoolRequest.Size(m) +} +func (m *DeleteNodePoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNodePoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNodePoolRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *DeleteNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *DeleteNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *DeleteNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. +func (m *DeleteNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *DeleteNodePoolRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ListNodePoolsRequest lists the node pool(s) for a cluster. 
+type ListNodePoolsRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the parent field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the parent field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the parent field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // The parent (project, location, cluster id) where the node pools will be + // listed. Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNodePoolsRequest) Reset() { *m = ListNodePoolsRequest{} } +func (m *ListNodePoolsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNodePoolsRequest) ProtoMessage() {} +func (*ListNodePoolsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{43} +} + +func (m *ListNodePoolsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNodePoolsRequest.Unmarshal(m, b) +} +func (m *ListNodePoolsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNodePoolsRequest.Marshal(b, m, deterministic) +} +func (m *ListNodePoolsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNodePoolsRequest.Merge(m, src) +} +func (m *ListNodePoolsRequest) XXX_Size() int { + return xxx_messageInfo_ListNodePoolsRequest.Size(m) +} +func (m *ListNodePoolsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNodePoolsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNodePoolsRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *ListNodePoolsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *ListNodePoolsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *ListNodePoolsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *ListNodePoolsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +// GetNodePoolRequest retrieves a node pool for a cluster. +type GetNodePoolRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster, node pool id) of the node pool to + // get. Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNodePoolRequest) Reset() { *m = GetNodePoolRequest{} } +func (m *GetNodePoolRequest) String() string { return proto.CompactTextString(m) } +func (*GetNodePoolRequest) ProtoMessage() {} +func (*GetNodePoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{44} +} + +func (m *GetNodePoolRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNodePoolRequest.Unmarshal(m, b) +} +func (m *GetNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNodePoolRequest.Marshal(b, m, deterministic) +} +func (m *GetNodePoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNodePoolRequest.Merge(m, src) +} +func (m *GetNodePoolRequest) XXX_Size() int { + return xxx_messageInfo_GetNodePoolRequest.Size(m) +} +func (m *GetNodePoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNodePoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNodePoolRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *GetNodePoolRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *GetNodePoolRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *GetNodePoolRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. +func (m *GetNodePoolRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *GetNodePoolRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// NodePool contains the name and configuration for a cluster's node pool. +// Node pools are a set of nodes (i.e. 
VM's), with a common configuration and +// specification, under the control of the cluster master. They may have a set +// of Kubernetes labels applied to them, which may be used to reference them +// during pod scheduling. They may also be resized up or down, to accommodate +// the workload. +type NodePool struct { + // The name of the node pool. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The node configuration of the pool. + Config *NodeConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + // The initial node count for the pool. You must ensure that your + // Compute Engine resource quota + // is sufficient for this number of instances. You must also have available + // firewall and routes quota. + InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"` + // [Output only] Server-defined URL for the resource. + SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"` + // The version of the Kubernetes of this node. + Version string `protobuf:"bytes,101,opt,name=version,proto3" json:"version,omitempty"` + // [Output only] The resource URLs of the [managed instance + // groups](/compute/docs/instance-groups/creating-groups-of-managed-instances) + // associated with this node pool. + InstanceGroupUrls []string `protobuf:"bytes,102,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"` + // [Output only] The status of the nodes in this pool instance. + Status NodePool_Status `protobuf:"varint,103,opt,name=status,proto3,enum=google.container.v1.NodePool_Status" json:"status,omitempty"` + // [Output only] Additional information about the current status of this + // node pool instance, if available. 
+ StatusMessage string `protobuf:"bytes,104,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // Autoscaler configuration for this NodePool. Autoscaler is enabled + // only if a valid configuration is present. + Autoscaling *NodePoolAutoscaling `protobuf:"bytes,4,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"` + // NodeManagement configuration for this NodePool. + Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"` + // The constraint on the maximum number of pods that can be run + // simultaneously on a node in the node pool. + MaxPodsConstraint *MaxPodsConstraint `protobuf:"bytes,6,opt,name=max_pods_constraint,json=maxPodsConstraint,proto3" json:"max_pods_constraint,omitempty"` + // Which conditions caused the current node pool state. + Conditions []*StatusCondition `protobuf:"bytes,105,rep,name=conditions,proto3" json:"conditions,omitempty"` + // [Output only] The pod CIDR block size per node in this node pool. 
+ PodIpv4CidrSize int32 `protobuf:"varint,7,opt,name=pod_ipv4_cidr_size,json=podIpv4CidrSize,proto3" json:"pod_ipv4_cidr_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePool) Reset() { *m = NodePool{} } +func (m *NodePool) String() string { return proto.CompactTextString(m) } +func (*NodePool) ProtoMessage() {} +func (*NodePool) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{45} +} + +func (m *NodePool) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePool.Unmarshal(m, b) +} +func (m *NodePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePool.Marshal(b, m, deterministic) +} +func (m *NodePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePool.Merge(m, src) +} +func (m *NodePool) XXX_Size() int { + return xxx_messageInfo_NodePool.Size(m) +} +func (m *NodePool) XXX_DiscardUnknown() { + xxx_messageInfo_NodePool.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePool proto.InternalMessageInfo + +func (m *NodePool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NodePool) GetConfig() *NodeConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *NodePool) GetInitialNodeCount() int32 { + if m != nil { + return m.InitialNodeCount + } + return 0 +} + +func (m *NodePool) GetSelfLink() string { + if m != nil { + return m.SelfLink + } + return "" +} + +func (m *NodePool) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *NodePool) GetInstanceGroupUrls() []string { + if m != nil { + return m.InstanceGroupUrls + } + return nil +} + +func (m *NodePool) GetStatus() NodePool_Status { + if m != nil { + return m.Status + } + return NodePool_STATUS_UNSPECIFIED +} + +func (m *NodePool) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *NodePool) 
GetAutoscaling() *NodePoolAutoscaling { + if m != nil { + return m.Autoscaling + } + return nil +} + +func (m *NodePool) GetManagement() *NodeManagement { + if m != nil { + return m.Management + } + return nil +} + +func (m *NodePool) GetMaxPodsConstraint() *MaxPodsConstraint { + if m != nil { + return m.MaxPodsConstraint + } + return nil +} + +func (m *NodePool) GetConditions() []*StatusCondition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m *NodePool) GetPodIpv4CidrSize() int32 { + if m != nil { + return m.PodIpv4CidrSize + } + return 0 +} + +// NodeManagement defines the set of node management services turned on for the +// node pool. +type NodeManagement struct { + // A flag that specifies whether node auto-upgrade is enabled for the node + // pool. If enabled, node auto-upgrade helps keep the nodes in your node pool + // up to date with the latest release version of Kubernetes. + AutoUpgrade bool `protobuf:"varint,1,opt,name=auto_upgrade,json=autoUpgrade,proto3" json:"auto_upgrade,omitempty"` + // A flag that specifies whether the node auto-repair is enabled for the node + // pool. If enabled, the nodes in this node pool will be monitored and, if + // they fail health checks too many times, an automatic repair action will be + // triggered. + AutoRepair bool `protobuf:"varint,2,opt,name=auto_repair,json=autoRepair,proto3" json:"auto_repair,omitempty"` + // Specifies the Auto Upgrade knobs for the node pool. 
+ UpgradeOptions *AutoUpgradeOptions `protobuf:"bytes,10,opt,name=upgrade_options,json=upgradeOptions,proto3" json:"upgrade_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeManagement) Reset() { *m = NodeManagement{} } +func (m *NodeManagement) String() string { return proto.CompactTextString(m) } +func (*NodeManagement) ProtoMessage() {} +func (*NodeManagement) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{46} +} + +func (m *NodeManagement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeManagement.Unmarshal(m, b) +} +func (m *NodeManagement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeManagement.Marshal(b, m, deterministic) +} +func (m *NodeManagement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeManagement.Merge(m, src) +} +func (m *NodeManagement) XXX_Size() int { + return xxx_messageInfo_NodeManagement.Size(m) +} +func (m *NodeManagement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeManagement.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeManagement proto.InternalMessageInfo + +func (m *NodeManagement) GetAutoUpgrade() bool { + if m != nil { + return m.AutoUpgrade + } + return false +} + +func (m *NodeManagement) GetAutoRepair() bool { + if m != nil { + return m.AutoRepair + } + return false +} + +func (m *NodeManagement) GetUpgradeOptions() *AutoUpgradeOptions { + if m != nil { + return m.UpgradeOptions + } + return nil +} + +// AutoUpgradeOptions defines the set of options for the user to control how +// the Auto Upgrades will proceed. +type AutoUpgradeOptions struct { + // [Output only] This field is set when upgrades are about to commence + // with the approximate start time for the upgrades, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. 
+ AutoUpgradeStartTime string `protobuf:"bytes,1,opt,name=auto_upgrade_start_time,json=autoUpgradeStartTime,proto3" json:"auto_upgrade_start_time,omitempty"` + // [Output only] This field is set when upgrades are about to commence + // with the description of the upgrade. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutoUpgradeOptions) Reset() { *m = AutoUpgradeOptions{} } +func (m *AutoUpgradeOptions) String() string { return proto.CompactTextString(m) } +func (*AutoUpgradeOptions) ProtoMessage() {} +func (*AutoUpgradeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{47} +} + +func (m *AutoUpgradeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutoUpgradeOptions.Unmarshal(m, b) +} +func (m *AutoUpgradeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutoUpgradeOptions.Marshal(b, m, deterministic) +} +func (m *AutoUpgradeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutoUpgradeOptions.Merge(m, src) +} +func (m *AutoUpgradeOptions) XXX_Size() int { + return xxx_messageInfo_AutoUpgradeOptions.Size(m) +} +func (m *AutoUpgradeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_AutoUpgradeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_AutoUpgradeOptions proto.InternalMessageInfo + +func (m *AutoUpgradeOptions) GetAutoUpgradeStartTime() string { + if m != nil { + return m.AutoUpgradeStartTime + } + return "" +} + +func (m *AutoUpgradeOptions) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// MaintenancePolicy defines the maintenance policy to be used for the cluster. +type MaintenancePolicy struct { + // Specifies the maintenance window in which maintenance may be performed. 
+ Window *MaintenanceWindow `protobuf:"bytes,1,opt,name=window,proto3" json:"window,omitempty"` + // A hash identifying the version of this policy, so that updates to fields of + // the policy won't accidentally undo intermediate changes (and so that users + // of the API unaware of some fields won't accidentally remove other fields). + // Make a get() request to the cluster to get the current + // resource version and include it with requests to set the policy. + ResourceVersion string `protobuf:"bytes,3,opt,name=resource_version,json=resourceVersion,proto3" json:"resource_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaintenancePolicy) Reset() { *m = MaintenancePolicy{} } +func (m *MaintenancePolicy) String() string { return proto.CompactTextString(m) } +func (*MaintenancePolicy) ProtoMessage() {} +func (*MaintenancePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{48} +} + +func (m *MaintenancePolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaintenancePolicy.Unmarshal(m, b) +} +func (m *MaintenancePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaintenancePolicy.Marshal(b, m, deterministic) +} +func (m *MaintenancePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaintenancePolicy.Merge(m, src) +} +func (m *MaintenancePolicy) XXX_Size() int { + return xxx_messageInfo_MaintenancePolicy.Size(m) +} +func (m *MaintenancePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_MaintenancePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_MaintenancePolicy proto.InternalMessageInfo + +func (m *MaintenancePolicy) GetWindow() *MaintenanceWindow { + if m != nil { + return m.Window + } + return nil +} + +func (m *MaintenancePolicy) GetResourceVersion() string { + if m != nil { + return m.ResourceVersion + } + return "" +} + +// MaintenanceWindow defines the maintenance window 
to be used for the cluster. +type MaintenanceWindow struct { + // Types that are valid to be assigned to Policy: + // *MaintenanceWindow_DailyMaintenanceWindow + // *MaintenanceWindow_RecurringWindow + Policy isMaintenanceWindow_Policy `protobuf_oneof:"policy"` + // Exceptions to maintenance window. Non-emergency maintenance should not + // occur in these windows. + MaintenanceExclusions map[string]*TimeWindow `protobuf:"bytes,4,rep,name=maintenance_exclusions,json=maintenanceExclusions,proto3" json:"maintenance_exclusions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaintenanceWindow) Reset() { *m = MaintenanceWindow{} } +func (m *MaintenanceWindow) String() string { return proto.CompactTextString(m) } +func (*MaintenanceWindow) ProtoMessage() {} +func (*MaintenanceWindow) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{49} +} + +func (m *MaintenanceWindow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaintenanceWindow.Unmarshal(m, b) +} +func (m *MaintenanceWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaintenanceWindow.Marshal(b, m, deterministic) +} +func (m *MaintenanceWindow) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaintenanceWindow.Merge(m, src) +} +func (m *MaintenanceWindow) XXX_Size() int { + return xxx_messageInfo_MaintenanceWindow.Size(m) +} +func (m *MaintenanceWindow) XXX_DiscardUnknown() { + xxx_messageInfo_MaintenanceWindow.DiscardUnknown(m) +} + +var xxx_messageInfo_MaintenanceWindow proto.InternalMessageInfo + +type isMaintenanceWindow_Policy interface { + isMaintenanceWindow_Policy() +} + +type MaintenanceWindow_DailyMaintenanceWindow struct { + DailyMaintenanceWindow *DailyMaintenanceWindow 
`protobuf:"bytes,2,opt,name=daily_maintenance_window,json=dailyMaintenanceWindow,proto3,oneof"` +} + +type MaintenanceWindow_RecurringWindow struct { + RecurringWindow *RecurringTimeWindow `protobuf:"bytes,3,opt,name=recurring_window,json=recurringWindow,proto3,oneof"` +} + +func (*MaintenanceWindow_DailyMaintenanceWindow) isMaintenanceWindow_Policy() {} + +func (*MaintenanceWindow_RecurringWindow) isMaintenanceWindow_Policy() {} + +func (m *MaintenanceWindow) GetPolicy() isMaintenanceWindow_Policy { + if m != nil { + return m.Policy + } + return nil +} + +func (m *MaintenanceWindow) GetDailyMaintenanceWindow() *DailyMaintenanceWindow { + if x, ok := m.GetPolicy().(*MaintenanceWindow_DailyMaintenanceWindow); ok { + return x.DailyMaintenanceWindow + } + return nil +} + +func (m *MaintenanceWindow) GetRecurringWindow() *RecurringTimeWindow { + if x, ok := m.GetPolicy().(*MaintenanceWindow_RecurringWindow); ok { + return x.RecurringWindow + } + return nil +} + +func (m *MaintenanceWindow) GetMaintenanceExclusions() map[string]*TimeWindow { + if m != nil { + return m.MaintenanceExclusions + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*MaintenanceWindow) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*MaintenanceWindow_DailyMaintenanceWindow)(nil), + (*MaintenanceWindow_RecurringWindow)(nil), + } +} + +// Represents an arbitrary window of time. +type TimeWindow struct { + // The time that the window first starts. + StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The time that the window ends. The end time should take place after the + // start time. 
+ EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeWindow) Reset() { *m = TimeWindow{} } +func (m *TimeWindow) String() string { return proto.CompactTextString(m) } +func (*TimeWindow) ProtoMessage() {} +func (*TimeWindow) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{50} +} + +func (m *TimeWindow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeWindow.Unmarshal(m, b) +} +func (m *TimeWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeWindow.Marshal(b, m, deterministic) +} +func (m *TimeWindow) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeWindow.Merge(m, src) +} +func (m *TimeWindow) XXX_Size() int { + return xxx_messageInfo_TimeWindow.Size(m) +} +func (m *TimeWindow) XXX_DiscardUnknown() { + xxx_messageInfo_TimeWindow.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeWindow proto.InternalMessageInfo + +func (m *TimeWindow) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *TimeWindow) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +// Represents an arbitrary window of time that recurs. +type RecurringTimeWindow struct { + // The window of the first recurrence. + Window *TimeWindow `protobuf:"bytes,1,opt,name=window,proto3" json:"window,omitempty"` + // An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how + // this window reccurs. They go on for the span of time between the start and + // end time. 
+ // + // For example, to have something repeat every weekday, you'd use: + // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR + // To repeat some window daily (equivalent to the DailyMaintenanceWindow): + // FREQ=DAILY + // For the first weekend of every month: + // FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU + // This specifies how frequently the window starts. Eg, if you wanted to have + // a 9-5 UTC-4 window every weekday, you'd use something like: + // + // start time = 2019-01-01T09:00:00-0400 + // end time = 2019-01-01T17:00:00-0400 + // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR + // + // Windows can span multiple days. Eg, to make the window encompass every + // weekend from midnight Saturday till the last minute of Sunday UTC: + // + // start time = 2019-01-05T00:00:00Z + // end time = 2019-01-07T23:59:00Z + // recurrence = FREQ=WEEKLY;BYDAY=SA + // + // Note the start and end time's specific dates are largely arbitrary except + // to specify duration of the window and when it first starts. + // The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported. 
+ Recurrence string `protobuf:"bytes,2,opt,name=recurrence,proto3" json:"recurrence,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RecurringTimeWindow) Reset() { *m = RecurringTimeWindow{} } +func (m *RecurringTimeWindow) String() string { return proto.CompactTextString(m) } +func (*RecurringTimeWindow) ProtoMessage() {} +func (*RecurringTimeWindow) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{51} +} + +func (m *RecurringTimeWindow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RecurringTimeWindow.Unmarshal(m, b) +} +func (m *RecurringTimeWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RecurringTimeWindow.Marshal(b, m, deterministic) +} +func (m *RecurringTimeWindow) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecurringTimeWindow.Merge(m, src) +} +func (m *RecurringTimeWindow) XXX_Size() int { + return xxx_messageInfo_RecurringTimeWindow.Size(m) +} +func (m *RecurringTimeWindow) XXX_DiscardUnknown() { + xxx_messageInfo_RecurringTimeWindow.DiscardUnknown(m) +} + +var xxx_messageInfo_RecurringTimeWindow proto.InternalMessageInfo + +func (m *RecurringTimeWindow) GetWindow() *TimeWindow { + if m != nil { + return m.Window + } + return nil +} + +func (m *RecurringTimeWindow) GetRecurrence() string { + if m != nil { + return m.Recurrence + } + return "" +} + +// Time window specified for daily maintenance operations. +type DailyMaintenanceWindow struct { + // Time within the maintenance window to start the maintenance operations. + // Time format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) + // format "HH:MM", where HH : [00-23] and MM : [00-59] GMT. 
+ StartTime string `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // [Output only] Duration of the time window, automatically chosen to be + // smallest possible in the given scenario. + // Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) + // format "PTnHnMnS". + Duration string `protobuf:"bytes,3,opt,name=duration,proto3" json:"duration,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DailyMaintenanceWindow) Reset() { *m = DailyMaintenanceWindow{} } +func (m *DailyMaintenanceWindow) String() string { return proto.CompactTextString(m) } +func (*DailyMaintenanceWindow) ProtoMessage() {} +func (*DailyMaintenanceWindow) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{52} +} + +func (m *DailyMaintenanceWindow) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DailyMaintenanceWindow.Unmarshal(m, b) +} +func (m *DailyMaintenanceWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DailyMaintenanceWindow.Marshal(b, m, deterministic) +} +func (m *DailyMaintenanceWindow) XXX_Merge(src proto.Message) { + xxx_messageInfo_DailyMaintenanceWindow.Merge(m, src) +} +func (m *DailyMaintenanceWindow) XXX_Size() int { + return xxx_messageInfo_DailyMaintenanceWindow.Size(m) +} +func (m *DailyMaintenanceWindow) XXX_DiscardUnknown() { + xxx_messageInfo_DailyMaintenanceWindow.DiscardUnknown(m) +} + +var xxx_messageInfo_DailyMaintenanceWindow proto.InternalMessageInfo + +func (m *DailyMaintenanceWindow) GetStartTime() string { + if m != nil { + return m.StartTime + } + return "" +} + +func (m *DailyMaintenanceWindow) GetDuration() string { + if m != nil { + return m.Duration + } + return "" +} + +// SetNodePoolManagementRequest sets the node management properties of a node +// pool. +type SetNodePoolManagementRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to update. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to update. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // Required. NodeManagement configuration for the node pool. + Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"` + // The name (project, location, cluster, node pool id) of the node pool to set + // management properties. Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNodePoolManagementRequest) Reset() { *m = SetNodePoolManagementRequest{} } +func (m *SetNodePoolManagementRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolManagementRequest) ProtoMessage() {} +func (*SetNodePoolManagementRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{53} +} + +func (m *SetNodePoolManagementRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNodePoolManagementRequest.Unmarshal(m, b) +} +func (m *SetNodePoolManagementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNodePoolManagementRequest.Marshal(b, m, deterministic) +} +func (m *SetNodePoolManagementRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNodePoolManagementRequest.Merge(m, src) +} +func (m *SetNodePoolManagementRequest) XXX_Size() int { + return xxx_messageInfo_SetNodePoolManagementRequest.Size(m) +} +func (m *SetNodePoolManagementRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNodePoolManagementRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNodePoolManagementRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetNodePoolManagementRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolManagementRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolManagementRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetNodePoolManagementRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolManagementRequest) GetManagement() *NodeManagement { + if m != nil { + return m.Management + } + return nil +} + +func (m *SetNodePoolManagementRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetNodePoolSizeRequest sets the size a node +// pool. +type SetNodePoolSizeRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to update. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to update. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // Required. The desired node count for the pool. + NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3" json:"node_count,omitempty"` + // The name (project, location, cluster, node pool id) of the node pool to set + // size. + // Specified in the format 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNodePoolSizeRequest) Reset() { *m = SetNodePoolSizeRequest{} } +func (m *SetNodePoolSizeRequest) String() string { return proto.CompactTextString(m) } +func (*SetNodePoolSizeRequest) ProtoMessage() {} +func (*SetNodePoolSizeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{54} +} + +func (m *SetNodePoolSizeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNodePoolSizeRequest.Unmarshal(m, b) +} +func (m *SetNodePoolSizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNodePoolSizeRequest.Marshal(b, m, deterministic) +} +func (m *SetNodePoolSizeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNodePoolSizeRequest.Merge(m, src) +} +func (m *SetNodePoolSizeRequest) XXX_Size() int { + return xxx_messageInfo_SetNodePoolSizeRequest.Size(m) +} +func (m *SetNodePoolSizeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNodePoolSizeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNodePoolSizeRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetNodePoolSizeRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolSizeRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNodePoolSizeRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetNodePoolSizeRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *SetNodePoolSizeRequest) GetNodeCount() int32 { + if m != nil { + return m.NodeCount + } + return 0 +} + +func (m *SetNodePoolSizeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed +// NodePool upgrade. This will be an no-op if the last upgrade successfully +// completed. +type RollbackNodePoolUpgradeRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to rollback. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the node pool to rollback. + // This field has been deprecated and replaced by the name field. + NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster, node pool id) of the node poll to + // rollback upgrade. + // Specified in the format 'projects/*/locations/*/clusters/*/nodePools/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RollbackNodePoolUpgradeRequest) Reset() { *m = RollbackNodePoolUpgradeRequest{} } +func (m *RollbackNodePoolUpgradeRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackNodePoolUpgradeRequest) ProtoMessage() {} +func (*RollbackNodePoolUpgradeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{55} +} + +func (m *RollbackNodePoolUpgradeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Unmarshal(m, b) +} +func (m *RollbackNodePoolUpgradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Marshal(b, m, deterministic) +} +func (m *RollbackNodePoolUpgradeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackNodePoolUpgradeRequest.Merge(m, src) +} +func (m *RollbackNodePoolUpgradeRequest) XXX_Size() int { + return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Size(m) +} +func (m *RollbackNodePoolUpgradeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackNodePoolUpgradeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackNodePoolUpgradeRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *RollbackNodePoolUpgradeRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *RollbackNodePoolUpgradeRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *RollbackNodePoolUpgradeRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +// Deprecated: Do not use. 
+func (m *RollbackNodePoolUpgradeRequest) GetNodePoolId() string { + if m != nil { + return m.NodePoolId + } + return "" +} + +func (m *RollbackNodePoolUpgradeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ListNodePoolsResponse is the result of ListNodePoolsRequest. +type ListNodePoolsResponse struct { + // A list of node pools for a cluster. + NodePools []*NodePool `protobuf:"bytes,1,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNodePoolsResponse) Reset() { *m = ListNodePoolsResponse{} } +func (m *ListNodePoolsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNodePoolsResponse) ProtoMessage() {} +func (*ListNodePoolsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{56} +} + +func (m *ListNodePoolsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNodePoolsResponse.Unmarshal(m, b) +} +func (m *ListNodePoolsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNodePoolsResponse.Marshal(b, m, deterministic) +} +func (m *ListNodePoolsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNodePoolsResponse.Merge(m, src) +} +func (m *ListNodePoolsResponse) XXX_Size() int { + return xxx_messageInfo_ListNodePoolsResponse.Size(m) +} +func (m *ListNodePoolsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNodePoolsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNodePoolsResponse proto.InternalMessageInfo + +func (m *ListNodePoolsResponse) GetNodePools() []*NodePool { + if m != nil { + return m.NodePools + } + return nil +} + +// ClusterAutoscaling contains global, per-cluster information +// required by Cluster Autoscaler to automatically adjust +// the size of the cluster and create/delete +// node pools based on the current needs. 
+type ClusterAutoscaling struct { + // Enables automatic node pool creation and deletion. + EnableNodeAutoprovisioning bool `protobuf:"varint,1,opt,name=enable_node_autoprovisioning,json=enableNodeAutoprovisioning,proto3" json:"enable_node_autoprovisioning,omitempty"` + // Contains global constraints regarding minimum and maximum + // amount of resources in the cluster. + ResourceLimits []*ResourceLimit `protobuf:"bytes,2,rep,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"` + // AutoprovisioningNodePoolDefaults contains defaults for a node pool + // created by NAP. + AutoprovisioningNodePoolDefaults *AutoprovisioningNodePoolDefaults `protobuf:"bytes,4,opt,name=autoprovisioning_node_pool_defaults,json=autoprovisioningNodePoolDefaults,proto3" json:"autoprovisioning_node_pool_defaults,omitempty"` + // The list of Google Compute Engine [zones](/compute/docs/zones#available) + // in which the NodePool's nodes can be created by NAP. + AutoprovisioningLocations []string `protobuf:"bytes,5,rep,name=autoprovisioning_locations,json=autoprovisioningLocations,proto3" json:"autoprovisioning_locations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterAutoscaling) Reset() { *m = ClusterAutoscaling{} } +func (m *ClusterAutoscaling) String() string { return proto.CompactTextString(m) } +func (*ClusterAutoscaling) ProtoMessage() {} +func (*ClusterAutoscaling) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{57} +} + +func (m *ClusterAutoscaling) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterAutoscaling.Unmarshal(m, b) +} +func (m *ClusterAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterAutoscaling.Marshal(b, m, deterministic) +} +func (m *ClusterAutoscaling) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterAutoscaling.Merge(m, src) +} +func (m 
*ClusterAutoscaling) XXX_Size() int { + return xxx_messageInfo_ClusterAutoscaling.Size(m) +} +func (m *ClusterAutoscaling) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterAutoscaling.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterAutoscaling proto.InternalMessageInfo + +func (m *ClusterAutoscaling) GetEnableNodeAutoprovisioning() bool { + if m != nil { + return m.EnableNodeAutoprovisioning + } + return false +} + +func (m *ClusterAutoscaling) GetResourceLimits() []*ResourceLimit { + if m != nil { + return m.ResourceLimits + } + return nil +} + +func (m *ClusterAutoscaling) GetAutoprovisioningNodePoolDefaults() *AutoprovisioningNodePoolDefaults { + if m != nil { + return m.AutoprovisioningNodePoolDefaults + } + return nil +} + +func (m *ClusterAutoscaling) GetAutoprovisioningLocations() []string { + if m != nil { + return m.AutoprovisioningLocations + } + return nil +} + +// AutoprovisioningNodePoolDefaults contains defaults for a node pool created +// by NAP. +type AutoprovisioningNodePoolDefaults struct { + // Scopes that are used by NAP when creating node pools. If oauth_scopes are + // specified, service_account should be empty. + OauthScopes []string `protobuf:"bytes,1,rep,name=oauth_scopes,json=oauthScopes,proto3" json:"oauth_scopes,omitempty"` + // The Google Cloud Platform Service Account to be used by the node VMs. If + // service_account is specified, scopes should be empty. 
+ ServiceAccount string `protobuf:"bytes,2,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AutoprovisioningNodePoolDefaults) Reset() { *m = AutoprovisioningNodePoolDefaults{} } +func (m *AutoprovisioningNodePoolDefaults) String() string { return proto.CompactTextString(m) } +func (*AutoprovisioningNodePoolDefaults) ProtoMessage() {} +func (*AutoprovisioningNodePoolDefaults) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{58} +} + +func (m *AutoprovisioningNodePoolDefaults) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AutoprovisioningNodePoolDefaults.Unmarshal(m, b) +} +func (m *AutoprovisioningNodePoolDefaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AutoprovisioningNodePoolDefaults.Marshal(b, m, deterministic) +} +func (m *AutoprovisioningNodePoolDefaults) XXX_Merge(src proto.Message) { + xxx_messageInfo_AutoprovisioningNodePoolDefaults.Merge(m, src) +} +func (m *AutoprovisioningNodePoolDefaults) XXX_Size() int { + return xxx_messageInfo_AutoprovisioningNodePoolDefaults.Size(m) +} +func (m *AutoprovisioningNodePoolDefaults) XXX_DiscardUnknown() { + xxx_messageInfo_AutoprovisioningNodePoolDefaults.DiscardUnknown(m) +} + +var xxx_messageInfo_AutoprovisioningNodePoolDefaults proto.InternalMessageInfo + +func (m *AutoprovisioningNodePoolDefaults) GetOauthScopes() []string { + if m != nil { + return m.OauthScopes + } + return nil +} + +func (m *AutoprovisioningNodePoolDefaults) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +// Contains information about amount of some resource in the cluster. +// For memory, value should be in GB. +type ResourceLimit struct { + // Resource name "cpu", "memory" or gpu-specific string. 
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // Minimum amount of the resource in the cluster. + Minimum int64 `protobuf:"varint,2,opt,name=minimum,proto3" json:"minimum,omitempty"` + // Maximum amount of the resource in the cluster. + Maximum int64 `protobuf:"varint,3,opt,name=maximum,proto3" json:"maximum,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceLimit) Reset() { *m = ResourceLimit{} } +func (m *ResourceLimit) String() string { return proto.CompactTextString(m) } +func (*ResourceLimit) ProtoMessage() {} +func (*ResourceLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{59} +} + +func (m *ResourceLimit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceLimit.Unmarshal(m, b) +} +func (m *ResourceLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceLimit.Marshal(b, m, deterministic) +} +func (m *ResourceLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceLimit.Merge(m, src) +} +func (m *ResourceLimit) XXX_Size() int { + return xxx_messageInfo_ResourceLimit.Size(m) +} +func (m *ResourceLimit) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceLimit proto.InternalMessageInfo + +func (m *ResourceLimit) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +func (m *ResourceLimit) GetMinimum() int64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *ResourceLimit) GetMaximum() int64 { + if m != nil { + return m.Maximum + } + return 0 +} + +// NodePoolAutoscaling contains information required by cluster autoscaler to +// adjust the size of the node pool to the current cluster usage. +type NodePoolAutoscaling struct { + // Is autoscaling enabled for this node pool. 
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Minimum number of nodes in the NodePool. Must be >= 1 and <= + // max_node_count. + MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount,proto3" json:"min_node_count,omitempty"` + // Maximum number of nodes in the NodePool. Must be >= min_node_count. There + // has to enough quota to scale up the cluster. + MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount,proto3" json:"max_node_count,omitempty"` + // Can this node pool be deleted automatically. + Autoprovisioned bool `protobuf:"varint,4,opt,name=autoprovisioned,proto3" json:"autoprovisioned,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePoolAutoscaling) Reset() { *m = NodePoolAutoscaling{} } +func (m *NodePoolAutoscaling) String() string { return proto.CompactTextString(m) } +func (*NodePoolAutoscaling) ProtoMessage() {} +func (*NodePoolAutoscaling) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{60} +} + +func (m *NodePoolAutoscaling) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePoolAutoscaling.Unmarshal(m, b) +} +func (m *NodePoolAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePoolAutoscaling.Marshal(b, m, deterministic) +} +func (m *NodePoolAutoscaling) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePoolAutoscaling.Merge(m, src) +} +func (m *NodePoolAutoscaling) XXX_Size() int { + return xxx_messageInfo_NodePoolAutoscaling.Size(m) +} +func (m *NodePoolAutoscaling) XXX_DiscardUnknown() { + xxx_messageInfo_NodePoolAutoscaling.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePoolAutoscaling proto.InternalMessageInfo + +func (m *NodePoolAutoscaling) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *NodePoolAutoscaling) 
GetMinNodeCount() int32 { + if m != nil { + return m.MinNodeCount + } + return 0 +} + +func (m *NodePoolAutoscaling) GetMaxNodeCount() int32 { + if m != nil { + return m.MaxNodeCount + } + return 0 +} + +func (m *NodePoolAutoscaling) GetAutoprovisioned() bool { + if m != nil { + return m.Autoprovisioned + } + return false +} + +// SetLabelsRequest sets the Google Cloud Platform labels on a Google Container +// Engine cluster, which will in turn set them for Google Compute Engine +// resources used by that cluster +type SetLabelsRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. The labels to set for that cluster. + ResourceLabels map[string]string `protobuf:"bytes,4,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Required. The fingerprint of the previous set of labels for this resource, + // used to detect conflicts. The fingerprint is initially generated by + // Kubernetes Engine and changes after every request to modify or update + // labels. 
You must always provide an up-to-date fingerprint hash when + // updating or changing labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `protobuf:"bytes,5,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"` + // The name (project, location, cluster id) of the cluster to set labels. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetLabelsRequest) Reset() { *m = SetLabelsRequest{} } +func (m *SetLabelsRequest) String() string { return proto.CompactTextString(m) } +func (*SetLabelsRequest) ProtoMessage() {} +func (*SetLabelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{61} +} + +func (m *SetLabelsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetLabelsRequest.Unmarshal(m, b) +} +func (m *SetLabelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetLabelsRequest.Marshal(b, m, deterministic) +} +func (m *SetLabelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetLabelsRequest.Merge(m, src) +} +func (m *SetLabelsRequest) XXX_Size() int { + return xxx_messageInfo_SetLabelsRequest.Size(m) +} +func (m *SetLabelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetLabelsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetLabelsRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetLabelsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetLabelsRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetLabelsRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLabelsRequest) GetResourceLabels() map[string]string { + if m != nil { + return m.ResourceLabels + } + return nil +} + +func (m *SetLabelsRequest) GetLabelFingerprint() string { + if m != nil { + return m.LabelFingerprint + } + return "" +} + +func (m *SetLabelsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for +// a cluster. +type SetLegacyAbacRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster to update. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. Whether ABAC authorization will be enabled in the cluster. + Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"` + // The name (project, location, cluster id) of the cluster to set legacy abac. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetLegacyAbacRequest) Reset() { *m = SetLegacyAbacRequest{} } +func (m *SetLegacyAbacRequest) String() string { return proto.CompactTextString(m) } +func (*SetLegacyAbacRequest) ProtoMessage() {} +func (*SetLegacyAbacRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{62} +} + +func (m *SetLegacyAbacRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetLegacyAbacRequest.Unmarshal(m, b) +} +func (m *SetLegacyAbacRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetLegacyAbacRequest.Marshal(b, m, deterministic) +} +func (m *SetLegacyAbacRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetLegacyAbacRequest.Merge(m, src) +} +func (m *SetLegacyAbacRequest) XXX_Size() int { + return xxx_messageInfo_SetLegacyAbacRequest.Size(m) +} +func (m *SetLegacyAbacRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetLegacyAbacRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetLegacyAbacRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetLegacyAbacRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetLegacyAbacRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetLegacyAbacRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetLegacyAbacRequest) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *SetLegacyAbacRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// StartIPRotationRequest creates a new IP for the cluster and then performs +// a node upgrade on each node pool to point to the new IP. +type StartIPRotationRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster id) of the cluster to start IP + // rotation. Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // Whether to rotate credentials during IP rotation. 
+ RotateCredentials bool `protobuf:"varint,7,opt,name=rotate_credentials,json=rotateCredentials,proto3" json:"rotate_credentials,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartIPRotationRequest) Reset() { *m = StartIPRotationRequest{} } +func (m *StartIPRotationRequest) String() string { return proto.CompactTextString(m) } +func (*StartIPRotationRequest) ProtoMessage() {} +func (*StartIPRotationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{63} +} + +func (m *StartIPRotationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartIPRotationRequest.Unmarshal(m, b) +} +func (m *StartIPRotationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartIPRotationRequest.Marshal(b, m, deterministic) +} +func (m *StartIPRotationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartIPRotationRequest.Merge(m, src) +} +func (m *StartIPRotationRequest) XXX_Size() int { + return xxx_messageInfo_StartIPRotationRequest.Size(m) +} +func (m *StartIPRotationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartIPRotationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartIPRotationRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *StartIPRotationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *StartIPRotationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *StartIPRotationRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *StartIPRotationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StartIPRotationRequest) GetRotateCredentials() bool { + if m != nil { + return m.RotateCredentials + } + return false +} + +// CompleteIPRotationRequest moves the cluster master back into single-IP mode. +type CompleteIPRotationRequest struct { + // Deprecated. The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // The name (project, location, cluster id) of the cluster to complete IP + // rotation. Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompleteIPRotationRequest) Reset() { *m = CompleteIPRotationRequest{} } +func (m *CompleteIPRotationRequest) String() string { return proto.CompactTextString(m) } +func (*CompleteIPRotationRequest) ProtoMessage() {} +func (*CompleteIPRotationRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{64} +} + +func (m *CompleteIPRotationRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompleteIPRotationRequest.Unmarshal(m, b) +} +func (m *CompleteIPRotationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompleteIPRotationRequest.Marshal(b, m, deterministic) +} +func (m *CompleteIPRotationRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteIPRotationRequest.Merge(m, src) +} +func (m *CompleteIPRotationRequest) XXX_Size() int { + return xxx_messageInfo_CompleteIPRotationRequest.Size(m) +} +func (m *CompleteIPRotationRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteIPRotationRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompleteIPRotationRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *CompleteIPRotationRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *CompleteIPRotationRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. +func (m *CompleteIPRotationRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *CompleteIPRotationRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// AcceleratorConfig represents a Hardware Accelerator request. 
+type AcceleratorConfig struct { + // The number of the accelerator cards exposed to an instance. + AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"` + // The accelerator type resource name. List of supported accelerators + // [here](/compute/docs/gpus) + AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceleratorConfig) Reset() { *m = AcceleratorConfig{} } +func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) } +func (*AcceleratorConfig) ProtoMessage() {} +func (*AcceleratorConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{65} +} + +func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceleratorConfig.Unmarshal(m, b) +} +func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceleratorConfig.Marshal(b, m, deterministic) +} +func (m *AcceleratorConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceleratorConfig.Merge(m, src) +} +func (m *AcceleratorConfig) XXX_Size() int { + return xxx_messageInfo_AcceleratorConfig.Size(m) +} +func (m *AcceleratorConfig) XXX_DiscardUnknown() { + xxx_messageInfo_AcceleratorConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceleratorConfig proto.InternalMessageInfo + +func (m *AcceleratorConfig) GetAcceleratorCount() int64 { + if m != nil { + return m.AcceleratorCount + } + return 0 +} + +func (m *AcceleratorConfig) GetAcceleratorType() string { + if m != nil { + return m.AcceleratorType + } + return "" +} + +// SetNetworkPolicyRequest enables/disables network policy for a cluster. +type SetNetworkPolicyRequest struct { + // Deprecated. 
The Google Developers Console [project ID or project + // number](https://developers.google.com/console/help/new/#projectnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use. + // Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use. + // Required. Configuration options for the NetworkPolicy feature. + NetworkPolicy *NetworkPolicy `protobuf:"bytes,4,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"` + // The name (project, location, cluster id) of the cluster to set networking + // policy. Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNetworkPolicyRequest) Reset() { *m = SetNetworkPolicyRequest{} } +func (m *SetNetworkPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*SetNetworkPolicyRequest) ProtoMessage() {} +func (*SetNetworkPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{66} +} + +func (m *SetNetworkPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNetworkPolicyRequest.Unmarshal(m, b) +} +func (m *SetNetworkPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNetworkPolicyRequest.Marshal(b, m, deterministic) +} +func (m *SetNetworkPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNetworkPolicyRequest.Merge(m, src) +} +func (m *SetNetworkPolicyRequest) XXX_Size() int { + return xxx_messageInfo_SetNetworkPolicyRequest.Size(m) +} +func (m *SetNetworkPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNetworkPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNetworkPolicyRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *SetNetworkPolicyRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +// Deprecated: Do not use. +func (m *SetNetworkPolicyRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +// Deprecated: Do not use. 
+func (m *SetNetworkPolicyRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetNetworkPolicyRequest) GetNetworkPolicy() *NetworkPolicy { + if m != nil { + return m.NetworkPolicy + } + return nil +} + +func (m *SetNetworkPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// SetMaintenancePolicyRequest sets the maintenance policy for a cluster. +type SetMaintenancePolicyRequest struct { + // Required. The Google Developers Console [project ID or project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // Required. The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` + // Required. The name of the cluster to update. + ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // Required. The maintenance policy to be set for the cluster. An empty field + // clears the existing maintenance policy. + MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,4,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"` + // The name (project, location, cluster id) of the cluster to set maintenance + // policy. + // Specified in the format 'projects/*/locations/*/clusters/*'. 
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetMaintenancePolicyRequest) Reset() { *m = SetMaintenancePolicyRequest{} } +func (m *SetMaintenancePolicyRequest) String() string { return proto.CompactTextString(m) } +func (*SetMaintenancePolicyRequest) ProtoMessage() {} +func (*SetMaintenancePolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{67} +} + +func (m *SetMaintenancePolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetMaintenancePolicyRequest.Unmarshal(m, b) +} +func (m *SetMaintenancePolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetMaintenancePolicyRequest.Marshal(b, m, deterministic) +} +func (m *SetMaintenancePolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetMaintenancePolicyRequest.Merge(m, src) +} +func (m *SetMaintenancePolicyRequest) XXX_Size() int { + return xxx_messageInfo_SetMaintenancePolicyRequest.Size(m) +} +func (m *SetMaintenancePolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetMaintenancePolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetMaintenancePolicyRequest proto.InternalMessageInfo + +func (m *SetMaintenancePolicyRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SetMaintenancePolicyRequest) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *SetMaintenancePolicyRequest) GetClusterId() string { + if m != nil { + return m.ClusterId + } + return "" +} + +func (m *SetMaintenancePolicyRequest) GetMaintenancePolicy() *MaintenancePolicy { + if m != nil { + return m.MaintenancePolicy + } + return nil +} + +func (m *SetMaintenancePolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// StatusCondition describes why a 
cluster or a node pool has a certain status +// (e.g., ERROR or DEGRADED). +type StatusCondition struct { + // Machine-friendly representation of the condition + Code StatusCondition_Code `protobuf:"varint,1,opt,name=code,proto3,enum=google.container.v1.StatusCondition_Code" json:"code,omitempty"` + // Human-friendly representation of the condition + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusCondition) Reset() { *m = StatusCondition{} } +func (m *StatusCondition) String() string { return proto.CompactTextString(m) } +func (*StatusCondition) ProtoMessage() {} +func (*StatusCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{68} +} + +func (m *StatusCondition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatusCondition.Unmarshal(m, b) +} +func (m *StatusCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatusCondition.Marshal(b, m, deterministic) +} +func (m *StatusCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCondition.Merge(m, src) +} +func (m *StatusCondition) XXX_Size() int { + return xxx_messageInfo_StatusCondition.Size(m) +} +func (m *StatusCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCondition proto.InternalMessageInfo + +func (m *StatusCondition) GetCode() StatusCondition_Code { + if m != nil { + return m.Code + } + return StatusCondition_UNKNOWN +} + +func (m *StatusCondition) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// NetworkConfig reports the relative names of network & subnetwork. +type NetworkConfig struct { + // Output only. 
The relative name of the Google Compute Engine + // [network][google.container.v1.NetworkConfig.network](/compute/docs/networks-and-firewalls#networks) to which + // the cluster is connected. + // Example: projects/my-project/global/networks/my-network + Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"` + // Output only. The relative name of the Google Compute Engine + // [subnetwork](/compute/docs/vpc) to which the cluster is connected. + // Example: projects/my-project/regions/us-central1/subnetworks/my-subnet + Subnetwork string `protobuf:"bytes,2,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` + // Whether Intra-node visibility is enabled for this cluster. + // This makes same node pod to pod traffic visible for VPC network. + EnableIntraNodeVisibility bool `protobuf:"varint,5,opt,name=enable_intra_node_visibility,json=enableIntraNodeVisibility,proto3" json:"enable_intra_node_visibility,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkConfig) Reset() { *m = NetworkConfig{} } +func (m *NetworkConfig) String() string { return proto.CompactTextString(m) } +func (*NetworkConfig) ProtoMessage() {} +func (*NetworkConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{69} +} + +func (m *NetworkConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetworkConfig.Unmarshal(m, b) +} +func (m *NetworkConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetworkConfig.Marshal(b, m, deterministic) +} +func (m *NetworkConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkConfig.Merge(m, src) +} +func (m *NetworkConfig) XXX_Size() int { + return xxx_messageInfo_NetworkConfig.Size(m) +} +func (m *NetworkConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkConfig proto.InternalMessageInfo 
+ +func (m *NetworkConfig) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *NetworkConfig) GetSubnetwork() string { + if m != nil { + return m.Subnetwork + } + return "" +} + +func (m *NetworkConfig) GetEnableIntraNodeVisibility() bool { + if m != nil { + return m.EnableIntraNodeVisibility + } + return false +} + +// IntraNodeVisibilityConfig contains the desired config of the intra-node +// visibility on this cluster. +type IntraNodeVisibilityConfig struct { + // Enables intra node visibility for this cluster. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntraNodeVisibilityConfig) Reset() { *m = IntraNodeVisibilityConfig{} } +func (m *IntraNodeVisibilityConfig) String() string { return proto.CompactTextString(m) } +func (*IntraNodeVisibilityConfig) ProtoMessage() {} +func (*IntraNodeVisibilityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{70} +} + +func (m *IntraNodeVisibilityConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntraNodeVisibilityConfig.Unmarshal(m, b) +} +func (m *IntraNodeVisibilityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntraNodeVisibilityConfig.Marshal(b, m, deterministic) +} +func (m *IntraNodeVisibilityConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntraNodeVisibilityConfig.Merge(m, src) +} +func (m *IntraNodeVisibilityConfig) XXX_Size() int { + return xxx_messageInfo_IntraNodeVisibilityConfig.Size(m) +} +func (m *IntraNodeVisibilityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_IntraNodeVisibilityConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_IntraNodeVisibilityConfig proto.InternalMessageInfo + +func (m *IntraNodeVisibilityConfig) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// 
Constraints applied to pods. +type MaxPodsConstraint struct { + // Constraint enforced on the max num of pods per node. + MaxPodsPerNode int64 `protobuf:"varint,1,opt,name=max_pods_per_node,json=maxPodsPerNode,proto3" json:"max_pods_per_node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaxPodsConstraint) Reset() { *m = MaxPodsConstraint{} } +func (m *MaxPodsConstraint) String() string { return proto.CompactTextString(m) } +func (*MaxPodsConstraint) ProtoMessage() {} +func (*MaxPodsConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{71} +} + +func (m *MaxPodsConstraint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaxPodsConstraint.Unmarshal(m, b) +} +func (m *MaxPodsConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaxPodsConstraint.Marshal(b, m, deterministic) +} +func (m *MaxPodsConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaxPodsConstraint.Merge(m, src) +} +func (m *MaxPodsConstraint) XXX_Size() int { + return xxx_messageInfo_MaxPodsConstraint.Size(m) +} +func (m *MaxPodsConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_MaxPodsConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_MaxPodsConstraint proto.InternalMessageInfo + +func (m *MaxPodsConstraint) GetMaxPodsPerNode() int64 { + if m != nil { + return m.MaxPodsPerNode + } + return 0 +} + +// Configuration of etcd encryption. +type DatabaseEncryption struct { + // Denotes the state of etcd encryption. + State DatabaseEncryption_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.container.v1.DatabaseEncryption_State" json:"state,omitempty"` + // Name of CloudKMS key to use for the encryption of secrets in etcd. + // Ex. 
projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key + KeyName string `protobuf:"bytes,1,opt,name=key_name,json=keyName,proto3" json:"key_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DatabaseEncryption) Reset() { *m = DatabaseEncryption{} } +func (m *DatabaseEncryption) String() string { return proto.CompactTextString(m) } +func (*DatabaseEncryption) ProtoMessage() {} +func (*DatabaseEncryption) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{72} +} + +func (m *DatabaseEncryption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DatabaseEncryption.Unmarshal(m, b) +} +func (m *DatabaseEncryption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DatabaseEncryption.Marshal(b, m, deterministic) +} +func (m *DatabaseEncryption) XXX_Merge(src proto.Message) { + xxx_messageInfo_DatabaseEncryption.Merge(m, src) +} +func (m *DatabaseEncryption) XXX_Size() int { + return xxx_messageInfo_DatabaseEncryption.Size(m) +} +func (m *DatabaseEncryption) XXX_DiscardUnknown() { + xxx_messageInfo_DatabaseEncryption.DiscardUnknown(m) +} + +var xxx_messageInfo_DatabaseEncryption proto.InternalMessageInfo + +func (m *DatabaseEncryption) GetState() DatabaseEncryption_State { + if m != nil { + return m.State + } + return DatabaseEncryption_UNKNOWN +} + +func (m *DatabaseEncryption) GetKeyName() string { + if m != nil { + return m.KeyName + } + return "" +} + +// ListUsableSubnetworksRequest requests the list of usable subnetworks +// available to a user for creating clusters. +type ListUsableSubnetworksRequest struct { + // The parent project where subnetworks are usable. + // Specified in the format 'projects/*'. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Filtering currently only supports equality on the networkProjectId and must + // be in the form: "networkProjectId=[PROJECTID]", where `networkProjectId` + // is the project which owns the listed subnetworks. This defaults to the + // parent project ID. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The max number of results per page that should be returned. If the number + // of available results is larger than `page_size`, a `next_page_token` is + // returned which can be used to get the next page of results in subsequent + // requests. Acceptable values are 0 to 500, inclusive. (Default: 500) + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Specifies a page token to use. Set this to the nextPageToken returned by + // previous list requests to get the next page of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsableSubnetworksRequest) Reset() { *m = ListUsableSubnetworksRequest{} } +func (m *ListUsableSubnetworksRequest) String() string { return proto.CompactTextString(m) } +func (*ListUsableSubnetworksRequest) ProtoMessage() {} +func (*ListUsableSubnetworksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{73} +} + +func (m *ListUsableSubnetworksRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsableSubnetworksRequest.Unmarshal(m, b) +} +func (m *ListUsableSubnetworksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsableSubnetworksRequest.Marshal(b, m, deterministic) +} +func (m *ListUsableSubnetworksRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ListUsableSubnetworksRequest.Merge(m, src) +} +func (m *ListUsableSubnetworksRequest) XXX_Size() int { + return xxx_messageInfo_ListUsableSubnetworksRequest.Size(m) +} +func (m *ListUsableSubnetworksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsableSubnetworksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsableSubnetworksRequest proto.InternalMessageInfo + +func (m *ListUsableSubnetworksRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListUsableSubnetworksRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListUsableSubnetworksRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUsableSubnetworksRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// ListUsableSubnetworksResponse is the response of +// ListUsableSubnetworksRequest. +type ListUsableSubnetworksResponse struct { + // A list of usable subnetworks in the specified network project. + Subnetworks []*UsableSubnetwork `protobuf:"bytes,1,rep,name=subnetworks,proto3" json:"subnetworks,omitempty"` + // This token allows you to get the next page of results for list requests. + // If the number of results is larger than `page_size`, use the + // `next_page_token` as a value for the query parameter `page_token` in the + // next request. The value will become empty when there are no more pages. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUsableSubnetworksResponse) Reset() { *m = ListUsableSubnetworksResponse{} } +func (m *ListUsableSubnetworksResponse) String() string { return proto.CompactTextString(m) } +func (*ListUsableSubnetworksResponse) ProtoMessage() {} +func (*ListUsableSubnetworksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{74} +} + +func (m *ListUsableSubnetworksResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUsableSubnetworksResponse.Unmarshal(m, b) +} +func (m *ListUsableSubnetworksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUsableSubnetworksResponse.Marshal(b, m, deterministic) +} +func (m *ListUsableSubnetworksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUsableSubnetworksResponse.Merge(m, src) +} +func (m *ListUsableSubnetworksResponse) XXX_Size() int { + return xxx_messageInfo_ListUsableSubnetworksResponse.Size(m) +} +func (m *ListUsableSubnetworksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUsableSubnetworksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUsableSubnetworksResponse proto.InternalMessageInfo + +func (m *ListUsableSubnetworksResponse) GetSubnetworks() []*UsableSubnetwork { + if m != nil { + return m.Subnetworks + } + return nil +} + +func (m *ListUsableSubnetworksResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Secondary IP range of a usable subnetwork. +type UsableSubnetworkSecondaryRange struct { + // The name associated with this subnetwork secondary range, used when adding + // an alias IP range to a VM instance. 
+ RangeName string `protobuf:"bytes,1,opt,name=range_name,json=rangeName,proto3" json:"range_name,omitempty"` + // The range of IP addresses belonging to this subnetwork secondary range. + IpCidrRange string `protobuf:"bytes,2,opt,name=ip_cidr_range,json=ipCidrRange,proto3" json:"ip_cidr_range,omitempty"` + // This field is to determine the status of the secondary range programmably. + Status UsableSubnetworkSecondaryRange_Status `protobuf:"varint,3,opt,name=status,proto3,enum=google.container.v1.UsableSubnetworkSecondaryRange_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UsableSubnetworkSecondaryRange) Reset() { *m = UsableSubnetworkSecondaryRange{} } +func (m *UsableSubnetworkSecondaryRange) String() string { return proto.CompactTextString(m) } +func (*UsableSubnetworkSecondaryRange) ProtoMessage() {} +func (*UsableSubnetworkSecondaryRange) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{75} +} + +func (m *UsableSubnetworkSecondaryRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UsableSubnetworkSecondaryRange.Unmarshal(m, b) +} +func (m *UsableSubnetworkSecondaryRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UsableSubnetworkSecondaryRange.Marshal(b, m, deterministic) +} +func (m *UsableSubnetworkSecondaryRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsableSubnetworkSecondaryRange.Merge(m, src) +} +func (m *UsableSubnetworkSecondaryRange) XXX_Size() int { + return xxx_messageInfo_UsableSubnetworkSecondaryRange.Size(m) +} +func (m *UsableSubnetworkSecondaryRange) XXX_DiscardUnknown() { + xxx_messageInfo_UsableSubnetworkSecondaryRange.DiscardUnknown(m) +} + +var xxx_messageInfo_UsableSubnetworkSecondaryRange proto.InternalMessageInfo + +func (m *UsableSubnetworkSecondaryRange) GetRangeName() string { + if m != nil { + return m.RangeName + } + return "" 
+} + +func (m *UsableSubnetworkSecondaryRange) GetIpCidrRange() string { + if m != nil { + return m.IpCidrRange + } + return "" +} + +func (m *UsableSubnetworkSecondaryRange) GetStatus() UsableSubnetworkSecondaryRange_Status { + if m != nil { + return m.Status + } + return UsableSubnetworkSecondaryRange_UNKNOWN +} + +// UsableSubnetwork resource returns the subnetwork name, its associated network +// and the primary CIDR range. +type UsableSubnetwork struct { + // Subnetwork Name. + // Example: projects/my-project/regions/us-central1/subnetworks/my-subnet + Subnetwork string `protobuf:"bytes,1,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"` + // Network Name. + // Example: projects/my-project/global/networks/my-network + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + // The range of internal addresses that are owned by this subnetwork. + IpCidrRange string `protobuf:"bytes,3,opt,name=ip_cidr_range,json=ipCidrRange,proto3" json:"ip_cidr_range,omitempty"` + // Secondary IP ranges. + SecondaryIpRanges []*UsableSubnetworkSecondaryRange `protobuf:"bytes,4,rep,name=secondary_ip_ranges,json=secondaryIpRanges,proto3" json:"secondary_ip_ranges,omitempty"` + // A human readable status message representing the reasons for cases where + // the caller cannot use the secondary ranges under the subnet. For example if + // the secondary_ip_ranges is empty due to a permission issue, an insufficient + // permission message will be given by status_message. 
+ StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UsableSubnetwork) Reset() { *m = UsableSubnetwork{} } +func (m *UsableSubnetwork) String() string { return proto.CompactTextString(m) } +func (*UsableSubnetwork) ProtoMessage() {} +func (*UsableSubnetwork) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{76} +} + +func (m *UsableSubnetwork) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UsableSubnetwork.Unmarshal(m, b) +} +func (m *UsableSubnetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UsableSubnetwork.Marshal(b, m, deterministic) +} +func (m *UsableSubnetwork) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsableSubnetwork.Merge(m, src) +} +func (m *UsableSubnetwork) XXX_Size() int { + return xxx_messageInfo_UsableSubnetwork.Size(m) +} +func (m *UsableSubnetwork) XXX_DiscardUnknown() { + xxx_messageInfo_UsableSubnetwork.DiscardUnknown(m) +} + +var xxx_messageInfo_UsableSubnetwork proto.InternalMessageInfo + +func (m *UsableSubnetwork) GetSubnetwork() string { + if m != nil { + return m.Subnetwork + } + return "" +} + +func (m *UsableSubnetwork) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *UsableSubnetwork) GetIpCidrRange() string { + if m != nil { + return m.IpCidrRange + } + return "" +} + +func (m *UsableSubnetwork) GetSecondaryIpRanges() []*UsableSubnetworkSecondaryRange { + if m != nil { + return m.SecondaryIpRanges + } + return nil +} + +func (m *UsableSubnetwork) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +// Configuration for exporting cluster resource usages. +type ResourceUsageExportConfig struct { + // Configuration to use BigQuery as usage export destination. 
+ BigqueryDestination *ResourceUsageExportConfig_BigQueryDestination `protobuf:"bytes,1,opt,name=bigquery_destination,json=bigqueryDestination,proto3" json:"bigquery_destination,omitempty"` + // Whether to enable network egress metering for this cluster. If enabled, a + // daemonset will be created in the cluster to meter network egress traffic. + EnableNetworkEgressMetering bool `protobuf:"varint,2,opt,name=enable_network_egress_metering,json=enableNetworkEgressMetering,proto3" json:"enable_network_egress_metering,omitempty"` + // Configuration to enable resource consumption metering. + ConsumptionMeteringConfig *ResourceUsageExportConfig_ConsumptionMeteringConfig `protobuf:"bytes,3,opt,name=consumption_metering_config,json=consumptionMeteringConfig,proto3" json:"consumption_metering_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceUsageExportConfig) Reset() { *m = ResourceUsageExportConfig{} } +func (m *ResourceUsageExportConfig) String() string { return proto.CompactTextString(m) } +func (*ResourceUsageExportConfig) ProtoMessage() {} +func (*ResourceUsageExportConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{77} +} + +func (m *ResourceUsageExportConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceUsageExportConfig.Unmarshal(m, b) +} +func (m *ResourceUsageExportConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceUsageExportConfig.Marshal(b, m, deterministic) +} +func (m *ResourceUsageExportConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceUsageExportConfig.Merge(m, src) +} +func (m *ResourceUsageExportConfig) XXX_Size() int { + return xxx_messageInfo_ResourceUsageExportConfig.Size(m) +} +func (m *ResourceUsageExportConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceUsageExportConfig.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResourceUsageExportConfig proto.InternalMessageInfo + +func (m *ResourceUsageExportConfig) GetBigqueryDestination() *ResourceUsageExportConfig_BigQueryDestination { + if m != nil { + return m.BigqueryDestination + } + return nil +} + +func (m *ResourceUsageExportConfig) GetEnableNetworkEgressMetering() bool { + if m != nil { + return m.EnableNetworkEgressMetering + } + return false +} + +func (m *ResourceUsageExportConfig) GetConsumptionMeteringConfig() *ResourceUsageExportConfig_ConsumptionMeteringConfig { + if m != nil { + return m.ConsumptionMeteringConfig + } + return nil +} + +// Parameters for using BigQuery as the destination of resource usage export. +type ResourceUsageExportConfig_BigQueryDestination struct { + // The ID of a BigQuery Dataset. + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceUsageExportConfig_BigQueryDestination) Reset() { + *m = ResourceUsageExportConfig_BigQueryDestination{} +} +func (m *ResourceUsageExportConfig_BigQueryDestination) String() string { + return proto.CompactTextString(m) +} +func (*ResourceUsageExportConfig_BigQueryDestination) ProtoMessage() {} +func (*ResourceUsageExportConfig_BigQueryDestination) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{77, 0} +} + +func (m *ResourceUsageExportConfig_BigQueryDestination) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination.Unmarshal(m, b) +} +func (m *ResourceUsageExportConfig_BigQueryDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination.Marshal(b, m, deterministic) +} +func (m *ResourceUsageExportConfig_BigQueryDestination) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination.Merge(m, src) +} +func (m *ResourceUsageExportConfig_BigQueryDestination) XXX_Size() int { + return xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination.Size(m) +} +func (m *ResourceUsageExportConfig_BigQueryDestination) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceUsageExportConfig_BigQueryDestination proto.InternalMessageInfo + +func (m *ResourceUsageExportConfig_BigQueryDestination) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +// Parameters for controlling consumption metering. +type ResourceUsageExportConfig_ConsumptionMeteringConfig struct { + // Whether to enable consumption metering for this cluster. If enabled, a + // second BigQuery table will be created to hold resource consumption + // records. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) Reset() { + *m = ResourceUsageExportConfig_ConsumptionMeteringConfig{} +} +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) String() string { + return proto.CompactTextString(m) +} +func (*ResourceUsageExportConfig_ConsumptionMeteringConfig) ProtoMessage() {} +func (*ResourceUsageExportConfig_ConsumptionMeteringConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{77, 1} +} + +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig.Unmarshal(m, b) +} +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig.Marshal(b, m, deterministic) +} +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig.Merge(m, src) +} +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) XXX_Size() int { + return xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig.Size(m) +} +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceUsageExportConfig_ConsumptionMeteringConfig proto.InternalMessageInfo + +func (m *ResourceUsageExportConfig_ConsumptionMeteringConfig) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +// VerticalPodAutoscaling contains global, per-cluster information +// required by Vertical Pod Autoscaler to automatically adjust +// the resources of pods controlled by it. +type VerticalPodAutoscaling struct { + // Enables vertical pod autoscaling. 
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerticalPodAutoscaling) Reset() { *m = VerticalPodAutoscaling{} } +func (m *VerticalPodAutoscaling) String() string { return proto.CompactTextString(m) } +func (*VerticalPodAutoscaling) ProtoMessage() {} +func (*VerticalPodAutoscaling) Descriptor() ([]byte, []int) { + return fileDescriptor_1c7f18b1699f357a, []int{78} +} + +func (m *VerticalPodAutoscaling) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerticalPodAutoscaling.Unmarshal(m, b) +} +func (m *VerticalPodAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerticalPodAutoscaling.Marshal(b, m, deterministic) +} +func (m *VerticalPodAutoscaling) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerticalPodAutoscaling.Merge(m, src) +} +func (m *VerticalPodAutoscaling) XXX_Size() int { + return xxx_messageInfo_VerticalPodAutoscaling.Size(m) +} +func (m *VerticalPodAutoscaling) XXX_DiscardUnknown() { + xxx_messageInfo_VerticalPodAutoscaling.DiscardUnknown(m) +} + +var xxx_messageInfo_VerticalPodAutoscaling proto.InternalMessageInfo + +func (m *VerticalPodAutoscaling) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func init() { + proto.RegisterEnum("google.container.v1.NodeTaint_Effect", NodeTaint_Effect_name, NodeTaint_Effect_value) + proto.RegisterEnum("google.container.v1.NetworkPolicy_Provider", NetworkPolicy_Provider_name, NetworkPolicy_Provider_value) + proto.RegisterEnum("google.container.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value) + proto.RegisterEnum("google.container.v1.Operation_Status", Operation_Status_name, Operation_Status_value) + proto.RegisterEnum("google.container.v1.Operation_Type", Operation_Type_name, Operation_Type_value) + 
proto.RegisterEnum("google.container.v1.SetMasterAuthRequest_Action", SetMasterAuthRequest_Action_name, SetMasterAuthRequest_Action_value) + proto.RegisterEnum("google.container.v1.NodePool_Status", NodePool_Status_name, NodePool_Status_value) + proto.RegisterEnum("google.container.v1.StatusCondition_Code", StatusCondition_Code_name, StatusCondition_Code_value) + proto.RegisterEnum("google.container.v1.DatabaseEncryption_State", DatabaseEncryption_State_name, DatabaseEncryption_State_value) + proto.RegisterEnum("google.container.v1.UsableSubnetworkSecondaryRange_Status", UsableSubnetworkSecondaryRange_Status_name, UsableSubnetworkSecondaryRange_Status_value) + proto.RegisterType((*NodeConfig)(nil), "google.container.v1.NodeConfig") + proto.RegisterMapType((map[string]string)(nil), "google.container.v1.NodeConfig.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "google.container.v1.NodeConfig.MetadataEntry") + proto.RegisterType((*ShieldedInstanceConfig)(nil), "google.container.v1.ShieldedInstanceConfig") + proto.RegisterType((*NodeTaint)(nil), "google.container.v1.NodeTaint") + proto.RegisterType((*MasterAuth)(nil), "google.container.v1.MasterAuth") + proto.RegisterType((*ClientCertificateConfig)(nil), "google.container.v1.ClientCertificateConfig") + proto.RegisterType((*AddonsConfig)(nil), "google.container.v1.AddonsConfig") + proto.RegisterType((*HttpLoadBalancing)(nil), "google.container.v1.HttpLoadBalancing") + proto.RegisterType((*HorizontalPodAutoscaling)(nil), "google.container.v1.HorizontalPodAutoscaling") + proto.RegisterType((*KubernetesDashboard)(nil), "google.container.v1.KubernetesDashboard") + proto.RegisterType((*NetworkPolicyConfig)(nil), "google.container.v1.NetworkPolicyConfig") + proto.RegisterType((*PrivateClusterConfig)(nil), "google.container.v1.PrivateClusterConfig") + proto.RegisterType((*AuthenticatorGroupsConfig)(nil), "google.container.v1.AuthenticatorGroupsConfig") + proto.RegisterType((*CloudRunConfig)(nil), 
"google.container.v1.CloudRunConfig") + proto.RegisterType((*MasterAuthorizedNetworksConfig)(nil), "google.container.v1.MasterAuthorizedNetworksConfig") + proto.RegisterType((*MasterAuthorizedNetworksConfig_CidrBlock)(nil), "google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock") + proto.RegisterType((*LegacyAbac)(nil), "google.container.v1.LegacyAbac") + proto.RegisterType((*NetworkPolicy)(nil), "google.container.v1.NetworkPolicy") + proto.RegisterType((*BinaryAuthorization)(nil), "google.container.v1.BinaryAuthorization") + proto.RegisterType((*IPAllocationPolicy)(nil), "google.container.v1.IPAllocationPolicy") + proto.RegisterType((*Cluster)(nil), "google.container.v1.Cluster") + proto.RegisterMapType((map[string]string)(nil), "google.container.v1.Cluster.ResourceLabelsEntry") + proto.RegisterType((*ClusterUpdate)(nil), "google.container.v1.ClusterUpdate") + proto.RegisterType((*Operation)(nil), "google.container.v1.Operation") + proto.RegisterType((*CreateClusterRequest)(nil), "google.container.v1.CreateClusterRequest") + proto.RegisterType((*GetClusterRequest)(nil), "google.container.v1.GetClusterRequest") + proto.RegisterType((*UpdateClusterRequest)(nil), "google.container.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateNodePoolRequest)(nil), "google.container.v1.UpdateNodePoolRequest") + proto.RegisterType((*SetNodePoolAutoscalingRequest)(nil), "google.container.v1.SetNodePoolAutoscalingRequest") + proto.RegisterType((*SetLoggingServiceRequest)(nil), "google.container.v1.SetLoggingServiceRequest") + proto.RegisterType((*SetMonitoringServiceRequest)(nil), "google.container.v1.SetMonitoringServiceRequest") + proto.RegisterType((*SetAddonsConfigRequest)(nil), "google.container.v1.SetAddonsConfigRequest") + proto.RegisterType((*SetLocationsRequest)(nil), "google.container.v1.SetLocationsRequest") + proto.RegisterType((*UpdateMasterRequest)(nil), "google.container.v1.UpdateMasterRequest") + proto.RegisterType((*SetMasterAuthRequest)(nil), 
"google.container.v1.SetMasterAuthRequest") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.container.v1.DeleteClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.container.v1.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.container.v1.ListClustersResponse") + proto.RegisterType((*GetOperationRequest)(nil), "google.container.v1.GetOperationRequest") + proto.RegisterType((*ListOperationsRequest)(nil), "google.container.v1.ListOperationsRequest") + proto.RegisterType((*CancelOperationRequest)(nil), "google.container.v1.CancelOperationRequest") + proto.RegisterType((*ListOperationsResponse)(nil), "google.container.v1.ListOperationsResponse") + proto.RegisterType((*GetServerConfigRequest)(nil), "google.container.v1.GetServerConfigRequest") + proto.RegisterType((*ServerConfig)(nil), "google.container.v1.ServerConfig") + proto.RegisterType((*CreateNodePoolRequest)(nil), "google.container.v1.CreateNodePoolRequest") + proto.RegisterType((*DeleteNodePoolRequest)(nil), "google.container.v1.DeleteNodePoolRequest") + proto.RegisterType((*ListNodePoolsRequest)(nil), "google.container.v1.ListNodePoolsRequest") + proto.RegisterType((*GetNodePoolRequest)(nil), "google.container.v1.GetNodePoolRequest") + proto.RegisterType((*NodePool)(nil), "google.container.v1.NodePool") + proto.RegisterType((*NodeManagement)(nil), "google.container.v1.NodeManagement") + proto.RegisterType((*AutoUpgradeOptions)(nil), "google.container.v1.AutoUpgradeOptions") + proto.RegisterType((*MaintenancePolicy)(nil), "google.container.v1.MaintenancePolicy") + proto.RegisterType((*MaintenanceWindow)(nil), "google.container.v1.MaintenanceWindow") + proto.RegisterMapType((map[string]*TimeWindow)(nil), "google.container.v1.MaintenanceWindow.MaintenanceExclusionsEntry") + proto.RegisterType((*TimeWindow)(nil), "google.container.v1.TimeWindow") + proto.RegisterType((*RecurringTimeWindow)(nil), "google.container.v1.RecurringTimeWindow") + 
proto.RegisterType((*DailyMaintenanceWindow)(nil), "google.container.v1.DailyMaintenanceWindow") + proto.RegisterType((*SetNodePoolManagementRequest)(nil), "google.container.v1.SetNodePoolManagementRequest") + proto.RegisterType((*SetNodePoolSizeRequest)(nil), "google.container.v1.SetNodePoolSizeRequest") + proto.RegisterType((*RollbackNodePoolUpgradeRequest)(nil), "google.container.v1.RollbackNodePoolUpgradeRequest") + proto.RegisterType((*ListNodePoolsResponse)(nil), "google.container.v1.ListNodePoolsResponse") + proto.RegisterType((*ClusterAutoscaling)(nil), "google.container.v1.ClusterAutoscaling") + proto.RegisterType((*AutoprovisioningNodePoolDefaults)(nil), "google.container.v1.AutoprovisioningNodePoolDefaults") + proto.RegisterType((*ResourceLimit)(nil), "google.container.v1.ResourceLimit") + proto.RegisterType((*NodePoolAutoscaling)(nil), "google.container.v1.NodePoolAutoscaling") + proto.RegisterType((*SetLabelsRequest)(nil), "google.container.v1.SetLabelsRequest") + proto.RegisterMapType((map[string]string)(nil), "google.container.v1.SetLabelsRequest.ResourceLabelsEntry") + proto.RegisterType((*SetLegacyAbacRequest)(nil), "google.container.v1.SetLegacyAbacRequest") + proto.RegisterType((*StartIPRotationRequest)(nil), "google.container.v1.StartIPRotationRequest") + proto.RegisterType((*CompleteIPRotationRequest)(nil), "google.container.v1.CompleteIPRotationRequest") + proto.RegisterType((*AcceleratorConfig)(nil), "google.container.v1.AcceleratorConfig") + proto.RegisterType((*SetNetworkPolicyRequest)(nil), "google.container.v1.SetNetworkPolicyRequest") + proto.RegisterType((*SetMaintenancePolicyRequest)(nil), "google.container.v1.SetMaintenancePolicyRequest") + proto.RegisterType((*StatusCondition)(nil), "google.container.v1.StatusCondition") + proto.RegisterType((*NetworkConfig)(nil), "google.container.v1.NetworkConfig") + proto.RegisterType((*IntraNodeVisibilityConfig)(nil), "google.container.v1.IntraNodeVisibilityConfig") + 
proto.RegisterType((*MaxPodsConstraint)(nil), "google.container.v1.MaxPodsConstraint") + proto.RegisterType((*DatabaseEncryption)(nil), "google.container.v1.DatabaseEncryption") + proto.RegisterType((*ListUsableSubnetworksRequest)(nil), "google.container.v1.ListUsableSubnetworksRequest") + proto.RegisterType((*ListUsableSubnetworksResponse)(nil), "google.container.v1.ListUsableSubnetworksResponse") + proto.RegisterType((*UsableSubnetworkSecondaryRange)(nil), "google.container.v1.UsableSubnetworkSecondaryRange") + proto.RegisterType((*UsableSubnetwork)(nil), "google.container.v1.UsableSubnetwork") + proto.RegisterType((*ResourceUsageExportConfig)(nil), "google.container.v1.ResourceUsageExportConfig") + proto.RegisterType((*ResourceUsageExportConfig_BigQueryDestination)(nil), "google.container.v1.ResourceUsageExportConfig.BigQueryDestination") + proto.RegisterType((*ResourceUsageExportConfig_ConsumptionMeteringConfig)(nil), "google.container.v1.ResourceUsageExportConfig.ConsumptionMeteringConfig") + proto.RegisterType((*VerticalPodAutoscaling)(nil), "google.container.v1.VerticalPodAutoscaling") +} + +func init() { + proto.RegisterFile("google/container/v1/cluster_service.proto", fileDescriptor_1c7f18b1699f357a) +} + +var fileDescriptor_1c7f18b1699f357a = []byte{ + // 7438 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x7d, 0x59, 0x6c, 0x1c, 0xc9, + 0x95, 0xa0, 0xb3, 0x78, 0x3f, 0x92, 0xc5, 0x62, 0x90, 0xa2, 0x4a, 0xa5, 0xa3, 0xa5, 0xec, 0x4b, + 0x52, 0xab, 0xc9, 0x96, 0xba, 0xd5, 0xee, 0x43, 0x7d, 0x24, 0x8b, 0x25, 0x8a, 0x2d, 0x1e, 0xd5, + 0x59, 0xa4, 0xda, 0xd2, 0xf6, 0x3a, 0x91, 0xac, 0x0a, 0x15, 0xb3, 0x59, 0x95, 0x59, 0xce, 0xcc, + 0x92, 0xc4, 0x6e, 0xa8, 0xb1, 0x36, 0x6c, 0x6f, 0xfb, 0x84, 0x17, 0x5e, 0xef, 0x61, 0x2c, 0x16, + 0x06, 0x76, 0x6d, 0xaf, 0xbd, 0xbd, 0xd8, 0xf1, 0xcc, 0x18, 0x73, 0x19, 0x18, 0xc0, 0x83, 0x01, + 0xc6, 0xf6, 0x1c, 0x18, 0x8f, 0x81, 0x01, 0x38, 0xc0, 0xdc, 0xc0, 0xd8, 0x03, 
0xcc, 0x05, 0x78, + 0x3e, 0x06, 0x33, 0x03, 0x0c, 0xe2, 0xca, 0x8c, 0xac, 0xca, 0xac, 0x62, 0x91, 0x6a, 0xb9, 0xbf, + 0xa4, 0x7c, 0xf1, 0x5e, 0x44, 0xbc, 0x17, 0x2f, 0x5e, 0xbc, 0xf7, 0xe2, 0x45, 0x11, 0xce, 0x54, + 0x1d, 0xa7, 0x5a, 0xc3, 0x73, 0x65, 0xc7, 0xf6, 0x4d, 0xcb, 0xc6, 0xee, 0xdc, 0xad, 0xf3, 0x73, + 0xe5, 0x5a, 0xd3, 0xf3, 0xb1, 0x6b, 0x78, 0xd8, 0xbd, 0x65, 0x95, 0xf1, 0x6c, 0xc3, 0x75, 0x7c, + 0x07, 0x4d, 0x31, 0xd4, 0xd9, 0x00, 0x75, 0xf6, 0xd6, 0xf9, 0xdc, 0x31, 0x4e, 0x6f, 0x36, 0xac, + 0x39, 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0x18, 0x49, 0xee, 0xb0, 0xd4, 0x5a, 0xae, + 0x59, 0xd8, 0xf6, 0x79, 0xc3, 0x03, 0x52, 0xc3, 0x4d, 0x0b, 0xd7, 0x2a, 0xc6, 0x26, 0xde, 0x32, + 0x6f, 0x59, 0x8e, 0xcb, 0x11, 0x8e, 0x72, 0x04, 0xfa, 0xb5, 0xd9, 0xbc, 0x39, 0x87, 0xeb, 0x0d, + 0x7f, 0xa7, 0x85, 0x3a, 0x68, 0xf4, 0xad, 0x3a, 0xf6, 0x7c, 0xb3, 0xde, 0x60, 0x08, 0xea, 0x8f, + 0x06, 0x01, 0x56, 0x9d, 0x0a, 0xce, 0x3b, 0xf6, 0x4d, 0xab, 0x8a, 0x4e, 0xc1, 0x58, 0xdd, 0x2c, + 0x6f, 0x59, 0x36, 0x36, 0xfc, 0x9d, 0x06, 0xce, 0x2a, 0x27, 0x95, 0xd3, 0x23, 0xfa, 0x28, 0x87, + 0xad, 0xef, 0x34, 0x30, 0x3a, 0x09, 0x63, 0x15, 0xcb, 0xdb, 0x36, 0x3c, 0xeb, 0x4d, 0x6c, 0x54, + 0x37, 0xb3, 0xa9, 0x93, 0xca, 0xe9, 0x01, 0x1d, 0x08, 0xac, 0x64, 0xbd, 0x89, 0x17, 0x37, 0x49, + 0x27, 0x8e, 0xd9, 0xf4, 0xb7, 0x0c, 0xaf, 0xec, 0x34, 0xb0, 0x97, 0xed, 0x3b, 0xd9, 0x47, 0x3a, + 0xa1, 0xb0, 0x12, 0x05, 0xa1, 0x47, 0x61, 0x82, 0x8b, 0xcc, 0x30, 0xcb, 0x65, 0xa7, 0x69, 0xfb, + 0xd9, 0x11, 0x3a, 0x54, 0x9a, 0x83, 0x35, 0x06, 0x45, 0x4b, 0x30, 0x5c, 0xc7, 0xbe, 0x59, 0x31, + 0x7d, 0x33, 0xdb, 0x7f, 0xb2, 0xef, 0xf4, 0xe8, 0x85, 0xc7, 0x67, 0x63, 0xa4, 0x3b, 0x1b, 0xf2, + 0x30, 0xbb, 0xc2, 0xf1, 0x0b, 0xb6, 0xef, 0xee, 0xe8, 0x01, 0x39, 0x3a, 0x0e, 0x60, 0xd5, 0xcd, + 0x2a, 0xe7, 0x6c, 0x80, 0x0e, 0x37, 0x42, 0x21, 0x94, 0xaf, 0x3c, 0x0c, 0xd6, 0xcc, 0x4d, 0x5c, + 0xf3, 0xb2, 0x83, 0x74, 0x9c, 0xc7, 0xba, 0x8d, 0xb3, 0x4c, 0xb1, 0xd9, 0x28, 0x9c, 0x14, 0x3d, + 0x02, 0x13, 0x35, 
0xa7, 0x6c, 0xd6, 0x0c, 0xcf, 0xab, 0x18, 0x8c, 0xaf, 0x21, 0x2a, 0x9f, 0x71, + 0x0a, 0x2e, 0x79, 0x95, 0x3c, 0x65, 0x0b, 0x41, 0xbf, 0x6f, 0x56, 0xbd, 0xec, 0x30, 0x15, 0x0d, + 0xfd, 0x3f, 0x3a, 0x09, 0xa3, 0x0d, 0x17, 0x93, 0xd5, 0xb3, 0x36, 0x6b, 0x38, 0x0b, 0x27, 0x95, + 0xd3, 0xc3, 0xba, 0x0c, 0x42, 0xaf, 0xc0, 0x98, 0x59, 0x2e, 0xe3, 0x1a, 0x76, 0x4d, 0xdf, 0x71, + 0xbd, 0xec, 0x28, 0x9d, 0xe8, 0x23, 0xb1, 0x13, 0xd5, 0x42, 0x44, 0x36, 0x5f, 0x3d, 0x42, 0x8b, + 0x8e, 0xc2, 0x08, 0x5d, 0x46, 0x2a, 0x8c, 0x31, 0x2a, 0x8c, 0x61, 0x02, 0xa0, 0xb2, 0x38, 0x0d, + 0x99, 0xba, 0x65, 0x1b, 0xe5, 0x46, 0xd3, 0x68, 0xd4, 0x4c, 0xff, 0xa6, 0xe3, 0xd6, 0xb3, 0xe3, + 0x6c, 0x7d, 0xea, 0x96, 0x9d, 0x6f, 0x34, 0x8b, 0x1c, 0x8a, 0x9e, 0x86, 0x41, 0x32, 0xa6, 0xef, + 0x65, 0x27, 0xe8, 0x64, 0x4e, 0x24, 0x4a, 0x6d, 0x9d, 0xa0, 0xe9, 0x1c, 0x1b, 0x61, 0xc8, 0x7a, + 0x5b, 0x44, 0x9d, 0x71, 0xc5, 0xb0, 0x6c, 0xcf, 0x37, 0xed, 0x32, 0x36, 0xca, 0x74, 0xa2, 0xd9, + 0xe9, 0x93, 0x4a, 0xa2, 0xfc, 0x4b, 0x9c, 0x68, 0x89, 0xd3, 0x70, 0xde, 0x66, 0xbc, 0x58, 0x78, + 0xee, 0x79, 0x18, 0x8f, 0xa8, 0x03, 0xca, 0x40, 0xdf, 0x36, 0xde, 0xe1, 0x7a, 0x4d, 0xfe, 0x8b, + 0xa6, 0x61, 0xe0, 0x96, 0x59, 0x6b, 0x62, 0xaa, 0xc8, 0x23, 0x3a, 0xfb, 0x78, 0x2e, 0xf5, 0x8c, + 0x92, 0x7b, 0x16, 0x46, 0xa5, 0x35, 0xee, 0x85, 0x54, 0xfd, 0xa4, 0x02, 0x33, 0xf1, 0x53, 0x45, + 0xe7, 0x00, 0x61, 0xdb, 0xdc, 0xac, 0x61, 0xc3, 0xc3, 0xe5, 0xa6, 0x8b, 0x8d, 0x4d, 0xc7, 0xf1, + 0x69, 0xaf, 0xc3, 0x7a, 0x86, 0xb5, 0x94, 0x68, 0xc3, 0xbc, 0xe3, 0xf8, 0xe8, 0x45, 0x38, 0xca, + 0xb1, 0x2d, 0xdb, 0xc7, 0x55, 0xd7, 0xf2, 0x77, 0x8c, 0xba, 0x63, 0x5b, 0xbe, 0xe3, 0x5a, 0x76, + 0x95, 0x0e, 0x3c, 0xac, 0x1f, 0x61, 0x28, 0x4b, 0x02, 0x63, 0x25, 0x40, 0x50, 0x7f, 0x47, 0x81, + 0x91, 0x40, 0xfa, 0x7b, 0x65, 0x01, 0xbd, 0x00, 0x83, 0xf8, 0xe6, 0x4d, 0x5c, 0xf6, 0xb3, 0x7d, + 0x27, 0x95, 0xd3, 0xe9, 0x0b, 0x0f, 0x77, 0x5e, 0xd5, 0xd9, 0x02, 0x45, 0xd6, 0x39, 0x91, 0x7a, + 0x1d, 0x06, 0x19, 0x04, 0xcd, 0x00, 0x2a, 0x5c, 0xbe, 
0x5c, 0xc8, 0xaf, 0x1b, 0x1b, 0xab, 0xa5, + 0x62, 0x21, 0xbf, 0x74, 0x79, 0xa9, 0xb0, 0x90, 0xf9, 0x00, 0x9a, 0x80, 0xd1, 0xd5, 0x35, 0xa3, + 0x94, 0xbf, 0x52, 0x58, 0xd8, 0x58, 0x2e, 0x64, 0x14, 0x82, 0x58, 0xd4, 0x0b, 0x97, 0x0b, 0xba, + 0x21, 0xc3, 0x53, 0x28, 0x0d, 0xb0, 0xba, 0x66, 0x14, 0x3e, 0x54, 0xc8, 0x6f, 0xac, 0x17, 0x32, + 0x7d, 0xea, 0xcf, 0xa7, 0x00, 0x56, 0x4c, 0x62, 0x73, 0xb5, 0xa6, 0xbf, 0x85, 0x72, 0x30, 0xdc, + 0xf4, 0xb0, 0x6b, 0x9b, 0x75, 0x61, 0xab, 0x82, 0x6f, 0xd2, 0xd6, 0x30, 0x3d, 0xef, 0xb6, 0xe3, + 0x56, 0x38, 0x77, 0xc1, 0x37, 0xda, 0x82, 0x23, 0xcc, 0xca, 0x1a, 0x65, 0xec, 0xfa, 0xd6, 0x4d, + 0xab, 0x6c, 0xfa, 0x81, 0xfe, 0xf5, 0x51, 0xfd, 0x3b, 0x17, 0xcb, 0x73, 0x9e, 0x52, 0xe5, 0x43, + 0x22, 0xae, 0x80, 0x87, 0xcb, 0xf1, 0x0d, 0xe8, 0x29, 0x98, 0x11, 0x87, 0x44, 0xd9, 0x94, 0x47, + 0xcb, 0x56, 0xe8, 0x9c, 0xa6, 0x79, 0x6b, 0xde, 0x94, 0x68, 0xd1, 0xe3, 0x80, 0xda, 0xe7, 0x97, + 0xc5, 0x94, 0x62, 0xb2, 0x6d, 0x28, 0x62, 0xda, 0x38, 0x3a, 0x59, 0xde, 0x9b, 0xcc, 0xb4, 0x31, + 0xc8, 0x55, 0xbc, 0xa3, 0x96, 0xe0, 0x70, 0xc2, 0xbc, 0xd1, 0x33, 0x90, 0xb5, 0x3c, 0xaf, 0x89, + 0x8d, 0x98, 0xe1, 0x98, 0x4e, 0xce, 0xd0, 0xf6, 0x36, 0x7a, 0xf5, 0xcf, 0xfa, 0x60, 0x4c, 0xab, + 0x54, 0x1c, 0xdb, 0xe3, 0x5d, 0x5d, 0x83, 0xa9, 0x2d, 0xdf, 0x6f, 0x18, 0x35, 0xc7, 0xac, 0x18, + 0x9b, 0x66, 0xcd, 0xb4, 0xcb, 0x44, 0x45, 0x15, 0x2a, 0xcd, 0x78, 0x23, 0x75, 0xc5, 0xf7, 0x1b, + 0xcb, 0x8e, 0x59, 0x99, 0x17, 0xd8, 0xfa, 0xe4, 0x56, 0x2b, 0x08, 0x6d, 0x43, 0x6e, 0xcb, 0x71, + 0xad, 0x37, 0x09, 0x61, 0xcd, 0x68, 0x38, 0x15, 0xc3, 0x6c, 0xfa, 0x8e, 0x57, 0x36, 0x6b, 0x62, + 0x07, 0x24, 0x1d, 0x0a, 0x57, 0x02, 0xb2, 0xa2, 0x53, 0xd1, 0x42, 0x22, 0x3d, 0xbb, 0x95, 0xd0, + 0x82, 0x0c, 0x98, 0xde, 0x6e, 0x6e, 0x62, 0xd7, 0xc6, 0x3e, 0xf6, 0x8c, 0x8a, 0xe9, 0x6d, 0x6d, + 0x3a, 0xa6, 0x5b, 0xe1, 0x3a, 0x71, 0x3a, 0x76, 0x98, 0xab, 0x01, 0xc1, 0x82, 0xc0, 0x9f, 0x4f, + 0x65, 0x15, 0x7d, 0x6a, 0xbb, 0xbd, 0x01, 0xbd, 0x0e, 0x87, 0x6c, 0xec, 0xdf, 0x76, 0xdc, 
0x6d, + 0xa3, 0xe1, 0xd4, 0xac, 0xf2, 0x8e, 0xd0, 0xba, 0xfe, 0x0e, 0x23, 0xac, 0x32, 0x8a, 0x22, 0x25, + 0xe0, 0x1a, 0x37, 0x65, 0xb7, 0x03, 0xd1, 0x0a, 0x64, 0xca, 0x35, 0xa7, 0x59, 0x31, 0xdc, 0xa6, + 0x2d, 0x3a, 0x1e, 0xa2, 0x1d, 0x3f, 0x98, 0xa0, 0xce, 0x4e, 0xb3, 0xa2, 0x37, 0x6d, 0xde, 0x67, + 0xba, 0x1c, 0xf9, 0x56, 0xe7, 0x60, 0xb2, 0x6d, 0x89, 0xc8, 0xbe, 0xaa, 0x58, 0x1e, 0x31, 0x38, + 0x15, 0xae, 0x22, 0xc1, 0xb7, 0xfa, 0x34, 0x64, 0x93, 0x84, 0xde, 0x91, 0xee, 0x3c, 0x4c, 0xc5, + 0x48, 0xb1, 0x1b, 0x49, 0x8c, 0x58, 0x3a, 0x92, 0x7c, 0x2a, 0x05, 0xd3, 0x45, 0xd7, 0xba, 0x45, + 0xd4, 0x9f, 0xef, 0x3a, 0x46, 0xf4, 0x04, 0x4c, 0x73, 0x2b, 0xdb, 0x60, 0xcd, 0x86, 0xed, 0x54, + 0xb0, 0xc7, 0x3b, 0xe0, 0xf6, 0x9a, 0x53, 0x12, 0xab, 0xe7, 0xa1, 0xa7, 0xe1, 0x70, 0x0b, 0x05, + 0xb6, 0x2b, 0x0d, 0xc7, 0xb2, 0x7d, 0x6e, 0x93, 0x0f, 0x45, 0x88, 0x0a, 0xbc, 0x11, 0x3d, 0x09, + 0x33, 0x75, 0x6a, 0xbe, 0x0c, 0xab, 0x71, 0xeb, 0x29, 0xa3, 0x6c, 0x55, 0x5c, 0x63, 0xb3, 0xe6, + 0x94, 0xb7, 0xa9, 0x86, 0x8d, 0xe8, 0x53, 0xac, 0x75, 0xa9, 0x71, 0xeb, 0xa9, 0xbc, 0x55, 0x71, + 0xe7, 0x49, 0x13, 0x3a, 0x03, 0x99, 0xb6, 0x51, 0xfa, 0x29, 0xfa, 0x44, 0xa3, 0xa5, 0xff, 0x47, + 0x61, 0xa2, 0xd1, 0xdc, 0xac, 0x59, 0xe5, 0x10, 0x93, 0x79, 0x3a, 0x69, 0x06, 0x16, 0x88, 0xea, + 0xeb, 0x70, 0x84, 0x58, 0x50, 0x6c, 0xfb, 0x64, 0x3b, 0x3b, 0xee, 0xa2, 0xeb, 0x34, 0x1b, 0x62, + 0x2b, 0x67, 0x61, 0x88, 0x4d, 0x5f, 0xc8, 0x50, 0x7c, 0xa2, 0x87, 0x21, 0x4d, 0x8f, 0x2d, 0x72, + 0x0e, 0x55, 0x09, 0x09, 0x37, 0xad, 0xe3, 0x02, 0x4a, 0xfb, 0x51, 0xcf, 0x41, 0x3a, 0xaa, 0x5a, + 0x1d, 0xd7, 0xe5, 0x27, 0x0a, 0x9c, 0x08, 0x8d, 0x3a, 0xd1, 0x1f, 0x5c, 0xe1, 0x6b, 0xdb, 0x7d, + 0x46, 0x1f, 0x86, 0xd1, 0x50, 0x8a, 0x5e, 0x36, 0x45, 0xdd, 0x90, 0x17, 0x62, 0xb5, 0xbd, 0xf3, + 0x18, 0xb3, 0x81, 0xc0, 0x75, 0x28, 0x8b, 0xff, 0x7a, 0xb9, 0x15, 0x18, 0x09, 0x57, 0xe2, 0x14, + 0x75, 0x7e, 0x1b, 0x35, 0x73, 0xc7, 0x90, 0xce, 0x9c, 0x51, 0x0e, 0x5b, 0x25, 0xc7, 0x0e, 0xb1, + 0xc5, 0xe1, 0xaa, 0xa6, 0xb8, 
0x2d, 0x16, 0x3d, 0xa8, 0x8f, 0x00, 0x2c, 0xe3, 0xaa, 0x59, 0xde, + 0xd1, 0x36, 0xcd, 0x72, 0x32, 0x5b, 0xea, 0xff, 0x56, 0x60, 0x3c, 0xa2, 0xdf, 0x68, 0x11, 0x86, + 0x1b, 0xae, 0x73, 0xcb, 0xaa, 0x60, 0x97, 0x22, 0xa7, 0x93, 0x5c, 0x54, 0x99, 0x6a, 0xb6, 0xc8, + 0x49, 0xf4, 0x80, 0x58, 0x1e, 0x34, 0x15, 0x1d, 0xf4, 0x09, 0x18, 0x2e, 0x86, 0x58, 0xd3, 0x45, + 0x7d, 0xed, 0xda, 0xd2, 0x42, 0x41, 0x6f, 0x39, 0xbc, 0x01, 0x06, 0xf3, 0xda, 0xf2, 0x52, 0x7e, + 0x2d, 0xa3, 0xa8, 0x73, 0x30, 0x35, 0x6f, 0xd9, 0xa6, 0xbb, 0x23, 0xa4, 0x4a, 0xa3, 0x9a, 0x0e, + 0x7c, 0xfd, 0x7d, 0x3f, 0xa0, 0xa5, 0xa2, 0x56, 0x23, 0xfe, 0x30, 0x41, 0xe5, 0xcc, 0x3d, 0x04, + 0xe9, 0xa6, 0x87, 0x0d, 0xab, 0x61, 0x98, 0x35, 0xcb, 0xf4, 0x82, 0xbd, 0x37, 0xd6, 0xf4, 0xf0, + 0x52, 0x43, 0x63, 0x30, 0xf4, 0x18, 0x4c, 0x96, 0x5d, 0x4c, 0xf6, 0x81, 0xd7, 0xdc, 0xe4, 0xf6, + 0x8f, 0xf3, 0x90, 0x61, 0x0d, 0xa5, 0x00, 0x4e, 0x63, 0x8c, 0xe0, 0x8b, 0x2d, 0x57, 0x1f, 0x8f, + 0x31, 0x02, 0x30, 0x5d, 0xb1, 0x59, 0x98, 0x14, 0x47, 0x74, 0xb0, 0x29, 0xd9, 0xfe, 0xa2, 0x66, + 0x7c, 0x82, 0x37, 0x8a, 0x3d, 0x89, 0x4e, 0x43, 0x9a, 0x98, 0x07, 0x09, 0x79, 0x20, 0x40, 0x1e, + 0x23, 0x2d, 0x01, 0xe6, 0x13, 0x80, 0x78, 0x3c, 0xe3, 0x49, 0xd8, 0x83, 0x01, 0x76, 0x46, 0xb4, + 0x06, 0x14, 0x2f, 0xc1, 0xb1, 0x30, 0xa6, 0x2c, 0x3b, 0x76, 0xc5, 0x74, 0x77, 0x0c, 0xd7, 0xb4, + 0xab, 0x98, 0x71, 0x30, 0x44, 0x39, 0x38, 0xc2, 0x71, 0x4a, 0x02, 0x45, 0x27, 0x18, 0x94, 0x19, + 0x0d, 0x8e, 0x07, 0x43, 0xc6, 0xf6, 0x30, 0x4c, 0x7b, 0xc8, 0x09, 0xa4, 0x98, 0x2e, 0x2e, 0xc2, + 0xe1, 0x36, 0x79, 0x70, 0x75, 0x1e, 0x89, 0xf8, 0x2c, 0x51, 0x2b, 0x35, 0x07, 0xd3, 0x51, 0xb1, + 0x70, 0x1a, 0x60, 0x5e, 0x8b, 0x2c, 0x18, 0x46, 0xf0, 0x41, 0xc8, 0xb6, 0x4b, 0x87, 0x13, 0x8d, + 0x52, 0xa2, 0x43, 0xad, 0xf2, 0x61, 0x84, 0x8f, 0xc3, 0x94, 0xdf, 0x68, 0xb6, 0xd1, 0xb0, 0x08, + 0x25, 0xe3, 0x37, 0x9a, 0x11, 0x74, 0xf5, 0xa3, 0x59, 0x18, 0xe2, 0xf6, 0x9e, 0x04, 0x5e, 0xd2, + 0xc6, 0xa5, 0xff, 0x27, 0x81, 0x57, 0x05, 0x7b, 0x65, 0xd7, 0x6a, 
0x10, 0x85, 0xe4, 0x5b, 0x56, + 0x06, 0x91, 0x75, 0xb4, 0x6c, 0xcb, 0xb7, 0xcc, 0x1a, 0x3d, 0x18, 0x78, 0x64, 0x47, 0xb4, 0x69, + 0x80, 0xad, 0x23, 0x6f, 0x65, 0xd1, 0x21, 0x09, 0xf0, 0x16, 0x60, 0x94, 0x63, 0x4a, 0x87, 0xfb, + 0x03, 0x5d, 0x42, 0x4a, 0xda, 0x17, 0xd8, 0x61, 0x38, 0xfe, 0x32, 0x8c, 0xf2, 0xd3, 0x82, 0xc4, + 0xce, 0x54, 0xcd, 0x92, 0x7a, 0x09, 0x6d, 0x9b, 0x0e, 0xf5, 0xd0, 0x41, 0x7e, 0x94, 0x04, 0xa4, + 0xd5, 0xaa, 0x65, 0x57, 0x45, 0x8e, 0x82, 0xa9, 0x9f, 0x9e, 0xe6, 0xe0, 0x12, 0x83, 0x12, 0x8f, + 0x33, 0x8c, 0x2b, 0x02, 0x5c, 0xa6, 0x6e, 0x93, 0x61, 0x8b, 0x40, 0xcf, 0xc2, 0x90, 0xd8, 0x7f, + 0x4c, 0xa1, 0xc4, 0x27, 0x3a, 0x1b, 0xb7, 0x9b, 0x98, 0xde, 0xb4, 0xed, 0xa4, 0xcb, 0x30, 0x6e, + 0x52, 0x17, 0x52, 0xc8, 0x09, 0x28, 0x87, 0xa7, 0xe2, 0x23, 0x5a, 0xc9, 0xd9, 0xd4, 0xc7, 0x4c, + 0xd9, 0xf5, 0x3c, 0x01, 0x20, 0x19, 0x04, 0xa6, 0x3b, 0x12, 0x04, 0x5d, 0x02, 0x2a, 0x55, 0xa3, + 0xe1, 0x38, 0x35, 0x2f, 0x3b, 0x46, 0x8f, 0x88, 0xe3, 0x89, 0x8b, 0x51, 0x74, 0x9c, 0x9a, 0x3e, + 0x62, 0xf3, 0xff, 0x79, 0xe8, 0x18, 0x8c, 0x08, 0x6b, 0xe5, 0x65, 0xc7, 0x69, 0xc4, 0x1e, 0x02, + 0x24, 0x4f, 0x40, 0x72, 0x1c, 0xcd, 0x5a, 0x63, 0xcb, 0xcc, 0xa6, 0x65, 0x4f, 0x20, 0xf4, 0x6f, + 0x34, 0xd2, 0x88, 0xae, 0xc3, 0x84, 0x8b, 0x3d, 0xa7, 0xe9, 0x96, 0xb1, 0xc1, 0x13, 0x0f, 0x2c, + 0x84, 0x7e, 0x22, 0xc1, 0x53, 0xa3, 0xa2, 0x9b, 0xd5, 0x39, 0x8d, 0x9c, 0x7d, 0x48, 0xbb, 0x11, + 0x20, 0x31, 0x93, 0xb4, 0x47, 0xe3, 0xa6, 0x65, 0x57, 0xb1, 0xdb, 0x70, 0x89, 0x1b, 0x90, 0x61, + 0xbb, 0x83, 0x36, 0x5c, 0x0e, 0xe1, 0x44, 0xc7, 0x6a, 0xf4, 0x40, 0x32, 0xcc, 0x4d, 0xb3, 0x9c, + 0x45, 0x1d, 0x74, 0x2c, 0x3c, 0xb8, 0x74, 0xa8, 0x85, 0x87, 0xd8, 0x12, 0xa4, 0xa3, 0x2e, 0x6d, + 0x76, 0x8a, 0x76, 0xa2, 0x76, 0x3f, 0x9e, 0xf4, 0xf1, 0x88, 0x17, 0x8b, 0xae, 0xc3, 0x34, 0x3d, + 0x02, 0x84, 0x78, 0x45, 0x87, 0x2c, 0x25, 0xf0, 0x68, 0x6c, 0x87, 0xed, 0xa7, 0x89, 0x8e, 0xac, + 0x46, 0xdb, 0x09, 0xf3, 0x36, 0x9c, 0x92, 0xf6, 0x12, 0x73, 0x00, 0x0c, 0x3e, 0x7a, 0xa0, 0x7f, + 0x33, 
0x74, 0x9c, 0x27, 0xf7, 0xe1, 0x3d, 0xe8, 0x27, 0xea, 0x9d, 0x3d, 0x98, 0x0d, 0x40, 0x75, + 0x12, 0x2c, 0x63, 0x9b, 0xe6, 0x3a, 0x38, 0x63, 0x87, 0x3b, 0x44, 0x47, 0x2b, 0x21, 0x3a, 0xe7, + 0x6b, 0xb2, 0xde, 0x0a, 0x42, 0xff, 0x0e, 0xa6, 0x37, 0xe9, 0x01, 0x1c, 0xb0, 0x45, 0x99, 0xce, + 0x66, 0x3b, 0x84, 0x13, 0x31, 0x27, 0xb6, 0x3e, 0xb5, 0x19, 0x73, 0x8c, 0x2f, 0xc1, 0xa8, 0x1c, + 0x6b, 0xe5, 0x3a, 0xac, 0x02, 0xd7, 0x4f, 0x39, 0xca, 0x92, 0x69, 0x65, 0x25, 0xe1, 0xb2, 0x3e, + 0xda, 0x5d, 0x49, 0xb8, 0x68, 0x85, 0x92, 0x70, 0x49, 0x62, 0x38, 0x5a, 0xc1, 0x37, 0xcd, 0x66, + 0xcd, 0x37, 0xea, 0xe6, 0x1d, 0x12, 0x11, 0xd2, 0xf5, 0xf3, 0x7c, 0x97, 0x48, 0x27, 0x7b, 0xa2, + 0xa3, 0x48, 0xef, 0x14, 0x9d, 0x0a, 0x59, 0x12, 0x8e, 0xad, 0x67, 0x79, 0x57, 0x6d, 0x2d, 0xc8, + 0x81, 0x63, 0xc1, 0x06, 0x6d, 0x7a, 0x66, 0x15, 0x1b, 0xf8, 0x4e, 0xc3, 0x71, 0x7d, 0x31, 0xff, + 0x53, 0x74, 0x9c, 0xd9, 0xd8, 0x71, 0xc4, 0x2e, 0xdd, 0x20, 0x74, 0x05, 0x4a, 0xc6, 0x79, 0x39, + 0xe2, 0x26, 0x35, 0x21, 0x1b, 0x8e, 0x9a, 0xb2, 0x4b, 0xce, 0x1c, 0xec, 0x40, 0x37, 0xd5, 0x0e, + 0xe3, 0x25, 0xba, 0xf2, 0xfa, 0x11, 0x33, 0xd1, 0xcb, 0x37, 0x60, 0x46, 0x84, 0x15, 0x41, 0x8a, + 0x82, 0x0d, 0xf5, 0x30, 0x1d, 0xea, 0x4c, 0xec, 0x50, 0x71, 0x01, 0x94, 0x3e, 0xdd, 0x88, 0x0b, + 0xab, 0x3e, 0x04, 0x53, 0x15, 0xd3, 0x37, 0x37, 0x4d, 0x8f, 0x04, 0x2e, 0x65, 0x77, 0x87, 0x1d, + 0xb0, 0x8f, 0x74, 0x50, 0xa3, 0x05, 0x8e, 0x5f, 0x08, 0xd0, 0x75, 0x54, 0x69, 0x83, 0x21, 0x0c, + 0xd9, 0x5b, 0xd8, 0x25, 0x4c, 0xb5, 0x67, 0x04, 0x1e, 0xed, 0x90, 0x3e, 0xbc, 0xc6, 0x89, 0x5a, + 0xf2, 0x01, 0x33, 0xb7, 0x62, 0xe1, 0xe8, 0x28, 0x8c, 0x78, 0xb8, 0x76, 0xd3, 0xa8, 0x59, 0xf6, + 0x36, 0xcf, 0xd7, 0x0c, 0x13, 0xc0, 0xb2, 0x65, 0x6f, 0xa3, 0x19, 0xe8, 0x7f, 0xd3, 0xb1, 0x79, + 0x56, 0x86, 0x1e, 0xdd, 0xf4, 0x9b, 0x44, 0x3a, 0x41, 0xec, 0xc5, 0x52, 0x31, 0xc1, 0x37, 0x39, + 0x2c, 0x84, 0x23, 0x21, 0x44, 0x7e, 0x0b, 0xbb, 0x1e, 0x91, 0x4a, 0x95, 0x79, 0x3c, 0xbc, 0x99, + 0x0b, 0xf2, 0x1a, 0x6b, 0xa4, 0x59, 0xa4, 
0xa6, 0xeb, 0x62, 0x9b, 0xa8, 0x7c, 0x84, 0x6c, 0x8b, + 0x7b, 0x64, 0xac, 0x95, 0xd9, 0xa8, 0x90, 0x4a, 0xc0, 0x99, 0xdb, 0x22, 0x68, 0xac, 0x60, 0xc6, + 0x88, 0xb7, 0x93, 0x63, 0x4f, 0x50, 0x3d, 0x00, 0xa3, 0xdc, 0xc9, 0xf6, 0xad, 0x3a, 0xce, 0xbe, + 0xc1, 0x4e, 0x53, 0x06, 0x5a, 0xb7, 0xea, 0x18, 0x3d, 0x0f, 0x83, 0x9e, 0x6f, 0xfa, 0x4d, 0x2f, + 0xbb, 0x4d, 0xc3, 0x90, 0x07, 0x3b, 0x1e, 0x58, 0x25, 0x8a, 0xaa, 0x73, 0x12, 0x1a, 0x40, 0xd2, + 0xff, 0x19, 0x75, 0xec, 0x91, 0x2d, 0x90, 0xad, 0xf1, 0x00, 0x92, 0x42, 0x57, 0x18, 0x90, 0xb8, + 0x78, 0x2d, 0xce, 0xa4, 0x67, 0xbd, 0x89, 0xb3, 0x75, 0x9a, 0x4c, 0xcf, 0xc8, 0xbe, 0x64, 0xc9, + 0x7a, 0x13, 0xa3, 0x73, 0xb1, 0x8e, 0xb6, 0xcd, 0x8e, 0xbc, 0x36, 0x27, 0xfb, 0x02, 0x4c, 0x05, + 0x39, 0x67, 0xba, 0xc7, 0x8c, 0xa6, 0x5b, 0xf3, 0xb2, 0x0e, 0x39, 0xda, 0xa9, 0x58, 0x26, 0x45, + 0x33, 0xdd, 0x2f, 0x1b, 0x6e, 0xcd, 0x23, 0x2e, 0x60, 0x44, 0x96, 0xcc, 0x05, 0x6c, 0x84, 0x2e, + 0xa0, 0x24, 0x49, 0xe6, 0x02, 0x3e, 0x00, 0xa3, 0xf8, 0x4e, 0xc3, 0x72, 0xb9, 0x1c, 0x3f, 0xc2, + 0xe4, 0xc8, 0x40, 0x54, 0x8e, 0x39, 0x18, 0x16, 0x67, 0x54, 0xd6, 0x65, 0x8a, 0x22, 0xbe, 0x49, + 0x14, 0xc9, 0xbd, 0x0a, 0xbf, 0xd1, 0xcc, 0x7a, 0xd4, 0x91, 0x18, 0x61, 0x90, 0xf5, 0x46, 0x33, + 0xc9, 0x03, 0xf6, 0xe3, 0x3d, 0x60, 0xb4, 0x00, 0x40, 0xbc, 0x7c, 0x8b, 0xb9, 0x30, 0xb7, 0xa8, + 0x9b, 0xf1, 0x50, 0x7c, 0x7e, 0x9d, 0xae, 0x42, 0x5e, 0x20, 0xeb, 0x12, 0x5d, 0x4e, 0x83, 0xa9, + 0x18, 0xef, 0xa3, 0xa7, 0xbc, 0xf8, 0x6d, 0x18, 0x64, 0x23, 0xa0, 0x19, 0x40, 0xa5, 0x75, 0x6d, + 0x7d, 0xa3, 0xd4, 0x12, 0x5c, 0x66, 0x60, 0x8c, 0x86, 0x9d, 0xa5, 0xa5, 0xb5, 0xd5, 0xa5, 0xd5, + 0xc5, 0x8c, 0x82, 0x46, 0x61, 0x48, 0xdf, 0x58, 0xa5, 0x1f, 0x29, 0x34, 0x01, 0xa3, 0x7a, 0x21, + 0xbf, 0xb6, 0x9a, 0x5f, 0x5a, 0x26, 0x80, 0x3e, 0x34, 0x06, 0xc3, 0xa5, 0xf5, 0xb5, 0x62, 0x91, + 0x7c, 0xf5, 0xa3, 0x11, 0x18, 0x28, 0xe8, 0xfa, 0x9a, 0x9e, 0x19, 0x20, 0x0d, 0x0b, 0x85, 0x45, + 0x5d, 0x5b, 0x28, 0x2c, 0x64, 0x06, 0xd5, 0x8f, 0x8e, 0xc2, 0x38, 0xd7, 0xc8, 
0x8d, 0x46, 0xc5, + 0xf4, 0x31, 0x7a, 0x02, 0xa6, 0x2b, 0xd8, 0xb3, 0x5c, 0xe2, 0x05, 0xc8, 0x9b, 0x83, 0x25, 0x56, + 0x10, 0x6f, 0x93, 0x37, 0xc6, 0x25, 0xc8, 0x09, 0x8a, 0x18, 0x57, 0x99, 0xa5, 0x59, 0xb2, 0x1c, + 0x63, 0xa5, 0xcd, 0x63, 0xde, 0x80, 0x43, 0x82, 0x3a, 0xea, 0xf3, 0x0e, 0xee, 0xd5, 0xe7, 0x9d, + 0xe2, 0xf4, 0x91, 0xac, 0xeb, 0x5c, 0x0b, 0x1b, 0xc4, 0xc5, 0x35, 0xac, 0x8a, 0xf0, 0xdc, 0x25, + 0x36, 0x88, 0x33, 0xbb, 0x54, 0x21, 0x5b, 0x45, 0x10, 0x48, 0xd7, 0x61, 0xcc, 0x89, 0xcf, 0xf0, + 0x96, 0xa5, 0xe0, 0x56, 0xac, 0x4a, 0xce, 0x5a, 0x86, 0x1d, 0x67, 0xca, 0x67, 0x7b, 0x33, 0xe5, + 0x47, 0x78, 0x5f, 0xed, 0x4d, 0x68, 0x1b, 0x8e, 0xb7, 0xf3, 0x21, 0x9b, 0xf5, 0x91, 0x4e, 0xf9, + 0x51, 0xce, 0x9e, 0x6c, 0xd3, 0x73, 0x2d, 0xac, 0xcb, 0x76, 0xfd, 0x31, 0x10, 0x82, 0x31, 0x42, + 0xcf, 0x1e, 0xa8, 0x67, 0x2f, 0x44, 0xb0, 0x1c, 0x38, 0xf8, 0x9f, 0x53, 0xe0, 0x4c, 0xb0, 0xee, + 0x5d, 0x3d, 0xc8, 0xb1, 0xfd, 0x7b, 0x90, 0x0f, 0x0b, 0xdd, 0xe9, 0xec, 0x48, 0x4a, 0x4b, 0x22, + 0xce, 0x10, 0x59, 0x4e, 0x13, 0xbd, 0x39, 0x69, 0x62, 0x49, 0xda, 0x9b, 0xd0, 0x1b, 0x70, 0x4c, + 0x0c, 0x14, 0xeb, 0x62, 0x66, 0x7a, 0x74, 0x31, 0xc5, 0x8a, 0xc4, 0x25, 0x8c, 0x9e, 0x86, 0xc3, + 0xe1, 0x8a, 0x44, 0xe3, 0xd5, 0x29, 0x76, 0x30, 0x06, 0xeb, 0x12, 0x09, 0x5b, 0xdf, 0x86, 0x87, + 0x04, 0x5d, 0x47, 0x67, 0xed, 0xd0, 0xbe, 0x9c, 0xb5, 0x93, 0xbc, 0xef, 0x44, 0x0c, 0xe4, 0x83, + 0xc0, 0x31, 0x12, 0x1d, 0x92, 0x99, 0xde, 0x1d, 0x12, 0xb1, 0x17, 0xe2, 0x9b, 0x65, 0xae, 0x2d, + 0xdb, 0x77, 0x4d, 0x6e, 0xc1, 0x2c, 0xcf, 0xda, 0xb4, 0x6a, 0x96, 0x1f, 0xdc, 0x29, 0xe4, 0x3a, + 0x70, 0xbd, 0x44, 0x08, 0xa9, 0x79, 0x0b, 0xc8, 0x5a, 0xb8, 0x4e, 0xc4, 0x20, 0xee, 0x48, 0xcb, + 0x8e, 0x10, 0xd6, 0x93, 0x5f, 0x6a, 0x45, 0x34, 0x99, 0xdb, 0x4f, 0xf5, 0x4b, 0xc3, 0x30, 0xb2, + 0xd6, 0xc0, 0x2e, 0x5b, 0xf1, 0xb8, 0x4c, 0x8c, 0x70, 0xa9, 0x52, 0x2d, 0x2e, 0xd5, 0x2b, 0x90, + 0x76, 0x04, 0x21, 0xb3, 0x57, 0x7d, 0x1d, 0x3c, 0x8f, 0x60, 0x8c, 0x59, 0x62, 0xc2, 0xf4, 0xf1, + 0x80, 0x94, 0x5a, 
0xb4, 0x17, 0x02, 0xef, 0xa5, 0xbf, 0xc3, 0xdd, 0x66, 0xd8, 0x47, 0x8b, 0xff, + 0x32, 0x03, 0x83, 0x15, 0xec, 0x9b, 0x56, 0x8d, 0x9b, 0x4c, 0xfe, 0x15, 0xe3, 0xd7, 0x0c, 0xc4, + 0xf9, 0x35, 0x11, 0x8f, 0x72, 0xb0, 0xc5, 0xa3, 0x7c, 0x00, 0x46, 0x7d, 0xd3, 0xad, 0x62, 0x9f, + 0x35, 0x33, 0x13, 0x0e, 0x0c, 0x44, 0x11, 0x64, 0x8f, 0x61, 0xa4, 0xdd, 0x63, 0xf0, 0x7c, 0xd3, + 0xf5, 0x99, 0xb7, 0xc1, 0x92, 0x6e, 0x23, 0x14, 0x42, 0x9d, 0x8d, 0x23, 0xd4, 0x2b, 0x65, 0x8d, + 0x2c, 0x41, 0x32, 0x84, 0xed, 0x0a, 0x6d, 0x2a, 0x01, 0x92, 0xfc, 0x7f, 0xe1, 0x25, 0x8c, 0xf7, + 0xe0, 0x25, 0x88, 0x8c, 0x4f, 0x00, 0xf1, 0xd0, 0x06, 0x73, 0xe0, 0xa8, 0x19, 0x97, 0x7a, 0x4d, + 0xf7, 0xd0, 0x2b, 0x12, 0x1d, 0x84, 0xdd, 0xaa, 0x7a, 0x57, 0x07, 0x62, 0x14, 0x86, 0x8a, 0x85, + 0xd5, 0x85, 0x18, 0xdf, 0x61, 0x18, 0xfa, 0x17, 0xd6, 0x56, 0x0b, 0xcc, 0x69, 0xd0, 0xe6, 0xd7, + 0xf4, 0x75, 0xea, 0x34, 0xa8, 0xff, 0x9a, 0x82, 0x7e, 0xaa, 0x1a, 0xd3, 0x90, 0x59, 0xbf, 0x5e, + 0x2c, 0xb4, 0x74, 0x88, 0x20, 0x9d, 0xd7, 0x0b, 0xda, 0x7a, 0xc1, 0xc8, 0x2f, 0x6f, 0x94, 0xd6, + 0x0b, 0x7a, 0x46, 0x21, 0xb0, 0x85, 0xc2, 0x72, 0x41, 0x82, 0xa5, 0x08, 0x6c, 0xa3, 0x48, 0x1d, + 0x0e, 0x63, 0x45, 0xa3, 0xb0, 0x3e, 0x34, 0x09, 0xe3, 0x02, 0xb6, 0xba, 0xb6, 0x50, 0x28, 0x65, + 0xfa, 0x09, 0x9a, 0x5e, 0x28, 0x6a, 0x4b, 0x7a, 0x40, 0x3a, 0xc0, 0x48, 0x17, 0xe4, 0x21, 0x06, + 0xc9, 0x64, 0xf8, 0xb0, 0x84, 0xd2, 0x28, 0xae, 0xad, 0x2d, 0x67, 0x86, 0x08, 0x94, 0x0f, 0x1c, + 0x42, 0x87, 0xd1, 0x31, 0xc8, 0x96, 0x0a, 0xeb, 0x21, 0xc8, 0x58, 0xd1, 0x56, 0xb5, 0xc5, 0xc2, + 0x4a, 0x61, 0x75, 0x3d, 0x33, 0x82, 0x0e, 0xc1, 0xa4, 0xb6, 0xb1, 0xbe, 0x66, 0xf0, 0x61, 0xd9, + 0x44, 0x80, 0x08, 0x90, 0x82, 0xa3, 0x13, 0x1c, 0x45, 0x69, 0x00, 0xd2, 0xd9, 0xb2, 0x36, 0x5f, + 0x58, 0x2e, 0x65, 0xc6, 0xd0, 0x14, 0x4c, 0x90, 0x6f, 0xc6, 0x93, 0xa1, 0x6d, 0xac, 0x5f, 0xc9, + 0x8c, 0x53, 0xe9, 0x47, 0x46, 0x2c, 0x2d, 0xdd, 0x28, 0x64, 0xd2, 0x01, 0xbc, 0xb0, 0xfe, 0xda, + 0x9a, 0x7e, 0xd5, 0x28, 0xae, 0x2d, 0x2f, 0xe5, 0xaf, 
0x67, 0x26, 0x50, 0x0e, 0x66, 0x58, 0x27, + 0x4b, 0xab, 0xeb, 0x85, 0x55, 0x6d, 0x35, 0x5f, 0x10, 0x6d, 0x19, 0xf5, 0x6b, 0x0a, 0x4c, 0xe7, + 0x69, 0x78, 0xc1, 0x0f, 0x21, 0x1d, 0x7f, 0xa4, 0x89, 0x3d, 0x1f, 0x9d, 0x02, 0x68, 0xb8, 0xce, + 0x1b, 0xb8, 0xec, 0x13, 0x8f, 0x46, 0x09, 0x8c, 0xc2, 0x08, 0x87, 0x2e, 0x55, 0x12, 0x2d, 0xc6, + 0xf3, 0x30, 0xc4, 0x75, 0x92, 0x5f, 0xdd, 0x1e, 0xeb, 0x74, 0x20, 0xce, 0xf7, 0xfd, 0xa9, 0x96, + 0xd2, 0x05, 0x05, 0xd9, 0xe3, 0x0d, 0x93, 0x38, 0xf3, 0x7c, 0x0f, 0xf3, 0x2f, 0xf5, 0x13, 0x0a, + 0x4c, 0x2e, 0x62, 0xff, 0xde, 0xcd, 0xf2, 0x14, 0x40, 0x90, 0x2b, 0x65, 0x77, 0xcc, 0x9c, 0x54, + 0x24, 0x4a, 0x2b, 0x81, 0x99, 0x1c, 0x08, 0xcd, 0xa4, 0xfa, 0x3d, 0x05, 0xa6, 0x99, 0x17, 0x7b, + 0x5f, 0xa7, 0xf2, 0x12, 0x0c, 0x36, 0xe9, 0xa8, 0x3c, 0x9d, 0xad, 0x76, 0x12, 0x29, 0x9b, 0x1f, + 0x13, 0x2c, 0x27, 0x8b, 0xe5, 0xe5, 0x9f, 0x15, 0x38, 0xc4, 0x70, 0x83, 0xd4, 0xeb, 0x7d, 0x61, + 0xe6, 0x21, 0x18, 0x8b, 0xf8, 0xcb, 0xe1, 0x7d, 0x0f, 0xd8, 0xa1, 0xb3, 0xfc, 0x08, 0xc7, 0x12, + 0xc7, 0x1b, 0xbb, 0xe8, 0xa1, 0x4c, 0xd1, 0xfc, 0xbe, 0x08, 0x0d, 0xd4, 0x48, 0x6d, 0xd9, 0x60, + 0x88, 0x25, 0x15, 0x98, 0x09, 0xee, 0x87, 0x25, 0xee, 0xdf, 0x49, 0xc1, 0xf1, 0x12, 0xf6, 0xe3, + 0xfc, 0xd7, 0xf7, 0x91, 0x14, 0xd6, 0xa2, 0x69, 0xc0, 0x81, 0xde, 0x3c, 0x71, 0x2e, 0x2e, 0x39, + 0x19, 0x28, 0x44, 0x31, 0x28, 0x89, 0xe2, 0x57, 0x15, 0xc8, 0x96, 0xb0, 0x1f, 0xf5, 0xef, 0xee, + 0x8f, 0x14, 0xce, 0xb5, 0x5f, 0x92, 0xf4, 0x87, 0x4b, 0xd8, 0x7a, 0x53, 0x12, 0xa7, 0xc5, 0xbf, + 0xa1, 0xc0, 0xd1, 0x12, 0xf6, 0xdb, 0xa2, 0xbe, 0xfb, 0x33, 0xff, 0x0b, 0xb1, 0x77, 0x37, 0x12, + 0x0b, 0x31, 0x17, 0x38, 0x71, 0x4b, 0xf0, 0x87, 0x0a, 0xcc, 0x94, 0xb0, 0x1f, 0x09, 0x3a, 0xef, + 0x0b, 0x03, 0x57, 0x5b, 0xef, 0x81, 0xfa, 0xf7, 0x18, 0x13, 0x33, 0xf6, 0xa2, 0x97, 0x41, 0x71, + 0x9c, 0x7d, 0x53, 0x81, 0x29, 0xaa, 0x5c, 0x3c, 0xa8, 0xbb, 0x3f, 0x6c, 0x9d, 0x92, 0x2f, 0x8e, + 0xfa, 0x69, 0x76, 0x89, 0x19, 0x85, 0xf0, 0xf6, 0x28, 0x6e, 0xb2, 0xbf, 0xa0, 0xc0, 0x14, 
0x33, + 0x89, 0xcc, 0x7f, 0xbe, 0x3f, 0x93, 0x3d, 0x0b, 0xe9, 0x16, 0x5f, 0x5e, 0x52, 0xa0, 0xf1, 0x7a, + 0x24, 0xb1, 0x28, 0x66, 0x3d, 0x24, 0xcd, 0xfa, 0xaf, 0x52, 0x30, 0x4d, 0xb6, 0x40, 0x78, 0x0f, + 0x79, 0x5f, 0xa6, 0xbd, 0x02, 0x83, 0x66, 0xd9, 0x17, 0xd3, 0x4d, 0x27, 0xdc, 0x9e, 0xc5, 0x4d, + 0x6c, 0x56, 0xa3, 0x74, 0xfc, 0x88, 0x62, 0x9d, 0xa0, 0x4b, 0xc1, 0x19, 0xb7, 0xb7, 0xcb, 0xd6, + 0xf8, 0x03, 0x4e, 0x96, 0x4b, 0x11, 0x06, 0xd9, 0x40, 0xc4, 0x19, 0xdd, 0x58, 0xbd, 0xba, 0xba, + 0xf6, 0xda, 0x2a, 0xcb, 0x73, 0x11, 0x87, 0xa8, 0xa8, 0x95, 0x4a, 0xaf, 0xad, 0xe9, 0x0b, 0x19, + 0x85, 0xb8, 0x69, 0x8b, 0x85, 0xd5, 0x82, 0x4e, 0x5c, 0xbe, 0x00, 0x9c, 0x12, 0x88, 0x1b, 0xa5, + 0x82, 0xbe, 0xaa, 0xad, 0x14, 0x32, 0x7d, 0xea, 0x3b, 0x0a, 0x4c, 0x2f, 0xe0, 0x1a, 0xbe, 0xcf, + 0xc7, 0xbf, 0x60, 0xae, 0x5f, 0x62, 0x6e, 0x0b, 0xa6, 0x96, 0x2d, 0x4f, 0x78, 0x44, 0xf7, 0x62, + 0x5b, 0x85, 0xbe, 0x57, 0x7f, 0xc4, 0xf7, 0x6a, 0xc2, 0x74, 0x74, 0x24, 0xaf, 0xe1, 0xd8, 0x1e, + 0x46, 0xcf, 0xc0, 0x30, 0x9f, 0xa2, 0x97, 0x55, 0x68, 0x70, 0xd1, 0xd1, 0xd3, 0xd3, 0x03, 0x6c, + 0xf4, 0x20, 0x8c, 0xd7, 0x2d, 0xcf, 0x23, 0x26, 0x93, 0x8c, 0xcc, 0x4a, 0x87, 0x46, 0xf4, 0x31, + 0x0e, 0xbc, 0x41, 0x60, 0xea, 0x67, 0x14, 0x98, 0x5a, 0xc4, 0x7e, 0x10, 0x0e, 0xde, 0x03, 0x0e, + 0x1f, 0x86, 0xb1, 0x30, 0x98, 0x8d, 0x08, 0x7b, 0x34, 0x80, 0x27, 0x38, 0x7e, 0x6f, 0xc0, 0x21, + 0x22, 0x84, 0x60, 0x36, 0xef, 0xa5, 0xc0, 0x3f, 0xaf, 0xc0, 0x4c, 0xde, 0xb4, 0xcb, 0xb8, 0xf6, + 0x33, 0x64, 0x5e, 0xd6, 0xb5, 0xbb, 0x30, 0xd3, 0xca, 0x3c, 0xd7, 0x81, 0x17, 0x01, 0x02, 0x62, + 0xa1, 0x05, 0x27, 0x3a, 0x87, 0xf5, 0xba, 0x44, 0xb1, 0x37, 0x4d, 0xa8, 0xc2, 0xcc, 0x22, 0xf6, + 0xc9, 0xf1, 0x19, 0xdc, 0x7a, 0x1d, 0x5c, 0x1c, 0x71, 0x7c, 0x7e, 0x3c, 0x05, 0x63, 0xf2, 0x30, + 0x2c, 0x37, 0xc6, 0xee, 0x3b, 0x5b, 0x2f, 0x8d, 0x14, 0x91, 0x1b, 0xa3, 0xcd, 0x2d, 0x97, 0x46, + 0xb3, 0x30, 0x75, 0xcb, 0xac, 0x59, 0xd1, 0xfc, 0xb6, 0x78, 0x8e, 0x31, 0x49, 0x9b, 0xa4, 0xf4, + 0xb6, 0xc7, 0x32, 0xc3, 0x6c, 
0x1c, 0xc9, 0x99, 0xed, 0x17, 0x99, 0x61, 0xda, 0x12, 0x66, 0x86, + 0xcf, 0x02, 0xeb, 0x42, 0xc2, 0xf5, 0xb2, 0x03, 0xb4, 0xef, 0x09, 0xda, 0x10, 0xa0, 0x7a, 0xe8, + 0x02, 0x1c, 0x62, 0xb8, 0xd1, 0x13, 0x86, 0x3d, 0xb5, 0x18, 0xd1, 0xd9, 0x34, 0x23, 0xc9, 0x22, + 0x4f, 0xfd, 0x5d, 0x05, 0x0e, 0xb1, 0xa8, 0xf0, 0xfe, 0x06, 0x06, 0x2f, 0xc1, 0x48, 0xe0, 0x12, + 0x73, 0x3f, 0xa4, 0x73, 0xa9, 0x08, 0x3b, 0x02, 0x86, 0x85, 0xbf, 0x2c, 0x6d, 0xa8, 0xc1, 0xc8, + 0x86, 0xfa, 0xff, 0x0a, 0x1c, 0x62, 0x66, 0xfb, 0xfd, 0x18, 0xe9, 0xc4, 0x39, 0x22, 0x9f, 0x55, + 0x98, 0xd1, 0x15, 0xf3, 0xbd, 0x4f, 0x6e, 0x53, 0x52, 0xf8, 0xfd, 0xae, 0x02, 0x68, 0x31, 0x0c, + 0x96, 0xde, 0xef, 0xd2, 0xfb, 0xd6, 0x20, 0x0c, 0x8b, 0xb9, 0xc6, 0x66, 0x3b, 0x3f, 0x08, 0x83, + 0xdc, 0xdd, 0x4d, 0xed, 0xa9, 0x3c, 0x4c, 0xe7, 0xe8, 0x64, 0xa3, 0x26, 0x95, 0xa3, 0xc5, 0x94, + 0xa2, 0x75, 0xbc, 0xc4, 0xce, 0xc2, 0x90, 0xb0, 0x25, 0xec, 0x75, 0x81, 0xf8, 0x24, 0xd6, 0x23, + 0xee, 0x92, 0xf4, 0x26, 0xb3, 0x1e, 0xed, 0x17, 0xa4, 0x97, 0x82, 0xbc, 0x6a, 0x95, 0x3a, 0x62, + 0x0f, 0x75, 0xdc, 0x34, 0xdd, 0xaf, 0x85, 0xb7, 0xe2, 0xd2, 0xa7, 0xaf, 0x44, 0x23, 0xd1, 0xfe, + 0x1e, 0xef, 0x84, 0x22, 0x41, 0x68, 0x1e, 0xa0, 0x6e, 0xda, 0x66, 0x15, 0xd7, 0x85, 0xaa, 0x25, + 0x55, 0xc9, 0x93, 0xae, 0x56, 0x02, 0x54, 0x5d, 0x22, 0x43, 0xd7, 0x60, 0x2a, 0xae, 0x06, 0x65, + 0xb0, 0xa7, 0x1a, 0x94, 0xc9, 0x7a, 0x5b, 0xf1, 0x49, 0xf4, 0xc6, 0xd6, 0xda, 0xdf, 0x8d, 0x2d, + 0x7a, 0x0c, 0x50, 0xc3, 0xa9, 0xb4, 0x5e, 0xa2, 0xb3, 0x17, 0x69, 0x13, 0x0d, 0xa7, 0x22, 0xdf, + 0xa1, 0xab, 0x1f, 0x55, 0x0e, 0x7a, 0x39, 0x3b, 0x03, 0x88, 0x7f, 0x18, 0xaf, 0x2d, 0xad, 0x5f, + 0x31, 0xd8, 0x55, 0x6c, 0x5f, 0xeb, 0xa5, 0x6d, 0x7f, 0xe4, 0xd2, 0x76, 0x20, 0xbc, 0xb4, 0x1d, + 0x54, 0xbf, 0xa6, 0x40, 0x3a, 0x2a, 0x6d, 0x74, 0x0a, 0xc6, 0xc8, 0xaa, 0x19, 0xcd, 0x46, 0xd5, + 0x35, 0x2b, 0xe2, 0x55, 0x0a, 0x5d, 0xc9, 0x0d, 0x06, 0x42, 0x0f, 0x30, 0xad, 0x30, 0x5c, 0xdc, + 0x30, 0x2d, 0x97, 0x17, 0x04, 0x03, 0x01, 0xe9, 0x14, 0x82, 0x8a, 
0x30, 0xc1, 0xc9, 0x0d, 0xa7, + 0x21, 0x6e, 0xfb, 0x92, 0xaf, 0xc9, 0xb4, 0xb0, 0xef, 0x35, 0x86, 0xae, 0xa7, 0x9b, 0x91, 0x6f, + 0xb5, 0x0e, 0xa8, 0x1d, 0x0b, 0x5d, 0x84, 0xc3, 0xf2, 0x5c, 0x0d, 0x29, 0x21, 0xcf, 0x36, 0xfe, + 0xb4, 0x34, 0xed, 0x52, 0x90, 0x9b, 0xef, 0x5a, 0x80, 0xaa, 0xbe, 0x0d, 0x93, 0x6d, 0xd5, 0x60, + 0xe8, 0x45, 0x18, 0xbc, 0x6d, 0xd9, 0x15, 0xe7, 0x76, 0xc7, 0x37, 0x36, 0x12, 0xdd, 0x6b, 0x14, + 0x5b, 0xe7, 0x54, 0xe8, 0x0c, 0x64, 0x82, 0x3b, 0x33, 0x61, 0x04, 0x58, 0x85, 0x74, 0x50, 0x99, + 0x28, 0xae, 0x6e, 0xbe, 0xdb, 0x17, 0x99, 0x00, 0xeb, 0x08, 0x55, 0x21, 0x5b, 0x31, 0xad, 0xda, + 0x8e, 0x21, 0x17, 0xb6, 0xf1, 0x29, 0xa5, 0x3a, 0x5c, 0x7a, 0x2d, 0x10, 0xa2, 0xb6, 0xee, 0xae, + 0x7c, 0x40, 0x9f, 0xa9, 0xc4, 0xb6, 0xa0, 0x0d, 0x32, 0xd3, 0x72, 0xd3, 0xa5, 0xf9, 0x0d, 0x3e, + 0x40, 0xa7, 0x17, 0x39, 0xba, 0x40, 0x26, 0xe2, 0x0d, 0x7a, 0x9f, 0x08, 0xfa, 0xe0, 0xdd, 0xde, + 0x81, 0x19, 0x79, 0xe6, 0xf8, 0x0e, 0x31, 0xfb, 0x41, 0xb0, 0x3e, 0x7a, 0x41, 0xdb, 0x9b, 0x40, + 0x65, 0x48, 0x21, 0xe8, 0x83, 0x95, 0x66, 0x1e, 0xaa, 0xc7, 0xb5, 0xe5, 0x2c, 0xc8, 0x25, 0x13, + 0xc5, 0x54, 0x54, 0x5c, 0x94, 0x2b, 0x2a, 0x92, 0x4e, 0x8a, 0x90, 0x59, 0xa9, 0xe4, 0x62, 0x7e, + 0x18, 0x06, 0x59, 0xad, 0xa1, 0xfa, 0x36, 0x40, 0x88, 0x82, 0x9e, 0x8d, 0xdc, 0x17, 0x31, 0x0d, + 0xca, 0x89, 0x7e, 0xc5, 0x7b, 0x61, 0xda, 0x27, 0x7d, 0x2f, 0x2c, 0xdf, 0x25, 0x5d, 0x94, 0xee, + 0x92, 0x52, 0x5d, 0x09, 0xc5, 0x3d, 0x93, 0x6a, 0xc3, 0x54, 0xcc, 0xc2, 0x90, 0x63, 0x30, 0xa2, + 0xc6, 0x5d, 0x99, 0x13, 0xfa, 0x7b, 0x02, 0x80, 0xad, 0x28, 0xb6, 0xcb, 0xa2, 0xd6, 0x44, 0x82, + 0xa8, 0x25, 0x98, 0x89, 0xd7, 0xb4, 0x96, 0xbb, 0xb2, 0x54, 0xeb, 0x5d, 0x59, 0x0e, 0x86, 0x2b, + 0x4d, 0x16, 0x1c, 0xf0, 0x0d, 0x11, 0x7c, 0xab, 0xff, 0x21, 0x05, 0xc7, 0xa4, 0x8c, 0xad, 0x74, + 0x2c, 0xbc, 0x8f, 0xdc, 0x91, 0x2b, 0xfb, 0x3c, 0xda, 0x98, 0x2b, 0x2b, 0x9f, 0x6f, 0x71, 0x19, + 0x8d, 0x1f, 0xb1, 0x34, 0xa1, 0x10, 0x01, 0x39, 0x3c, 0xde, 0x4f, 0xcc, 0xab, 0xbc, 0xd8, 0x9b, + 0x79, 
0x45, 0x03, 0xb4, 0x42, 0x8b, 0xa5, 0xdd, 0xec, 0xc0, 0x27, 0x8a, 0x63, 0xeb, 0x97, 0x15, + 0x38, 0xa1, 0x3b, 0xb5, 0xda, 0xa6, 0x59, 0xde, 0x16, 0xbc, 0x71, 0x53, 0xfd, 0x7e, 0x77, 0x35, + 0x37, 0x58, 0x5e, 0x40, 0xf2, 0xd3, 0x79, 0x64, 0x1c, 0x2d, 0x7c, 0x57, 0x7a, 0x2b, 0x7c, 0x57, + 0x7f, 0x92, 0x02, 0x14, 0x53, 0x17, 0xf2, 0x32, 0x1c, 0xe3, 0xb5, 0x69, 0xb4, 0x6f, 0x72, 0xa4, + 0xd1, 0xb7, 0x45, 0xc4, 0x82, 0x89, 0x17, 0x9f, 0xc3, 0x7a, 0x8e, 0xe1, 0x90, 0x7e, 0xb5, 0x16, + 0x0c, 0x74, 0x55, 0xae, 0x7d, 0xb7, 0xea, 0x96, 0x2f, 0xde, 0x6d, 0xa9, 0x1d, 0x0b, 0x34, 0x96, + 0x09, 0xaa, 0x54, 0xed, 0x4e, 0x29, 0xd1, 0xc7, 0x15, 0x78, 0xb0, 0x75, 0x0e, 0x52, 0x0d, 0x11, + 0x0f, 0x5c, 0x3d, 0xee, 0x2c, 0x5e, 0x4c, 0x3c, 0xf1, 0x65, 0x7a, 0x21, 0x8d, 0x05, 0x4e, 0xac, + 0x9f, 0x34, 0xbb, 0x60, 0xa0, 0x17, 0x20, 0xd7, 0x36, 0x8b, 0x30, 0xfb, 0xcb, 0x02, 0xe3, 0x23, + 0xad, 0x18, 0x41, 0x42, 0x5a, 0xb5, 0xe1, 0x64, 0xb7, 0x49, 0xb4, 0xfd, 0xb0, 0x82, 0xb2, 0xa7, + 0x1f, 0x56, 0x48, 0xc5, 0xfd, 0xb0, 0x82, 0xba, 0x05, 0xe3, 0x11, 0xb1, 0xa2, 0x07, 0x61, 0x3c, + 0x58, 0x13, 0xe9, 0xb7, 0x1f, 0xc6, 0x04, 0x90, 0x06, 0xfd, 0x59, 0x18, 0xaa, 0x5b, 0xb6, 0x55, + 0x6f, 0xd6, 0x69, 0xb7, 0x7d, 0xba, 0xf8, 0xa4, 0x2d, 0xe6, 0x1d, 0xda, 0xd2, 0xc7, 0x5b, 0xd8, + 0xa7, 0xfa, 0x55, 0x05, 0xa6, 0xe2, 0x8a, 0xb0, 0x92, 0x9f, 0xf4, 0x3d, 0x04, 0xe9, 0xba, 0x65, + 0xcb, 0xb1, 0x0d, 0xfb, 0x91, 0x89, 0xb1, 0xba, 0x65, 0x87, 0x71, 0x0d, 0xc1, 0x32, 0xef, 0xb4, + 0x47, 0x40, 0x63, 0x75, 0xf3, 0x4e, 0x88, 0x75, 0x1a, 0x26, 0x22, 0x42, 0xc7, 0x6c, 0x5f, 0x0d, + 0xeb, 0xad, 0x60, 0xf5, 0xcf, 0x53, 0x90, 0x29, 0x61, 0x9f, 0x95, 0x36, 0xde, 0x9f, 0x1d, 0x5f, + 0x6d, 0x7f, 0x03, 0xc2, 0x3c, 0x8f, 0x67, 0x93, 0xb2, 0xd8, 0x91, 0xd9, 0xc5, 0x3d, 0x06, 0xe1, + 0x77, 0x56, 0x2d, 0x2f, 0x42, 0x9e, 0x88, 0x7b, 0x11, 0x22, 0x5d, 0x66, 0xb6, 0x3f, 0x0b, 0x89, + 0xb1, 0x90, 0xf7, 0xa2, 0x00, 0xf4, 0xeb, 0x0a, 0xbd, 0x25, 0x90, 0x5e, 0x92, 0xdc, 0x17, 0x41, + 0x1f, 0x0f, 0x75, 0x8d, 0xae, 0x3e, 0xbf, 
0xf0, 0x17, 0x0a, 0x17, 0x67, 0x53, 0x7f, 0x85, 0x9c, + 0x72, 0xc4, 0x25, 0x58, 0x2a, 0xea, 0xfc, 0xc7, 0x5a, 0xee, 0x6f, 0x9e, 0x5d, 0x9a, 0x0c, 0x7a, + 0x1c, 0x90, 0x4b, 0x26, 0x81, 0x8d, 0xb2, 0x8b, 0x2b, 0xd8, 0x26, 0x11, 0xbe, 0x47, 0xd7, 0x66, + 0x58, 0x9f, 0x64, 0x2d, 0xf9, 0xb0, 0x41, 0xfd, 0x9c, 0x02, 0x47, 0xf2, 0x4e, 0xbd, 0x51, 0xc3, + 0x3e, 0xfe, 0x59, 0x4d, 0x5f, 0x3e, 0x5a, 0xb7, 0x61, 0xb2, 0xed, 0xf7, 0x48, 0xd0, 0x63, 0x30, + 0x29, 0xfd, 0x22, 0x09, 0xdf, 0xc2, 0x0a, 0xb5, 0x1d, 0x19, 0x53, 0xc6, 0x26, 0xdb, 0xf8, 0x0c, + 0xc8, 0x30, 0x66, 0xa0, 0x98, 0x72, 0x4d, 0x48, 0x70, 0x62, 0xa3, 0xd4, 0x3f, 0x52, 0xe0, 0x30, + 0x71, 0x4f, 0x22, 0xef, 0x8c, 0xee, 0x0b, 0xeb, 0xab, 0x6d, 0x0f, 0xa1, 0xfa, 0xf7, 0xfa, 0x10, + 0x8a, 0x5f, 0xb3, 0x45, 0x5f, 0x43, 0xc5, 0xa9, 0xe5, 0xdf, 0xf2, 0x9b, 0xe6, 0xb6, 0xb7, 0x41, + 0x9c, 0x43, 0x35, 0x86, 0x43, 0xe6, 0xfd, 0x84, 0x2c, 0x1e, 0x8e, 0xb0, 0x48, 0x5b, 0x19, 0x8f, + 0x6a, 0x0c, 0x8f, 0x8c, 0x38, 0x64, 0xf2, 0x46, 0xec, 0x3b, 0xa6, 0xfe, 0x5e, 0xde, 0x31, 0x89, + 0x4b, 0xe9, 0xb6, 0x88, 0x36, 0xee, 0xce, 0xe3, 0x9f, 0x14, 0x98, 0x68, 0xc9, 0x71, 0xa0, 0x17, + 0xa0, 0xbf, 0xec, 0xf0, 0x5c, 0x40, 0x3a, 0xe1, 0x9d, 0x4a, 0x0b, 0xcd, 0x6c, 0xde, 0xa9, 0x60, + 0x9d, 0x92, 0xd1, 0xb3, 0x8a, 0x67, 0x99, 0x98, 0x0e, 0x89, 0x4f, 0xf5, 0xb3, 0x0a, 0xf4, 0x13, + 0xc4, 0xb6, 0xbb, 0xba, 0xc5, 0x7c, 0xc1, 0x28, 0xad, 0xaf, 0xe5, 0xaf, 0xae, 0x6d, 0xac, 0x67, + 0x14, 0xf4, 0x00, 0x1c, 0x5d, 0xbc, 0x5a, 0x30, 0x4a, 0x05, 0xfd, 0xda, 0x52, 0xbe, 0x60, 0x68, + 0xf9, 0xfc, 0xda, 0xc6, 0xea, 0xba, 0xc1, 0x4a, 0xb3, 0x16, 0x58, 0x2a, 0x84, 0x90, 0xbc, 0xba, + 0xb1, 0xb6, 0xae, 0x19, 0x85, 0x0f, 0xe5, 0x0b, 0x85, 0x85, 0xc2, 0x42, 0xa6, 0x4f, 0x14, 0x53, + 0xcd, 0x5f, 0x37, 0xd6, 0x8a, 0x05, 0x5d, 0x5b, 0x5f, 0xd3, 0x33, 0xfd, 0xe8, 0x30, 0x4c, 0xe5, + 0x97, 0xd7, 0x36, 0x16, 0x8c, 0xab, 0x2b, 0x25, 0xe3, 0x6a, 0xe1, 0x3a, 0x4f, 0x9c, 0x0c, 0xa9, + 0x9f, 0x0e, 0x1f, 0x81, 0x87, 0xef, 0xe0, 0xc5, 0x33, 0x47, 0x25, 0xfa, 0xee, 
0x32, 0xfa, 0x06, + 0x32, 0xd5, 0xf6, 0x06, 0xf2, 0xa5, 0xc0, 0x6b, 0x8b, 0x2d, 0x19, 0xa5, 0x32, 0x97, 0x7f, 0x4a, + 0xa6, 0xb5, 0xf4, 0x53, 0xbd, 0x08, 0x47, 0x92, 0x2b, 0x42, 0x93, 0x1f, 0x7c, 0xbf, 0x08, 0x93, + 0xed, 0x6f, 0xab, 0xce, 0xc0, 0x64, 0x90, 0x36, 0x6b, 0x60, 0x97, 0x4e, 0x86, 0xef, 0xfd, 0x34, + 0x4f, 0x86, 0x15, 0xb1, 0x4b, 0x46, 0x52, 0xff, 0x8f, 0x02, 0x28, 0xa6, 0x5e, 0x3c, 0x0f, 0x03, + 0x1e, 0x31, 0x7b, 0x94, 0xd3, 0x74, 0xc2, 0x0f, 0x80, 0xb4, 0xd3, 0x51, 0xb5, 0xc0, 0x3a, 0xa3, + 0x45, 0x47, 0x60, 0x78, 0x1b, 0x47, 0x9e, 0xf2, 0x0f, 0x6d, 0x63, 0xfa, 0x8c, 0x5f, 0xbd, 0x00, + 0x03, 0x14, 0x35, 0xaa, 0x09, 0xe3, 0x30, 0x52, 0x58, 0xcd, 0xeb, 0xd7, 0x8b, 0x64, 0x95, 0x15, + 0xf2, 0xb9, 0x50, 0x10, 0x9f, 0x29, 0xb2, 0x5c, 0xc7, 0x88, 0x1f, 0xbe, 0x41, 0x7f, 0xd7, 0x20, + 0x7c, 0x8a, 0x1e, 0x78, 0x13, 0x61, 0x66, 0x5b, 0x91, 0x33, 0xdb, 0x04, 0x7e, 0xd3, 0xaa, 0xf9, + 0xd8, 0xe5, 0xeb, 0xc6, 0xbf, 0xd0, 0x51, 0x18, 0x69, 0x98, 0x55, 0xcc, 0xd2, 0x76, 0xcc, 0xbb, + 0x19, 0x26, 0x00, 0xfa, 0xe6, 0xe5, 0x38, 0x00, 0x6d, 0xf4, 0x9d, 0x6d, 0x2c, 0x9e, 0x2d, 0x50, + 0xf4, 0x75, 0x02, 0x50, 0xbf, 0xa0, 0xc0, 0xf1, 0x84, 0xc9, 0xf0, 0xe0, 0x60, 0x11, 0x46, 0x43, + 0xfd, 0x10, 0xd1, 0x41, 0x7c, 0x39, 0x6c, 0x6b, 0x27, 0xba, 0x4c, 0x89, 0x1e, 0x81, 0x09, 0x1b, + 0xdf, 0xf1, 0x0d, 0x69, 0x3a, 0xfc, 0x57, 0x21, 0x08, 0xb8, 0x18, 0x4c, 0xe9, 0x2b, 0x29, 0x38, + 0xd1, 0xda, 0x53, 0xf4, 0xfd, 0x39, 0x61, 0x4a, 0x7a, 0xab, 0xce, 0xa4, 0x34, 0xe2, 0x06, 0x4f, + 0xd3, 0x55, 0x18, 0xb7, 0x1a, 0x2c, 0x95, 0x49, 0x81, 0x22, 0x57, 0x66, 0x35, 0xf2, 0x56, 0xc5, + 0x65, 0x5d, 0xe8, 0x41, 0x22, 0x9a, 0x15, 0x09, 0x3f, 0xb7, 0x27, 0x8e, 0xa2, 0xf3, 0x68, 0x49, + 0x4f, 0xab, 0xd5, 0x20, 0x35, 0x1a, 0x51, 0x07, 0x80, 0xc1, 0x8d, 0xd5, 0x8d, 0x12, 0xd5, 0x05, + 0x04, 0xe9, 0xa5, 0x55, 0x63, 0xa3, 0x14, 0x58, 0x85, 0x4c, 0x0a, 0x65, 0x61, 0x5a, 0xc0, 0xae, + 0x68, 0x7a, 0x41, 0x9b, 0x5f, 0x2e, 0x18, 0xc5, 0x35, 0x62, 0x07, 0x66, 0x00, 0xf1, 0x16, 0x56, + 0xaa, 0xb9, 0x40, 
0xe1, 0xfd, 0xea, 0xbf, 0x28, 0x90, 0x69, 0x9d, 0x5a, 0xcb, 0xd6, 0x56, 0xda, + 0xb6, 0xb6, 0x64, 0x14, 0x52, 0x51, 0xa3, 0xd0, 0x26, 0xaf, 0xbe, 0x76, 0x79, 0x95, 0x61, 0x2a, + 0xfc, 0xa1, 0x00, 0xab, 0xc1, 0x10, 0x85, 0x23, 0xfa, 0xe4, 0x3e, 0x84, 0xa7, 0x4f, 0x06, 0xfd, + 0x2d, 0x35, 0x28, 0xc4, 0xdb, 0x63, 0x79, 0xb4, 0xfa, 0x7b, 0x7d, 0x70, 0x24, 0xb9, 0xd8, 0xbe, + 0x09, 0xd3, 0x9b, 0x56, 0xf5, 0x23, 0x4d, 0xec, 0xee, 0x18, 0x15, 0xec, 0xf9, 0x96, 0xcd, 0x72, + 0x34, 0x2c, 0x6f, 0x34, 0xdf, 0x5b, 0x71, 0xff, 0xec, 0xbc, 0x55, 0x7d, 0x95, 0x74, 0xb5, 0x10, + 0xf6, 0xa4, 0x4f, 0x89, 0xfe, 0x25, 0x20, 0xca, 0xc3, 0x09, 0x11, 0xef, 0xf2, 0xd3, 0x1d, 0x57, + 0x5d, 0xec, 0x11, 0x56, 0x7c, 0x2c, 0xfd, 0x0c, 0x17, 0xff, 0xa5, 0x2e, 0x6e, 0xb0, 0x0b, 0x14, + 0x67, 0x85, 0xa3, 0xa0, 0x77, 0x14, 0x38, 0x5a, 0x76, 0x6c, 0xaf, 0x59, 0xa7, 0x76, 0x28, 0xa0, + 0x8d, 0xfe, 0xe8, 0xd4, 0x95, 0x1e, 0x79, 0xc8, 0x87, 0x3d, 0x8a, 0x91, 0xc4, 0xbb, 0xcf, 0x72, + 0x52, 0x53, 0xee, 0x29, 0x98, 0x8a, 0xe1, 0x9d, 0x6c, 0xbd, 0x8a, 0xe9, 0x9b, 0x1e, 0x0e, 0x3d, + 0x07, 0x7d, 0x84, 0x43, 0x96, 0x2a, 0xb9, 0x8b, 0xc4, 0xa5, 0x4c, 0xe8, 0xb2, 0x83, 0xf9, 0xbf, + 0x00, 0x33, 0x09, 0x8f, 0x18, 0x12, 0x69, 0x2e, 0xfc, 0x70, 0x01, 0xd2, 0x3c, 0xef, 0xc0, 0x72, + 0x53, 0x2e, 0xfa, 0xb1, 0x02, 0x63, 0x72, 0xfd, 0x07, 0x8a, 0x4f, 0xfc, 0xc6, 0x14, 0xa3, 0xe4, + 0xce, 0xec, 0x01, 0x93, 0x59, 0x44, 0xf5, 0x63, 0xca, 0xae, 0x36, 0x11, 0xba, 0x4f, 0xe7, 0x88, + 0x53, 0xb4, 0xab, 0x71, 0x2b, 0xfd, 0xb1, 0x3f, 0xf8, 0xcb, 0x2f, 0xa6, 0x4c, 0x74, 0x6e, 0xee, + 0xd6, 0xf9, 0xb9, 0xb7, 0x18, 0xe8, 0x05, 0x8e, 0xeb, 0xcd, 0x9d, 0x9d, 0x0b, 0x62, 0xfd, 0xb9, + 0xb3, 0x77, 0xc5, 0xef, 0x42, 0x7a, 0x37, 0xce, 0xa3, 0x39, 0x82, 0x1f, 0xe0, 0xbd, 0x15, 0xf6, + 0x7e, 0x77, 0x8e, 0x16, 0x1b, 0xcc, 0xbd, 0x45, 0xfe, 0x09, 0x49, 0xd0, 0xdf, 0x28, 0x00, 0x61, + 0x95, 0x31, 0x8a, 0xf7, 0xa9, 0xda, 0xca, 0x90, 0x73, 0x1d, 0xcb, 0x5e, 0xd4, 0xff, 0xa6, 0xec, + 0x6a, 0xb9, 0x16, 0xce, 0xce, 0x85, 0xbe, 0xde, 0xae, 
0x46, 0x9d, 0x2d, 0xca, 0xa2, 0xcd, 0x59, + 0x24, 0x80, 0x04, 0x06, 0x83, 0xc9, 0xce, 0x9d, 0xbd, 0x7b, 0xe3, 0x45, 0x74, 0xa9, 0x47, 0x16, + 0xe7, 0xde, 0x0a, 0x47, 0xbe, 0x8b, 0xfe, 0x51, 0x81, 0xf1, 0x48, 0xf9, 0x37, 0x8a, 0x5f, 0xb1, + 0xb8, 0x12, 0xf1, 0x5c, 0x97, 0x32, 0x0f, 0xca, 0xf7, 0xe1, 0x04, 0xbe, 0x77, 0xb5, 0x34, 0x5b, + 0x46, 0x01, 0xa0, 0xec, 0x57, 0xd5, 0x9e, 0x56, 0xf8, 0x39, 0xe5, 0xec, 0x8d, 0xa7, 0xd4, 0x5e, + 0x17, 0xf9, 0x39, 0xe5, 0x2c, 0xfa, 0x54, 0x0a, 0xc6, 0x23, 0x55, 0xdc, 0x09, 0x7c, 0xc7, 0x55, + 0x7a, 0x77, 0xe5, 0xfb, 0x5d, 0x65, 0x57, 0x3b, 0x95, 0xbc, 0xde, 0xe7, 0x58, 0xc1, 0xda, 0xae, + 0x36, 0x4a, 0x56, 0x99, 0x7f, 0x51, 0xf6, 0xfd, 0x5c, 0x4f, 0xab, 0x4f, 0xd8, 0xd7, 0x72, 0x07, + 0x52, 0x00, 0x22, 0x8b, 0x4f, 0xa6, 0x20, 0x1d, 0xad, 0x02, 0x47, 0x67, 0x3b, 0x08, 0xa3, 0xa5, + 0x04, 0xa0, 0xab, 0x34, 0xbe, 0xae, 0x50, 0xde, 0xbe, 0xa2, 0xe4, 0x9e, 0xe9, 0x81, 0xb9, 0xb9, + 0x20, 0x13, 0xca, 0x19, 0x35, 0xd5, 0xd7, 0x0f, 0xc2, 0xa8, 0xd4, 0xdf, 0x5b, 0x72, 0xa2, 0xf7, + 0xee, 0x1c, 0x93, 0x3f, 0x11, 0xc4, 0x37, 0x53, 0x91, 0xdc, 0xba, 0x6c, 0x2f, 0x2f, 0x24, 0xe5, + 0x84, 0x92, 0xab, 0xc7, 0xbb, 0x0a, 0xe6, 0xd7, 0x98, 0x60, 0x7e, 0x51, 0x51, 0x17, 0xf7, 0x2d, + 0x18, 0x0f, 0xfb, 0xd2, 0xc8, 0x44, 0x4e, 0x37, 0x55, 0xf3, 0xbd, 0x91, 0x93, 0x19, 0x19, 0x07, + 0xfd, 0x52, 0x0a, 0x26, 0xdb, 0x4a, 0xc6, 0xd1, 0xe3, 0x89, 0xb9, 0xb3, 0xb8, 0xd2, 0xf2, 0xae, + 0x22, 0xfa, 0xa1, 0xb2, 0xab, 0x9d, 0xed, 0xb0, 0x93, 0x5a, 0x2a, 0xc7, 0x77, 0xb5, 0x69, 0xba, + 0xa5, 0x5a, 0xc0, 0x54, 0xcc, 0xff, 0x51, 0x51, 0x3f, 0xd8, 0xd3, 0xe6, 0xf2, 0x82, 0x49, 0x13, + 0xb1, 0x2e, 0xa9, 0x0b, 0x07, 0x12, 0x6b, 0x2d, 0xe8, 0x0a, 0x7d, 0x8f, 0x17, 0xeb, 0xb6, 0x95, + 0x85, 0x27, 0x97, 0xcf, 0x26, 0x94, 0xb6, 0x77, 0x95, 0xdf, 0x1f, 0x2b, 0xbb, 0xda, 0xe3, 0x1d, + 0xe4, 0xd7, 0x5e, 0xb9, 0xbe, 0xab, 0x1d, 0xa6, 0x22, 0x6c, 0x6f, 0xa1, 0x52, 0xfc, 0x9c, 0xa2, + 0x3e, 0xd7, 0xab, 0x14, 0xc3, 0xd9, 0x13, 0x41, 0x2e, 0x33, 0x6d, 0xdf, 0xb7, 0x20, 0xeb, 
0x72, + 0x6f, 0xe8, 0xff, 0xa5, 0x60, 0xa2, 0xa5, 0x6a, 0x1e, 0x3d, 0x96, 0x24, 0xc6, 0x98, 0xda, 0xfa, + 0xae, 0x12, 0xfc, 0x6d, 0x65, 0x57, 0x3b, 0xdd, 0x41, 0x82, 0x91, 0xd2, 0xf9, 0x5d, 0x0d, 0x51, + 0xe1, 0x45, 0x80, 0x54, 0x6e, 0x9f, 0x50, 0xd4, 0xa7, 0x7b, 0x95, 0x1b, 0x9b, 0x2e, 0x91, 0xd9, + 0x15, 0x35, 0x7f, 0x20, 0x99, 0x99, 0xa2, 0x27, 0xf4, 0x55, 0x5a, 0xdf, 0x18, 0xd6, 0xe2, 0x27, + 0xb8, 0x72, 0x31, 0xe5, 0xfa, 0x5d, 0x25, 0xf5, 0x7d, 0x65, 0x57, 0x7b, 0xa8, 0xe3, 0x5e, 0xe5, + 0x1d, 0xee, 0x6a, 0x69, 0xbe, 0x4b, 0x39, 0x80, 0x4a, 0xe8, 0x33, 0x8a, 0xfa, 0x6c, 0xef, 0xfb, + 0x93, 0x37, 0x11, 0x21, 0x5d, 0x55, 0x2f, 0x1f, 0x70, 0x87, 0x86, 0x9d, 0xa1, 0x77, 0x53, 0x30, + 0x26, 0x3f, 0x03, 0x48, 0x90, 0x53, 0xcc, 0x4b, 0x81, 0xae, 0x72, 0xfa, 0x7d, 0x65, 0x57, 0x3b, + 0xd3, 0x69, 0x4f, 0x46, 0xca, 0x34, 0x77, 0xb5, 0x29, 0xb6, 0x1f, 0x23, 0x50, 0x2a, 0xb1, 0x77, + 0x7a, 0x95, 0x58, 0x53, 0x9a, 0xed, 0xbd, 0x50, 0xab, 0xba, 0xe8, 0x09, 0xfd, 0x54, 0x81, 0xf1, + 0x48, 0x99, 0x7f, 0x82, 0x3b, 0x15, 0xf7, 0x14, 0xa0, 0xab, 0xc0, 0xfe, 0x2b, 0x3b, 0x27, 0xbf, + 0xb0, 0x1f, 0xd3, 0x13, 0x0c, 0x46, 0xf8, 0x5d, 0x53, 0x5f, 0x39, 0x90, 0xaf, 0xd4, 0xda, 0x21, + 0xf5, 0x9e, 0x23, 0x8f, 0x01, 0x12, 0xd8, 0x8e, 0x7b, 0x30, 0xb0, 0x27, 0xef, 0x79, 0xaf, 0x51, + 0xc3, 0xd9, 0x1e, 0xa3, 0x86, 0xb3, 0x07, 0x8b, 0x1a, 0x7e, 0xac, 0x40, 0x3a, 0x5a, 0x0e, 0x9e, + 0xe0, 0x31, 0xc6, 0x16, 0xcc, 0xe7, 0x1e, 0xdb, 0x13, 0x2e, 0x0f, 0x0b, 0xef, 0xb6, 0x47, 0x85, + 0x94, 0x67, 0x8c, 0x66, 0xf7, 0x12, 0x2a, 0x84, 0x95, 0xe6, 0x37, 0x9e, 0x44, 0xe7, 0xf7, 0xc8, + 0xb5, 0x54, 0x9e, 0xfe, 0x53, 0x05, 0xc6, 0xe4, 0x37, 0x08, 0x09, 0x86, 0x20, 0xe6, 0x99, 0x42, + 0xd7, 0x05, 0xfe, 0xef, 0xca, 0xae, 0x76, 0xac, 0x75, 0x81, 0xe5, 0x02, 0x7d, 0x16, 0x13, 0x70, + 0x3e, 0x3b, 0xad, 0x6d, 0x38, 0x5f, 0xb2, 0xba, 0xf3, 0xe8, 0xe5, 0x9e, 0xf9, 0x9c, 0x7b, 0x4b, + 0x1e, 0xf8, 0x2e, 0xfa, 0x7c, 0x0a, 0x26, 0x5a, 0x1e, 0x20, 0x24, 0x9c, 0xab, 0xf1, 0xcf, 0x14, + 0x72, 0x33, 0x6d, 0x45, 0x49, 
0x85, 0x7a, 0xc3, 0xdf, 0x51, 0xbf, 0xdd, 0x8d, 0x69, 0x59, 0xaf, + 0xdf, 0x51, 0xd4, 0x8b, 0xbd, 0x31, 0xff, 0x5c, 0x99, 0x4e, 0xa7, 0x37, 0x47, 0x23, 0x51, 0x0c, + 0x61, 0x6f, 0xe8, 0xaf, 0x15, 0x98, 0x68, 0x79, 0x82, 0x90, 0x20, 0x90, 0xf8, 0x87, 0x0a, 0xb9, + 0x53, 0x09, 0x06, 0x31, 0xc4, 0x54, 0x3f, 0x19, 0x9b, 0x01, 0x09, 0xc5, 0x51, 0xed, 0xae, 0x0a, + 0x77, 0xe7, 0x3c, 0xa9, 0xc7, 0x1b, 0x17, 0xd1, 0x93, 0x7b, 0x94, 0x01, 0x23, 0xe3, 0x35, 0xce, + 0x5f, 0x4e, 0xc1, 0x78, 0xa4, 0xa6, 0x05, 0x25, 0xe7, 0x71, 0x5a, 0xeb, 0xd3, 0x73, 0x67, 0xf7, + 0x82, 0xca, 0x37, 0xf7, 0xb7, 0xba, 0xd9, 0x38, 0x39, 0xfd, 0xf3, 0x71, 0x05, 0x3d, 0xd3, 0x7d, + 0xcb, 0xcb, 0x86, 0x2e, 0x0c, 0x66, 0x6e, 0x5c, 0x41, 0x97, 0xef, 0x4d, 0x58, 0x84, 0xfe, 0x57, + 0x0a, 0x46, 0xa5, 0x4a, 0x78, 0xf4, 0x68, 0x92, 0x16, 0xb4, 0x06, 0xca, 0x9d, 0x6b, 0x7f, 0xd4, + 0x1f, 0x28, 0xbb, 0xda, 0xa3, 0x1d, 0xfc, 0x02, 0x39, 0x1a, 0x93, 0xb5, 0xe2, 0x8b, 0x42, 0x2c, + 0xfb, 0x88, 0x1f, 0x6f, 0xbc, 0x86, 0x36, 0xde, 0x93, 0x68, 0x91, 0xc4, 0xd2, 0xe9, 0xe8, 0x0b, + 0x92, 0x84, 0x23, 0x22, 0xf6, 0x99, 0x49, 0x57, 0xdb, 0xf9, 0x83, 0x2e, 0xce, 0x66, 0x30, 0x99, + 0x5d, 0x2d, 0xc3, 0xf3, 0x4c, 0x01, 0x88, 0x4a, 0xed, 0xd3, 0x8a, 0xba, 0x6f, 0x65, 0xba, 0x17, + 0xde, 0xa6, 0xdc, 0x19, 0x15, 0x56, 0xf4, 0x75, 0x4a, 0x82, 0xb0, 0x62, 0x9f, 0xb0, 0xec, 0x49, + 0x58, 0xfb, 0xd3, 0xac, 0xb3, 0x07, 0xd0, 0xac, 0xb3, 0xef, 0x91, 0x66, 0xfd, 0x28, 0x05, 0x87, + 0x13, 0x4a, 0x05, 0x51, 0xfc, 0x8d, 0x49, 0xe7, 0xc2, 0xc2, 0xae, 0xe2, 0xfb, 0x8b, 0x7d, 0x8a, + 0xef, 0xff, 0x2a, 0xaa, 0xb6, 0xef, 0xc4, 0x8e, 0xcb, 0xe7, 0x4c, 0x74, 0xad, 0xac, 0x7e, 0xf8, + 0x3d, 0x11, 0xa5, 0x3c, 0x08, 0xfa, 0x46, 0x0a, 0x0e, 0xc5, 0xd6, 0xd6, 0xa2, 0xf3, 0xdd, 0x72, + 0x5f, 0x6d, 0x75, 0xb8, 0x5d, 0x45, 0xfa, 0x1d, 0xe6, 0xd2, 0x7f, 0x5b, 0x61, 0xbb, 0x68, 0xbf, + 0xa9, 0xaf, 0x70, 0x60, 0x22, 0xa6, 0x2d, 0xb5, 0xfc, 0xde, 0x64, 0xbe, 0x5a, 0x47, 0x42, 0xff, + 0xa0, 0xc0, 0x48, 0x50, 0x0f, 0x86, 0x1e, 0xde, 0x53, 0xbd, 0x58, 
0x57, 0x99, 0xfc, 0x0f, 0x26, + 0x93, 0x2f, 0x29, 0xea, 0x8b, 0xbd, 0x86, 0x39, 0xd1, 0xc2, 0x30, 0x22, 0x8b, 0xa2, 0x7a, 0xf5, + 0x40, 0xb2, 0x70, 0x5b, 0x7b, 0x44, 0x5f, 0x4d, 0xd1, 0x10, 0x4f, 0xfa, 0xfd, 0xfc, 0xc4, 0x10, + 0xaf, 0xad, 0xc0, 0xac, 0x2b, 0xef, 0xbf, 0xa5, 0xec, 0x6a, 0x6a, 0x87, 0x2d, 0xc6, 0x2f, 0xa7, + 0x76, 0xb5, 0x31, 0x1a, 0x0c, 0xf3, 0xcf, 0x7d, 0x67, 0xa4, 0xc2, 0x09, 0xde, 0x8b, 0x8c, 0x54, + 0x4d, 0xee, 0x0d, 0x7d, 0x25, 0x45, 0x4b, 0x66, 0xe4, 0xd2, 0xb5, 0xa4, 0x8c, 0x54, 0x6c, 0x81, + 0x5b, 0x57, 0x59, 0xfd, 0xfa, 0xde, 0xe3, 0xc2, 0xff, 0xac, 0xa8, 0x97, 0x7a, 0x93, 0x0d, 0x9d, + 0x52, 0x43, 0x4c, 0x89, 0x48, 0xe7, 0x55, 0x75, 0xf9, 0x60, 0x41, 0x73, 0x5b, 0x97, 0xe8, 0xdd, + 0x14, 0xa0, 0xf6, 0x0a, 0x39, 0x14, 0xff, 0x53, 0x69, 0x89, 0xa5, 0x74, 0x5d, 0x05, 0xf5, 0x9b, + 0x7b, 0x17, 0xd4, 0x97, 0x15, 0xf5, 0xa5, 0x9e, 0x04, 0x55, 0x16, 0xb3, 0x8a, 0xc8, 0x6a, 0x5d, + 0x5d, 0x3b, 0x90, 0xac, 0x62, 0x7b, 0x45, 0xff, 0x89, 0xe5, 0x38, 0xe5, 0x92, 0xff, 0xe4, 0x1c, + 0x67, 0xcc, 0xc3, 0x80, 0xae, 0x82, 0xfa, 0x39, 0x66, 0x79, 0xbe, 0xa1, 0xa8, 0x2f, 0x1f, 0xc4, + 0x1a, 0x93, 0x21, 0x89, 0x14, 0x36, 0xd5, 0x7f, 0xff, 0x9e, 0xd9, 0x61, 0x3e, 0x06, 0xfa, 0x2e, + 0xab, 0x17, 0x8e, 0xfe, 0x91, 0x8e, 0x73, 0x89, 0x42, 0x89, 0x29, 0x47, 0xec, 0x2a, 0x95, 0x3f, + 0xe9, 0x92, 0xa7, 0x8b, 0x56, 0x1b, 0x8a, 0x3c, 0x5d, 0x14, 0x4a, 0xe5, 0xfa, 0x5f, 0x14, 0xf5, + 0x85, 0x5e, 0x2d, 0x54, 0x64, 0xc6, 0x44, 0xa8, 0xba, 0xba, 0x72, 0xd0, 0xdc, 0x55, 0x6b, 0x9f, + 0x68, 0x57, 0xfc, 0x6a, 0x48, 0x6b, 0x29, 0x60, 0x87, 0xdf, 0xf1, 0x88, 0xaf, 0x7c, 0xec, 0x2a, + 0xcc, 0xbf, 0xeb, 0x76, 0x11, 0xd1, 0x56, 0xd5, 0x18, 0x5c, 0x44, 0xb4, 0xb5, 0x50, 0xa1, 0xfe, + 0xcf, 0xde, 0x9c, 0x2b, 0xee, 0x2e, 0xb4, 0xcc, 0x9e, 0x08, 0xf6, 0x9a, 0xfa, 0xea, 0xc1, 0x93, + 0x82, 0xed, 0xfd, 0xa2, 0xef, 0x28, 0xec, 0x5d, 0x48, 0x5b, 0x09, 0x58, 0x82, 0x3f, 0xd5, 0xa9, + 0x76, 0x2d, 0x77, 0xa1, 0x17, 0x12, 0x1e, 0x5b, 0x5f, 0xa2, 0x72, 0x7a, 0x1a, 0x3d, 0x15, 0x1f, + 0xe6, 
0xdc, 0x9d, 0x33, 0xab, 0x55, 0x17, 0x57, 0x4d, 0x1f, 0x57, 0xe6, 0x9a, 0xad, 0xbd, 0xe4, + 0x96, 0xbf, 0xaf, 0x65, 0xc3, 0xc1, 0xd8, 0xe8, 0x66, 0xc3, 0xf2, 0x66, 0xcb, 0x4e, 0xfd, 0x87, + 0xda, 0xec, 0x96, 0xef, 0x37, 0xbc, 0xe7, 0xe6, 0xe6, 0x6e, 0xdf, 0xbe, 0xdd, 0xd2, 0x38, 0x67, + 0x36, 0xfd, 0xad, 0x39, 0xfa, 0xb7, 0xac, 0x1e, 0x17, 0x7f, 0xbf, 0x70, 0xfe, 0x6b, 0x0a, 0x1c, + 0x2e, 0x3b, 0xf5, 0x38, 0x2e, 0xe6, 0xa7, 0xf2, 0xe2, 0xef, 0xa9, 0xd0, 0x6b, 0xa6, 0xa2, 0xeb, + 0xf8, 0x4e, 0x51, 0xb9, 0x71, 0x89, 0xe3, 0x56, 0x9d, 0x9a, 0x69, 0x57, 0x67, 0x1d, 0xb7, 0x3a, + 0x57, 0xc5, 0x36, 0xcd, 0x26, 0xcd, 0x85, 0x23, 0x46, 0xfe, 0x22, 0xe8, 0xf3, 0xc1, 0xc7, 0x37, + 0x53, 0x47, 0x16, 0x19, 0x39, 0xfd, 0x4b, 0x48, 0xb3, 0xf9, 0x60, 0xc0, 0x6b, 0xe7, 0xbf, 0x2f, + 0xda, 0x5e, 0xa7, 0x6d, 0xaf, 0x07, 0x6d, 0xaf, 0x5f, 0x3b, 0xbf, 0x39, 0x48, 0x07, 0x78, 0xf2, + 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x0a, 0x58, 0xc2, 0x71, 0x74, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ClusterManagerClient is the client API for ClusterManager service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterManagerClient interface { + // Lists all clusters owned by a project in either the specified zone or all + // zones. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Gets the details of a specific cluster. 
+ GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Creates a cluster, consisting of the specified number and type of Google + // Compute Engine instances. + // + // By default, the cluster is created in the project's + // [default network](/compute/docs/networks-and-firewalls#networks). + // + // One firewall is added for the cluster. After cluster creation, + // the Kubelet creates routes for each node to allow the containers + // on that node to communicate with all other instances in the + // cluster. + // + // Finally, an entry is added to the project's global metadata indicating + // which CIDR range the cluster is using. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the settings of a specific cluster. + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the version and/or image type for the specified node pool. + UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the autoscaling settings for the specified node pool. + SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the logging service for a specific cluster. + SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the monitoring service for a specific cluster. + SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the addons for a specific cluster. + SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the locations for a specific cluster. 
+ SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) + // Updates the master for a specific cluster. + UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets master auth materials. Currently supports changing the admin password + // or a specific cluster, either via password generation or explicitly setting + // the password. + SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes the cluster, including the Kubernetes endpoint and all worker + // nodes. + // + // Firewalls and routes that were configured during cluster creation + // are also deleted. + // + // Other Google Compute Engine resources that might be in use by the cluster, + // such as load balancer resources, are not deleted if they weren't present + // when the cluster was initially created. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) + // Lists all operations in a project in a specific zone or all zones. + ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) + // Gets the specified operation. + GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) + // Cancels the specified operation. + CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Returns configuration info about the Google Kubernetes Engine service. + GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) + // Lists the node pools for a cluster. + ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) + // Retrieves the requested node pool. 
+ GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) + // Creates a node pool for a cluster. + CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Deletes a node pool from a cluster. + DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) + // Rolls back a previously Aborted or Failed NodePool upgrade. + // This makes no changes if the last upgrade successfully completed. + RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the NodeManagement options for a node pool. + SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets labels on a cluster. + SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) + // Enables or disables the ABAC authorization mechanism on a cluster. + SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) + // Starts master IP rotation. + StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) + // Completes master IP rotation. + CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the size for a specific node pool. + SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) + // Enables or disables Network Policy for a cluster. + SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) + // Sets the maintenance policy for a cluster. 
+ SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error) + // Lists subnetworks that are usable for creating clusters in a project. + ListUsableSubnetworks(ctx context.Context, in *ListUsableSubnetworksRequest, opts ...grpc.CallOption) (*ListUsableSubnetworksResponse, error) +} + +type clusterManagerClient struct { + cc grpc.ClientConnInterface +} + +func NewClusterManagerClient(cc grpc.ClientConnInterface) ClusterManagerClient { + return &clusterManagerClient{cc} +} + +func (c *clusterManagerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListClusters", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetCluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateCluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateCluster", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateNodePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLoggingService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMonitoringService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetAddonsConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLocations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateMaster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMasterAuth", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteCluster", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) { + out := new(ListOperationsResponse) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListOperations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetOperation", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CancelOperation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) { + out := new(ServerConfig) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetServerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) { + out := new(ListNodePoolsResponse) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListNodePools", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) { + out := new(NodePool) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetNodePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateNodePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteNodePool", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolManagement", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLabels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLegacyAbac", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/StartIPRotation", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CompleteIPRotation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolSize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNetworkPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error) { + out := new(Operation) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMaintenancePolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterManagerClient) ListUsableSubnetworks(ctx context.Context, in *ListUsableSubnetworksRequest, opts ...grpc.CallOption) (*ListUsableSubnetworksResponse, error) { + out := new(ListUsableSubnetworksResponse) + err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListUsableSubnetworks", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterManagerServer is the server API for ClusterManager service. +type ClusterManagerServer interface { + // Lists all clusters owned by a project in either the specified zone or all + // zones. 
+ ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Gets the details of a specific cluster. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Creates a cluster, consisting of the specified number and type of Google + // Compute Engine instances. + // + // By default, the cluster is created in the project's + // [default network](/compute/docs/networks-and-firewalls#networks). + // + // One firewall is added for the cluster. After cluster creation, + // the Kubelet creates routes for each node to allow the containers + // on that node to communicate with all other instances in the + // cluster. + // + // Finally, an entry is added to the project's global metadata indicating + // which CIDR range the cluster is using. + CreateCluster(context.Context, *CreateClusterRequest) (*Operation, error) + // Updates the settings of a specific cluster. + UpdateCluster(context.Context, *UpdateClusterRequest) (*Operation, error) + // Updates the version and/or image type for the specified node pool. + UpdateNodePool(context.Context, *UpdateNodePoolRequest) (*Operation, error) + // Sets the autoscaling settings for the specified node pool. + SetNodePoolAutoscaling(context.Context, *SetNodePoolAutoscalingRequest) (*Operation, error) + // Sets the logging service for a specific cluster. + SetLoggingService(context.Context, *SetLoggingServiceRequest) (*Operation, error) + // Sets the monitoring service for a specific cluster. + SetMonitoringService(context.Context, *SetMonitoringServiceRequest) (*Operation, error) + // Sets the addons for a specific cluster. + SetAddonsConfig(context.Context, *SetAddonsConfigRequest) (*Operation, error) + // Sets the locations for a specific cluster. + SetLocations(context.Context, *SetLocationsRequest) (*Operation, error) + // Updates the master for a specific cluster. + UpdateMaster(context.Context, *UpdateMasterRequest) (*Operation, error) + // Sets master auth materials. 
Currently supports changing the admin password + // or a specific cluster, either via password generation or explicitly setting + // the password. + SetMasterAuth(context.Context, *SetMasterAuthRequest) (*Operation, error) + // Deletes the cluster, including the Kubernetes endpoint and all worker + // nodes. + // + // Firewalls and routes that were configured during cluster creation + // are also deleted. + // + // Other Google Compute Engine resources that might be in use by the cluster, + // such as load balancer resources, are not deleted if they weren't present + // when the cluster was initially created. + DeleteCluster(context.Context, *DeleteClusterRequest) (*Operation, error) + // Lists all operations in a project in a specific zone or all zones. + ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) + // Gets the specified operation. + GetOperation(context.Context, *GetOperationRequest) (*Operation, error) + // Cancels the specified operation. + CancelOperation(context.Context, *CancelOperationRequest) (*empty.Empty, error) + // Returns configuration info about the Google Kubernetes Engine service. + GetServerConfig(context.Context, *GetServerConfigRequest) (*ServerConfig, error) + // Lists the node pools for a cluster. + ListNodePools(context.Context, *ListNodePoolsRequest) (*ListNodePoolsResponse, error) + // Retrieves the requested node pool. + GetNodePool(context.Context, *GetNodePoolRequest) (*NodePool, error) + // Creates a node pool for a cluster. + CreateNodePool(context.Context, *CreateNodePoolRequest) (*Operation, error) + // Deletes a node pool from a cluster. + DeleteNodePool(context.Context, *DeleteNodePoolRequest) (*Operation, error) + // Rolls back a previously Aborted or Failed NodePool upgrade. + // This makes no changes if the last upgrade successfully completed. 
+ RollbackNodePoolUpgrade(context.Context, *RollbackNodePoolUpgradeRequest) (*Operation, error) + // Sets the NodeManagement options for a node pool. + SetNodePoolManagement(context.Context, *SetNodePoolManagementRequest) (*Operation, error) + // Sets labels on a cluster. + SetLabels(context.Context, *SetLabelsRequest) (*Operation, error) + // Enables or disables the ABAC authorization mechanism on a cluster. + SetLegacyAbac(context.Context, *SetLegacyAbacRequest) (*Operation, error) + // Starts master IP rotation. + StartIPRotation(context.Context, *StartIPRotationRequest) (*Operation, error) + // Completes master IP rotation. + CompleteIPRotation(context.Context, *CompleteIPRotationRequest) (*Operation, error) + // Sets the size for a specific node pool. + SetNodePoolSize(context.Context, *SetNodePoolSizeRequest) (*Operation, error) + // Enables or disables Network Policy for a cluster. + SetNetworkPolicy(context.Context, *SetNetworkPolicyRequest) (*Operation, error) + // Sets the maintenance policy for a cluster. + SetMaintenancePolicy(context.Context, *SetMaintenancePolicyRequest) (*Operation, error) + // Lists subnetworks that are usable for creating clusters in a project. + ListUsableSubnetworks(context.Context, *ListUsableSubnetworksRequest) (*ListUsableSubnetworksResponse, error) +} + +// UnimplementedClusterManagerServer can be embedded to have forward compatible implementations. 
+type UnimplementedClusterManagerServer struct { +} + +func (*UnimplementedClusterManagerServer) ListClusters(ctx context.Context, req *ListClustersRequest) (*ListClustersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListClusters not implemented") +} +func (*UnimplementedClusterManagerServer) GetCluster(ctx context.Context, req *GetClusterRequest) (*Cluster, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCluster not implemented") +} +func (*UnimplementedClusterManagerServer) CreateCluster(ctx context.Context, req *CreateClusterRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCluster not implemented") +} +func (*UnimplementedClusterManagerServer) UpdateCluster(ctx context.Context, req *UpdateClusterRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCluster not implemented") +} +func (*UnimplementedClusterManagerServer) UpdateNodePool(ctx context.Context, req *UpdateNodePoolRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNodePool not implemented") +} +func (*UnimplementedClusterManagerServer) SetNodePoolAutoscaling(ctx context.Context, req *SetNodePoolAutoscalingRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolAutoscaling not implemented") +} +func (*UnimplementedClusterManagerServer) SetLoggingService(ctx context.Context, req *SetLoggingServiceRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetLoggingService not implemented") +} +func (*UnimplementedClusterManagerServer) SetMonitoringService(ctx context.Context, req *SetMonitoringServiceRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetMonitoringService not implemented") +} +func (*UnimplementedClusterManagerServer) SetAddonsConfig(ctx context.Context, req *SetAddonsConfigRequest) (*Operation, error) { + 
return nil, status.Errorf(codes.Unimplemented, "method SetAddonsConfig not implemented") +} +func (*UnimplementedClusterManagerServer) SetLocations(ctx context.Context, req *SetLocationsRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetLocations not implemented") +} +func (*UnimplementedClusterManagerServer) UpdateMaster(ctx context.Context, req *UpdateMasterRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateMaster not implemented") +} +func (*UnimplementedClusterManagerServer) SetMasterAuth(ctx context.Context, req *SetMasterAuthRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetMasterAuth not implemented") +} +func (*UnimplementedClusterManagerServer) DeleteCluster(ctx context.Context, req *DeleteClusterRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCluster not implemented") +} +func (*UnimplementedClusterManagerServer) ListOperations(ctx context.Context, req *ListOperationsRequest) (*ListOperationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListOperations not implemented") +} +func (*UnimplementedClusterManagerServer) GetOperation(ctx context.Context, req *GetOperationRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOperation not implemented") +} +func (*UnimplementedClusterManagerServer) CancelOperation(ctx context.Context, req *CancelOperationRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelOperation not implemented") +} +func (*UnimplementedClusterManagerServer) GetServerConfig(ctx context.Context, req *GetServerConfigRequest) (*ServerConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServerConfig not implemented") +} +func (*UnimplementedClusterManagerServer) ListNodePools(ctx context.Context, req *ListNodePoolsRequest) (*ListNodePoolsResponse, 
error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNodePools not implemented") +} +func (*UnimplementedClusterManagerServer) GetNodePool(ctx context.Context, req *GetNodePoolRequest) (*NodePool, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNodePool not implemented") +} +func (*UnimplementedClusterManagerServer) CreateNodePool(ctx context.Context, req *CreateNodePoolRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNodePool not implemented") +} +func (*UnimplementedClusterManagerServer) DeleteNodePool(ctx context.Context, req *DeleteNodePoolRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNodePool not implemented") +} +func (*UnimplementedClusterManagerServer) RollbackNodePoolUpgrade(ctx context.Context, req *RollbackNodePoolUpgradeRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method RollbackNodePoolUpgrade not implemented") +} +func (*UnimplementedClusterManagerServer) SetNodePoolManagement(ctx context.Context, req *SetNodePoolManagementRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolManagement not implemented") +} +func (*UnimplementedClusterManagerServer) SetLabels(ctx context.Context, req *SetLabelsRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetLabels not implemented") +} +func (*UnimplementedClusterManagerServer) SetLegacyAbac(ctx context.Context, req *SetLegacyAbacRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetLegacyAbac not implemented") +} +func (*UnimplementedClusterManagerServer) StartIPRotation(ctx context.Context, req *StartIPRotationRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartIPRotation not implemented") +} +func (*UnimplementedClusterManagerServer) CompleteIPRotation(ctx context.Context, req 
*CompleteIPRotationRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteIPRotation not implemented") +} +func (*UnimplementedClusterManagerServer) SetNodePoolSize(ctx context.Context, req *SetNodePoolSizeRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetNodePoolSize not implemented") +} +func (*UnimplementedClusterManagerServer) SetNetworkPolicy(ctx context.Context, req *SetNetworkPolicyRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetNetworkPolicy not implemented") +} +func (*UnimplementedClusterManagerServer) SetMaintenancePolicy(ctx context.Context, req *SetMaintenancePolicyRequest) (*Operation, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetMaintenancePolicy not implemented") +} +func (*UnimplementedClusterManagerServer) ListUsableSubnetworks(ctx context.Context, req *ListUsableSubnetworksRequest) (*ListUsableSubnetworksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUsableSubnetworks not implemented") +} + +func RegisterClusterManagerServer(s *grpc.Server, srv ClusterManagerServer) { + s.RegisterService(&_ClusterManager_serviceDesc, srv) +} + +func _ClusterManager_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetCluster_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ClusterManager_UpdateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).UpdateNodePool(ctx, req.(*UpdateNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolAutoscaling_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolAutoscalingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, req.(*SetNodePoolAutoscalingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLoggingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLoggingServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLoggingService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLoggingService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ + return srv.(ClusterManagerServer).SetLoggingService(ctx, req.(*SetLoggingServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetMonitoringService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetMonitoringServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetMonitoringService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetMonitoringService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetMonitoringService(ctx, req.(*SetMonitoringServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetAddonsConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetAddonsConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetAddonsConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetAddonsConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetAddonsConfig(ctx, req.(*SetAddonsConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLocationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLocations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.container.v1.ClusterManager/SetLocations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLocations(ctx, req.(*SetLocationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_UpdateMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateMasterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).UpdateMaster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/UpdateMaster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).UpdateMaster(ctx, req.(*UpdateMasterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetMasterAuth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetMasterAuthRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetMasterAuth(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetMasterAuth", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetMasterAuth(ctx, req.(*SetMasterAuthRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).DeleteCluster(ctx, in) + } 
+ info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListOperationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListOperations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListOperations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListOperations(ctx, req.(*ListOperationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetOperation(ctx, req.(*GetOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(ClusterManagerServer).CancelOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CancelOperation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CancelOperation(ctx, req.(*CancelOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetServerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetServerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetServerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetServerConfig(ctx, req.(*GetServerConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_ListNodePools_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodePoolsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListNodePools(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListNodePools", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListNodePools(ctx, req.(*ListNodePoolsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_GetNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in 
:= new(GetNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).GetNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/GetNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).GetNodePool(ctx, req.(*GetNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CreateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CreateNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CreateNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CreateNodePool(ctx, req.(*CreateNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_DeleteNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNodePoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).DeleteNodePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/DeleteNodePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).DeleteNodePool(ctx, req.(*DeleteNodePoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_RollbackNodePoolUpgrade_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackNodePoolUpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, req.(*RollbackNodePoolUpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolManagement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolManagementRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolManagement", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, req.(*SetNodePoolManagementRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLabels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLabelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLabels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLabels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLabels(ctx, 
req.(*SetLabelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetLegacyAbac_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetLegacyAbacRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetLegacyAbac(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetLegacyAbac", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetLegacyAbac(ctx, req.(*SetLegacyAbacRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_StartIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartIPRotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).StartIPRotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/StartIPRotation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).StartIPRotation(ctx, req.(*StartIPRotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_CompleteIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteIPRotationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).CompleteIPRotation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/CompleteIPRotation", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).CompleteIPRotation(ctx, req.(*CompleteIPRotationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNodePoolSize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNodePoolSizeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNodePoolSize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNodePoolSize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNodePoolSize(ctx, req.(*SetNodePoolSizeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetNetworkPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetNetworkPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetNetworkPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, req.(*SetNetworkPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_SetMaintenancePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetMaintenancePolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/SetMaintenancePolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, req.(*SetMaintenancePolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterManager_ListUsableSubnetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsableSubnetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterManagerServer).ListUsableSubnetworks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.container.v1.ClusterManager/ListUsableSubnetworks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterManagerServer).ListUsableSubnetworks(ctx, req.(*ListUsableSubnetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.container.v1.ClusterManager", + HandlerType: (*ClusterManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListClusters", + Handler: _ClusterManager_ListClusters_Handler, + }, + { + MethodName: "GetCluster", + Handler: _ClusterManager_GetCluster_Handler, + }, + { + MethodName: "CreateCluster", + Handler: _ClusterManager_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _ClusterManager_UpdateCluster_Handler, + }, + { + MethodName: "UpdateNodePool", + Handler: _ClusterManager_UpdateNodePool_Handler, + }, + { + MethodName: "SetNodePoolAutoscaling", + Handler: _ClusterManager_SetNodePoolAutoscaling_Handler, + }, + { + MethodName: "SetLoggingService", + Handler: _ClusterManager_SetLoggingService_Handler, + }, + { + MethodName: "SetMonitoringService", + Handler: 
_ClusterManager_SetMonitoringService_Handler, + }, + { + MethodName: "SetAddonsConfig", + Handler: _ClusterManager_SetAddonsConfig_Handler, + }, + { + MethodName: "SetLocations", + Handler: _ClusterManager_SetLocations_Handler, + }, + { + MethodName: "UpdateMaster", + Handler: _ClusterManager_UpdateMaster_Handler, + }, + { + MethodName: "SetMasterAuth", + Handler: _ClusterManager_SetMasterAuth_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _ClusterManager_DeleteCluster_Handler, + }, + { + MethodName: "ListOperations", + Handler: _ClusterManager_ListOperations_Handler, + }, + { + MethodName: "GetOperation", + Handler: _ClusterManager_GetOperation_Handler, + }, + { + MethodName: "CancelOperation", + Handler: _ClusterManager_CancelOperation_Handler, + }, + { + MethodName: "GetServerConfig", + Handler: _ClusterManager_GetServerConfig_Handler, + }, + { + MethodName: "ListNodePools", + Handler: _ClusterManager_ListNodePools_Handler, + }, + { + MethodName: "GetNodePool", + Handler: _ClusterManager_GetNodePool_Handler, + }, + { + MethodName: "CreateNodePool", + Handler: _ClusterManager_CreateNodePool_Handler, + }, + { + MethodName: "DeleteNodePool", + Handler: _ClusterManager_DeleteNodePool_Handler, + }, + { + MethodName: "RollbackNodePoolUpgrade", + Handler: _ClusterManager_RollbackNodePoolUpgrade_Handler, + }, + { + MethodName: "SetNodePoolManagement", + Handler: _ClusterManager_SetNodePoolManagement_Handler, + }, + { + MethodName: "SetLabels", + Handler: _ClusterManager_SetLabels_Handler, + }, + { + MethodName: "SetLegacyAbac", + Handler: _ClusterManager_SetLegacyAbac_Handler, + }, + { + MethodName: "StartIPRotation", + Handler: _ClusterManager_StartIPRotation_Handler, + }, + { + MethodName: "CompleteIPRotation", + Handler: _ClusterManager_CompleteIPRotation_Handler, + }, + { + MethodName: "SetNodePoolSize", + Handler: _ClusterManager_SetNodePoolSize_Handler, + }, + { + MethodName: "SetNetworkPolicy", + Handler: _ClusterManager_SetNetworkPolicy_Handler, + 
}, + { + MethodName: "SetMaintenancePolicy", + Handler: _ClusterManager_SetMaintenancePolicy_Handler, + }, + { + MethodName: "ListUsableSubnetworks", + Handler: _ClusterManager_ListUsableSubnetworks_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/container/v1/cluster_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go new file mode 100644 index 0000000000..646304e8b9 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go @@ -0,0 +1,1274 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v2/trace.proto + +package cloudtrace + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. 
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 3, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Often, a trace contains a root span +// that describes the end-to-end latency, and one or more subspans for +// its sub-operations. A trace can also contain multiple root spans, +// or none at all. Spans do not need to be contiguous—there may be +// gaps or overlaps between spans in a trace. 
+type Span struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The [SPAN_ID] portion of the span's resource name. + SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The [SPAN_ID] of this span's parent span. If this is a root span, + // then this field must be empty. + ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation (up to 128 bytes). + // Stackdriver Trace displays the description in the + // Google Cloud Platform Console. + // For example, the display name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name within an application and at the same call point. + // This makes it easier to correlate spans in different traces. + DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. 
On the server side, this + // is the time when the server application handler stops running. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. You can have up to 32 attributes per + // span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // Stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // A set of time events. You can have up to 32 annotations and 128 message + // events per span. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // Links associated with the span. You can have up to 128 links per Span. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // Optional. The final status for this span. + Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // Optional. Set this parameter to indicate whether this span is in + // the same process as its parent. If you do not set this parameter, + // Stackdriver Trace is unable to take advantage of this helpful + // information. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // Optional. The number of child spans that were generated while this span + // was active. If set, allows implementation to detect missing child spans. 
+ ChildSpanCount *wrappers.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span) GetParentSpanId() string { + if m != nil { + return m.ParentSpanId + } + return "" +} + +func (m *Span) GetDisplayName() *TruncatableString { + if m != nil { + return m.DisplayName + } + return nil +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func 
(m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.Int32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// A set of attributes, each in the format `[KEY]:[VALUE]`. +type Span_Attributes struct { + // The set of attributes. Each attribute's key can be up to 128 bytes + // long. The value can be a string up to 256 bytes, a signed 64-bit integer, + // or the Boolean values `true` and `false`. For example: + // + // "/instance_id": "my-instance" + // "/http/user_agent": "" + // "/http/request_bytes": 300 + // "abc.com/myattribute": true + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0 then all attributes are valid. 
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 0} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The timestamp indicating the time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m 
*Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +// Text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. The maximum length for + // the description is 256 bytes. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. You can have up to 4 attributes + // per Annotation. + Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // Type of MessageEvent. Indicates whether the message was sent or + // received. + Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. It is recommended to be unique within + // a Span. + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"` + // The number of compressed bytes sent or received. If missing assumed to + // be the same size as uncompressed. 
+ CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 { + if m != nil { + return m.UncompressedSizeBytes + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 { + if m != nil { + return m.CompressedSizeBytes + } + return 0 +} + +// A 
collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key:value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. + DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 2} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + 
xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // The [TRACE_ID] for a trace within a project. + TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // The [SPAN_ID] for a span within a trace. + SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. You have have up to 32 attributes per + // link. 
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 3} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Span_Link) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. 
+ DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 4} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute. +type AttributeValue struct { + // The type of the value. 
+ // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{1} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() 
isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + } +} + +// A call stack appearing in a trace. +type StackTrace struct { + // Stack frames in this stack trace. A maximum of 128 frames are allowed. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both the + // `stackFrame` content and a value in `stackTraceHashId`. + // + // Subsequent spans within the same request can refer + // to that stack trace by only setting `stackTraceHashId`. 
+ StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() int64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// Represents a single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame (up to 1024 bytes). + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully-qualified (up to 1024 bytes). 
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears (up to 256 + // bytes). + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code (up to 128 bytes). 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) 
GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. + DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() 
[]*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// Binary module. +type Module struct { + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so (up to 256 bytes). + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents (up to 128 bytes). + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{3} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// Represents a string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. 
For example, if the original string is 500 + // bytes long and the limit of the string is 128 bytes, then + // `value` contains the first 128 bytes of the 500-byte string. + // + // Truncation always happens on a UTF8 character boundary. If there + // are multi-byte characters in the string, then the length of the + // shortened string might be less than the size limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{4} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != 
nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "google.devtools.cloudtrace.v2.Span") + proto.RegisterType((*Span_Attributes)(nil), "google.devtools.cloudtrace.v2.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "google.devtools.cloudtrace.v2.Span.Link") + proto.RegisterType((*Span_Links)(nil), "google.devtools.cloudtrace.v2.Span.Links") + proto.RegisterType((*AttributeValue)(nil), "google.devtools.cloudtrace.v2.AttributeValue") + proto.RegisterType((*StackTrace)(nil), "google.devtools.cloudtrace.v2.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "google.devtools.cloudtrace.v2.Module") + proto.RegisterType((*TruncatableString)(nil), "google.devtools.cloudtrace.v2.TruncatableString") +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/trace.proto", fileDescriptor_29869cc16dc8ce61) +} + +var fileDescriptor_29869cc16dc8ce61 = []byte{ + // 1521 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, + 0x16, 0x36, 0xf5, 0xb0, 0xa5, 0x23, 0xd9, 0x90, 0xc7, 0x76, 0xac, 0x28, 0xcf, 0xeb, 0x7b, 0x2f, + 0xe0, 0x2c, 0x4c, 0x06, 0x4a, 0xee, 0x45, 0xe0, 0x7b, 0xd1, 0x54, 0xb6, 0xe5, 0x48, 0x89, 0xad, + 0x0a, 0x94, 0xe3, 0xb6, 0x69, 0x0a, 0x62, 0x44, 0x8e, 0x65, 0x36, 0x14, 0x49, 0x70, 0x48, 0x17, + 0x4e, 0x90, 0x45, 0xd7, 0x5d, 0x77, 0x53, 0xa0, 0xe8, 0xb2, 0x40, 0x56, 0xf9, 0x1d, 0x5d, 0x74, + 0xdb, 0x6d, 0xd7, 0xfd, 0x03, 0xdd, 0x16, 0xf3, 0xe0, 0x43, 0x79, 0xd9, 0x56, 0x56, 0xe2, 0x9c, + 0x73, 0xbe, 0x6f, 0xe6, 0xcc, 0x9c, 0x97, 0x0d, 0xb7, 0x46, 0x9e, 0x37, 0x72, 0x88, 0x66, 0x91, + 0x93, 0xd0, 0xf3, 0x1c, 0xaa, 0x99, 0x8e, 0x17, 0x59, 0x61, 0x80, 0x4d, 0xa2, 0x9d, 0x34, 0x35, + 0xfe, 0xa1, 0xfa, 0x81, 0x17, 0x7a, 0xe8, 0x9a, 0x30, 0x55, 0x63, 0x53, 0x35, 0x35, 0x55, 0x4f, + 0x9a, 0x8d, 0xab, 0x92, 0x09, 0xfb, 0xb6, 0x86, 0x5d, 0xd7, 0x0b, 0x71, 0x68, 0x7b, 0x2e, 0x15, + 0xe0, 0xc6, 0x8d, 0x8c, 0xf6, 0xc8, 0x26, 0x8e, 0x65, 0x0c, 0xc9, 0x31, 0x3e, 0xb1, 0xbd, 0x40, + 0x1a, 0x5c, 0xce, 0x18, 0x04, 0x84, 0x7a, 0x51, 0x10, 0x6f, 0x9c, 0x60, 0xf9, 0x6a, 0x18, 0x1d, + 0x69, 0xa1, 0x3d, 0x26, 0x34, 0xc4, 0x63, 0x5f, 0x1a, 0x5c, 0x7f, 0xd3, 0xe0, 0xdb, 0x00, 0xfb, + 0x3e, 0x09, 0xe2, 0xcd, 0x57, 0xa5, 0x3e, 0xf0, 0x4d, 0x8d, 0x86, 0x38, 0x8c, 0xa4, 0x62, 0xed, + 0xaf, 0x25, 0x28, 0x0c, 0x7c, 0xec, 0xa2, 0x55, 0x28, 0xb8, 0x78, 0x4c, 0xea, 0xca, 0x4d, 0x65, + 0xbd, 0xbc, 0x95, 0xff, 0xa3, 0x95, 0xd3, 0xb9, 0x00, 0x5d, 0x85, 0x39, 0xea, 0x63, 0xd7, 0xb0, + 0xad, 0x7a, 0x2e, 0xd5, 0xcd, 0x32, 0x59, 0xd7, 0x42, 0xff, 0x82, 0x05, 0x1f, 0x07, 0xc4, 0x0d, + 0x8d, 0xd8, 0x28, 0xcf, 0x8c, 0xf4, 0xaa, 0x90, 0x0e, 0x84, 0xd5, 0x17, 0x50, 0xb5, 0x6c, 0xea, + 0x3b, 0xf8, 0xd4, 0xe0, 0x9b, 0x14, 0x6e, 0x2a, 0xeb, 0x95, 0xe6, 0x6d, 0xf5, 0x83, 0xf7, 0xa9, + 0x1e, 0x04, 0x91, 0x6b, 0xe2, 0x10, 0x0f, 0x1d, 0x32, 0x08, 0x03, 0xdb, 0x1d, 0x89, 0xad, 0x2b, + 0x92, 
0xaa, 0xc7, 0x4e, 0xf7, 0x09, 0x00, 0x0d, 0x71, 0x10, 0x1a, 0xec, 0x46, 0xea, 0x45, 0xce, + 0xdb, 0x88, 0x79, 0xe3, 0xdb, 0x50, 0x0f, 0xe2, 0xeb, 0x12, 0x0c, 0x65, 0x0e, 0x61, 0x42, 0xb4, + 0x09, 0x25, 0xe2, 0x5a, 0x02, 0x3d, 0x7b, 0x3e, 0xf4, 0x1c, 0x71, 0x2d, 0x8e, 0xed, 0x01, 0xe0, + 0x30, 0x0c, 0xec, 0x61, 0x14, 0x12, 0x5a, 0x9f, 0xe3, 0x68, 0xf5, 0x0c, 0x9f, 0xd8, 0x85, 0xa8, + 0xad, 0x04, 0xa5, 0x67, 0x18, 0xd0, 0x43, 0xa8, 0xd0, 0x10, 0x9b, 0xcf, 0x0c, 0x6e, 0x5d, 0x2f, + 0x71, 0xc2, 0x5b, 0x67, 0x11, 0x32, 0xc4, 0x01, 0x5b, 0xe9, 0x40, 0x93, 0x6f, 0xf4, 0x19, 0x54, + 0x98, 0x4f, 0x06, 0x39, 0x21, 0x6e, 0x48, 0xeb, 0xe5, 0xf3, 0x1f, 0x8e, 0xb9, 0xd6, 0xe6, 0x28, + 0x1d, 0xc2, 0xe4, 0x1b, 0xdd, 0x87, 0xa2, 0x63, 0xbb, 0xcf, 0x68, 0x1d, 0xce, 0x77, 0x2c, 0x46, + 0xb5, 0xc7, 0x00, 0xba, 0xc0, 0x21, 0x0d, 0x66, 0x45, 0xe4, 0xd5, 0x2b, 0x9c, 0x01, 0xc5, 0x0c, + 0x81, 0x6f, 0x32, 0x2f, 0xc2, 0x88, 0xb2, 0xfb, 0x55, 0x74, 0x69, 0x86, 0xbe, 0x86, 0x2b, 0x14, + 0x8f, 0x89, 0xe1, 0x07, 0x9e, 0x49, 0x28, 0x35, 0x30, 0x35, 0x32, 0xa1, 0x56, 0xaf, 0xbe, 0xe7, + 0xb5, 0xb6, 0x3c, 0xcf, 0x39, 0xc4, 0x4e, 0x44, 0x04, 0xdb, 0x2a, 0xe3, 0xe8, 0x0b, 0x8a, 0x16, + 0xed, 0x27, 0x51, 0x89, 0x1e, 0x42, 0xcd, 0x3c, 0xb6, 0x1d, 0x4b, 0x04, 0xae, 0xe9, 0x45, 0x6e, + 0x58, 0x9f, 0xe7, 0x9c, 0x57, 0xde, 0xe2, 0xec, 0xba, 0xe1, 0x9d, 0x66, 0x86, 0x74, 0x81, 0x23, + 0x19, 0xcd, 0x36, 0xc3, 0x35, 0x7e, 0xca, 0x01, 0xa4, 0x8f, 0x8a, 0x08, 0xcc, 0x27, 0xcf, 0x6a, + 0x8c, 0xb1, 0x5f, 0x57, 0x6e, 0xe6, 0xd7, 0x2b, 0xcd, 0x4f, 0x2f, 0x16, 0x1b, 0xe9, 0xe7, 0x3e, + 0xf6, 0xdb, 0x6e, 0x18, 0x9c, 0xea, 0x55, 0x9c, 0x11, 0xa1, 0x7b, 0x50, 0xb7, 0x02, 0xcf, 0xf7, + 0x89, 0x65, 0xa4, 0x51, 0x24, 0x3d, 0x61, 0xa9, 0x5a, 0xd4, 0x2f, 0x49, 0x7d, 0x4a, 0x2a, 0xce, + 0xeb, 0xc2, 0xe2, 0x5b, 0xe4, 0xa8, 0x06, 0xf9, 0x67, 0xe4, 0x54, 0x14, 0x00, 0x9d, 0x7d, 0xa2, + 0x6d, 0x28, 0x9e, 0x30, 0xa7, 0x39, 0x5b, 0xa5, 0xb9, 0x71, 0xc6, 0xf9, 0x13, 0x4a, 0x7e, 0x53, + 0xba, 0xc0, 0x6e, 0xe6, 0xee, 0x29, 0x8d, 
0xdf, 0x8a, 0x50, 0x4e, 0xe2, 0x0a, 0xa9, 0x50, 0xe0, + 0xf9, 0xa6, 0x9c, 0x95, 0x6f, 0x3a, 0xb7, 0x43, 0x4f, 0x00, 0xd2, 0x72, 0x2a, 0xcf, 0x72, 0xef, + 0x42, 0xa1, 0xac, 0xb6, 0x12, 0x7c, 0x67, 0x46, 0xcf, 0xb0, 0x21, 0x0c, 0xf3, 0x63, 0x42, 0x29, + 0x1e, 0xc9, 0x54, 0xe1, 0xe5, 0xab, 0xd2, 0xdc, 0xbc, 0x18, 0xfd, 0xbe, 0xa0, 0xe0, 0x8b, 0xce, + 0x8c, 0x5e, 0x1d, 0x67, 0xd6, 0x8d, 0xd7, 0x0a, 0x40, 0xba, 0x3f, 0xd2, 0xa1, 0x62, 0x11, 0x6a, + 0x06, 0xb6, 0xcf, 0xdd, 0x51, 0xa6, 0x2b, 0x85, 0x7a, 0x96, 0xe4, 0x8d, 0x4a, 0x94, 0xfb, 0xd8, + 0x4a, 0xd4, 0xf8, 0x21, 0x07, 0xd5, 0xac, 0x4f, 0x68, 0x00, 0x85, 0xf0, 0xd4, 0x17, 0x4f, 0xb6, + 0xd0, 0xbc, 0x3f, 0xfd, 0xed, 0xa8, 0x07, 0xa7, 0x3e, 0xd1, 0x39, 0x19, 0x5a, 0x80, 0x9c, 0x6c, + 0x2a, 0x79, 0x3d, 0x67, 0x5b, 0xe8, 0xbf, 0xb0, 0x1a, 0xb9, 0xa6, 0x37, 0xf6, 0x03, 0x42, 0x29, + 0xb1, 0x0c, 0x6a, 0x3f, 0x27, 0xc6, 0xf0, 0x94, 0xb9, 0x94, 0xe7, 0x46, 0x2b, 0x59, 0xf5, 0xc0, + 0x7e, 0x4e, 0xb6, 0x98, 0x12, 0x35, 0x61, 0xe5, 0xdd, 0xa8, 0x02, 0x47, 0x2d, 0xbd, 0x03, 0xb3, + 0x76, 0x17, 0x0a, 0xec, 0x24, 0x68, 0x19, 0x6a, 0x07, 0x5f, 0xf6, 0xdb, 0xc6, 0xe3, 0xde, 0xa0, + 0xdf, 0xde, 0xee, 0xee, 0x76, 0xdb, 0x3b, 0xb5, 0x19, 0x54, 0x82, 0xc2, 0xa0, 0xdd, 0x3b, 0xa8, + 0x29, 0xa8, 0x0a, 0x25, 0xbd, 0xbd, 0xdd, 0xee, 0x1e, 0xb6, 0x77, 0x6a, 0xb9, 0xad, 0x39, 0x99, + 0x10, 0x8d, 0xdf, 0x15, 0x80, 0xb4, 0x50, 0xa2, 0x3d, 0x80, 0xb4, 0xda, 0xca, 0x6c, 0xdf, 0xb8, + 0xd0, 0x25, 0xe9, 0xe5, 0xa4, 0xd6, 0xa2, 0x4d, 0xb8, 0x9c, 0xe4, 0x75, 0x3a, 0x46, 0x4c, 0x24, + 0xf6, 0x6a, 0x9c, 0xd8, 0xa9, 0x9e, 0x67, 0x36, 0xba, 0x0f, 0x57, 0x63, 0xec, 0x44, 0x5c, 0xc7, + 0xf0, 0x3c, 0x87, 0xc7, 0xfc, 0xd9, 0x97, 0x91, 0xa5, 0xe1, 0xc7, 0x1c, 0x14, 0x58, 0xdd, 0x46, + 0x97, 0xa1, 0xc4, 0xcf, 0xca, 0x7a, 0xba, 0xa8, 0x09, 0x73, 0x7c, 0xdd, 0xb5, 0xd0, 0xea, 0x1b, + 0x23, 0x41, 0x32, 0x0d, 0xb4, 0x64, 0x98, 0xe4, 0x79, 0x98, 0x6c, 0x9c, 0xb7, 0x47, 0x64, 0x83, + 0x62, 0x32, 0x94, 0x0b, 0x1f, 0x1b, 0xca, 0x6b, 0x8f, 0x3e, 0xf8, 0xd0, 0x2b, 
0xb0, 0xb8, 0xdd, + 0xe9, 0xee, 0xed, 0x18, 0x7b, 0xdd, 0xde, 0xa3, 0xf6, 0x8e, 0x31, 0xe8, 0xb7, 0x7a, 0x35, 0x05, + 0x5d, 0x02, 0xd4, 0x6f, 0xe9, 0xed, 0xde, 0xc1, 0x84, 0x3c, 0xd7, 0x88, 0xa0, 0xc8, 0x7b, 0x1a, + 0xfa, 0x3f, 0x14, 0x58, 0x57, 0x93, 0x4f, 0xbd, 0x7e, 0x5e, 0x47, 0x75, 0x8e, 0x42, 0x2a, 0x2c, + 0xc5, 0x8f, 0xc4, 0x7b, 0xe3, 0xc4, 0xd3, 0x2e, 0x4a, 0x15, 0xdf, 0x88, 0xbf, 0xc9, 0xe6, 0xe0, + 0xcf, 0x56, 0x1f, 0xae, 0x67, 0x38, 0xc5, 0x76, 0xd8, 0xb7, 0xa9, 0x6a, 0x7a, 0x63, 0x8d, 0xf7, + 0x33, 0x56, 0x3f, 0xbf, 0x21, 0x66, 0x48, 0xb5, 0x17, 0xf2, 0xeb, 0xa5, 0x18, 0x5e, 0xa9, 0xf6, + 0x82, 0xff, 0xbe, 0xd4, 0xd8, 0x2b, 0x51, 0xed, 0x05, 0xfb, 0x79, 0xb9, 0xf6, 0x5a, 0x81, 0x85, + 0xc9, 0x8a, 0x8d, 0x1e, 0x43, 0x95, 0xf2, 0xea, 0x62, 0x88, 0xb2, 0x3f, 0x65, 0x6d, 0xea, 0xcc, + 0xe8, 0x15, 0xc1, 0x23, 0x68, 0xaf, 0x41, 0xd9, 0x76, 0x43, 0x23, 0x6d, 0x25, 0xf9, 0xce, 0x8c, + 0x5e, 0xb2, 0xdd, 0x50, 0xa8, 0x6f, 0x00, 0x0c, 0x3d, 0xcf, 0x91, 0x7a, 0x16, 0x3a, 0xa5, 0xce, + 0x8c, 0x5e, 0x1e, 0x26, 0x7d, 0x3c, 0xce, 0xba, 0xb5, 0x5f, 0x66, 0x01, 0xd2, 0x79, 0x87, 0x4d, + 0x95, 0x62, 0x5e, 0x3a, 0x0a, 0xf0, 0x98, 0x50, 0x79, 0xdc, 0xff, 0x9c, 0x7b, 0x60, 0x12, 0x9f, + 0xbb, 0x1c, 0xac, 0x8b, 0xd1, 0x4b, 0x2c, 0xd0, 0x06, 0x2c, 0x65, 0x26, 0x31, 0xe3, 0x18, 0xd3, + 0x63, 0x23, 0x29, 0x55, 0xb5, 0x74, 0xcc, 0xea, 0x60, 0x7a, 0xdc, 0xb5, 0x1a, 0xdf, 0x15, 0xe4, + 0xb9, 0x38, 0x1c, 0x3d, 0x86, 0xf9, 0xa3, 0xc8, 0x35, 0x59, 0x56, 0x1a, 0xc9, 0x4c, 0x3d, 0x4d, + 0x8d, 0xaf, 0xc6, 0x34, 0x7c, 0xd4, 0x3d, 0x82, 0x4b, 0x5e, 0x60, 0x8f, 0x6c, 0x17, 0x3b, 0xc6, + 0x24, 0x7f, 0x6e, 0x4a, 0xfe, 0xe5, 0x98, 0x6f, 0x37, 0xbb, 0xcf, 0x3e, 0x94, 0x8f, 0x6c, 0x87, + 0x08, 0xea, 0xfc, 0x94, 0xd4, 0x25, 0x46, 0xc1, 0xe9, 0x6e, 0x40, 0xc5, 0xb1, 0x5d, 0x62, 0xb8, + 0xd1, 0x78, 0x48, 0x02, 0x59, 0x93, 0x81, 0x89, 0x7a, 0x5c, 0x82, 0xfe, 0x09, 0xf3, 0xa6, 0xe7, + 0x44, 0x63, 0x37, 0x36, 0x29, 0x72, 0x93, 0xaa, 0x10, 0x4a, 0xa3, 0x5d, 0xa8, 0x38, 0x1e, 0xb6, + 0x8c, 0xb1, 0x67, 
0x45, 0x4e, 0x3c, 0xaa, 0xff, 0xfb, 0x8c, 0x63, 0xed, 0x73, 0x63, 0x1d, 0x18, + 0x52, 0x7c, 0xa3, 0xcf, 0x61, 0x41, 0xfc, 0x65, 0x65, 0x9c, 0x90, 0x80, 0xb2, 0x06, 0x3c, 0x37, + 0xa5, 0x87, 0xf3, 0x82, 0xe7, 0x50, 0xd0, 0x34, 0xbe, 0x57, 0xa0, 0x92, 0x89, 0x27, 0xf4, 0x10, + 0x8a, 0x3c, 0x2c, 0x65, 0x89, 0xb8, 0x3b, 0x4d, 0x54, 0xea, 0x82, 0x02, 0xdd, 0x86, 0xe5, 0xb8, + 0x5e, 0x88, 0x50, 0x9f, 0x28, 0x18, 0x48, 0xea, 0xc4, 0xc6, 0xbc, 0x62, 0xac, 0xfd, 0xac, 0xc0, + 0xac, 0xf4, 0xb8, 0x03, 0xb3, 0xf2, 0xd2, 0xa6, 0x0d, 0x43, 0x89, 0x47, 0x8f, 0xa0, 0x34, 0x8c, + 0xd8, 0xc4, 0x2c, 0x53, 0x61, 0x1a, 0xae, 0x39, 0xce, 0xd0, 0xb5, 0xd6, 0xbe, 0x82, 0xc5, 0xb7, + 0xb4, 0x68, 0x39, 0x1e, 0x38, 0x45, 0xc3, 0x11, 0x0b, 0xe6, 0x7e, 0x28, 0x4c, 0x89, 0xc5, 0x3b, + 0xfb, 0xa4, 0xfb, 0x89, 0x8e, 0x75, 0x76, 0xee, 0xfe, 0xd6, 0x2b, 0x05, 0xfe, 0x61, 0x7a, 0xe3, + 0x0f, 0x9f, 0x6e, 0x0b, 0xf8, 0x7d, 0xf7, 0xd9, 0xd8, 0xd9, 0x57, 0x9e, 0x3c, 0x90, 0xc6, 0x23, + 0xcf, 0xc1, 0xee, 0x48, 0xf5, 0x82, 0x91, 0x36, 0x22, 0x2e, 0x1f, 0x4a, 0xb5, 0xb4, 0xde, 0xbe, + 0xe7, 0xdf, 0x04, 0xff, 0x4b, 0x57, 0xaf, 0x72, 0x2b, 0x0f, 0x04, 0xd3, 0x36, 0x93, 0xa9, 0xe2, + 0x51, 0x0f, 0x9b, 0xbf, 0xc6, 0xf2, 0xa7, 0x5c, 0xfe, 0x94, 0xcb, 0x9f, 0x1e, 0x36, 0x87, 0xb3, + 0x7c, 0x8f, 0x3b, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x31, 0x4c, 0x19, 0x89, 0x10, 0x00, + 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go new file mode 100644 index 0000000000..a87b00d514 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go @@ -0,0 +1,249 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/devtools/cloudtrace/v2/tracing.proto + +package cloudtrace + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/golang/protobuf/ptypes/timestamp" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The request message for the `BatchWriteSpans` method. +type BatchWriteSpansRequest struct { + // Required. The name of the project where the spans belong. The format is + // `projects/[PROJECT_ID]`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. A list of new spans. The span names must not match existing + // spans, or the results are undefined. 
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchWriteSpansRequest) Reset() { *m = BatchWriteSpansRequest{} } +func (m *BatchWriteSpansRequest) String() string { return proto.CompactTextString(m) } +func (*BatchWriteSpansRequest) ProtoMessage() {} +func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d1f9b588db05fdc6, []int{0} +} + +func (m *BatchWriteSpansRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchWriteSpansRequest.Unmarshal(m, b) +} +func (m *BatchWriteSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchWriteSpansRequest.Marshal(b, m, deterministic) +} +func (m *BatchWriteSpansRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchWriteSpansRequest.Merge(m, src) +} +func (m *BatchWriteSpansRequest) XXX_Size() int { + return xxx_messageInfo_BatchWriteSpansRequest.Size(m) +} +func (m *BatchWriteSpansRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchWriteSpansRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchWriteSpansRequest proto.InternalMessageInfo + +func (m *BatchWriteSpansRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BatchWriteSpansRequest) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +func init() { + proto.RegisterType((*BatchWriteSpansRequest)(nil), "google.devtools.cloudtrace.v2.BatchWriteSpansRequest") +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/tracing.proto", fileDescriptor_d1f9b588db05fdc6) +} + +var fileDescriptor_d1f9b588db05fdc6 = []byte{ + // 534 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0x13, 0x3d, + 0x10, 0xc7, 0xb5, 0xc9, 0xf7, 0x21, 0x61, 0x90, 0x90, 0x56, 0xa2, 0xb4, 
0x01, 0x44, 0x09, 0x12, + 0x94, 0x24, 0xb5, 0xc5, 0x56, 0x5c, 0x82, 0x00, 0x6d, 0x2a, 0x94, 0x6b, 0x94, 0xa2, 0x22, 0x41, + 0x24, 0xe4, 0x6c, 0x26, 0x1b, 0xa3, 0x5d, 0xdb, 0xd8, 0xce, 0x46, 0x80, 0x7a, 0xe1, 0xc6, 0x99, + 0x27, 0xe8, 0x15, 0xf1, 0x04, 0x3c, 0x42, 0x8f, 0x70, 0xe3, 0xd4, 0x03, 0x4f, 0xc1, 0x09, 0xad, + 0xbd, 0xdb, 0x84, 0xa8, 0x34, 0xbd, 0xad, 0x67, 0xfe, 0x9e, 0xf9, 0xcd, 0xfc, 0xbd, 0xa8, 0x19, + 0x0b, 0x11, 0x27, 0x40, 0x46, 0x90, 0x19, 0x21, 0x12, 0x4d, 0xa2, 0x44, 0x4c, 0x47, 0x46, 0xd1, + 0x08, 0x48, 0x16, 0x90, 0xfc, 0x83, 0xf1, 0x18, 0x4b, 0x25, 0x8c, 0xf0, 0x6f, 0x3a, 0x31, 0x2e, + 0xc5, 0x78, 0x2e, 0xc6, 0x59, 0x50, 0xbb, 0x51, 0xd4, 0xa2, 0x92, 0x11, 0xca, 0xb9, 0x30, 0xd4, + 0x30, 0xc1, 0xb5, 0xbb, 0x5c, 0xbb, 0xb6, 0x90, 0x8d, 0x12, 0x06, 0xdc, 0x14, 0x89, 0x5b, 0x0b, + 0x89, 0x31, 0x83, 0x64, 0xf4, 0x7a, 0x08, 0x13, 0x9a, 0x31, 0xa1, 0x0a, 0xc1, 0xc6, 0x82, 0x40, + 0x81, 0x16, 0x53, 0x15, 0x41, 0x91, 0xba, 0xbf, 0x1a, 0xbf, 0x94, 0x5e, 0x2f, 0xa4, 0xf6, 0x34, + 0x9c, 0x8e, 0x09, 0xa4, 0xd2, 0xbc, 0x5b, 0x62, 0x38, 0x49, 0x1a, 0x96, 0x82, 0x36, 0x34, 0x95, + 0x4e, 0x50, 0x3f, 0xf4, 0xd0, 0x5a, 0x87, 0x9a, 0x68, 0xf2, 0x42, 0x31, 0x03, 0x7b, 0x92, 0x72, + 0xdd, 0x87, 0xb7, 0x53, 0xd0, 0xc6, 0xef, 0xa2, 0xff, 0x38, 0x4d, 0x61, 0xdd, 0xdb, 0xf4, 0xb6, + 0x2e, 0x76, 0x76, 0x8e, 0xc3, 0xca, 0xef, 0x70, 0x1b, 0x35, 0x2d, 0x49, 0x89, 0x9b, 0x52, 0x4e, + 0x63, 0x50, 0xd8, 0xb5, 0xa1, 0x92, 0x69, 0x1c, 0x89, 0x94, 0xf4, 0x94, 0x78, 0x03, 0x91, 0xe9, + 0xdb, 0x02, 0xfe, 0x13, 0xf4, 0xbf, 0xce, 0x0b, 0xaf, 0x57, 0x36, 0xab, 0x5b, 0x97, 0x82, 0x3b, + 0xf8, 0xcc, 0x75, 0xe3, 0x1c, 0xa2, 0x53, 0x3d, 0x0e, 0x2b, 0x7d, 0x77, 0x2d, 0xf8, 0x56, 0x45, + 0x97, 0x9f, 0xe7, 0xd9, 0x3d, 0x50, 0x19, 0x8b, 0xc0, 0x3f, 0xf4, 0xd0, 0x95, 0x25, 0x68, 0xff, + 0xe1, 0x8a, 0xaa, 0xa7, 0x0f, 0x59, 0x5b, 0x2b, 0xaf, 0x95, 0x1b, 0xc2, 0xcf, 0xf2, 0xf5, 0xd5, + 0x9f, 0xfe, 0x0c, 0x51, 0x0e, 0xdf, 0xb2, 0x08, 0x1f, 0x7f, 0xfc, 0xfa, 0x5c, 0x69, 0xd5, 0xef, + 0xe5, 0xbb, 
0xff, 0x90, 0x87, 0x1f, 0x4b, 0x37, 0xa1, 0x26, 0x8d, 0x03, 0xe7, 0x86, 0x6e, 0x0f, + 0x4f, 0x3a, 0xb4, 0xbd, 0x86, 0xff, 0xc9, 0x43, 0x68, 0x57, 0x01, 0x75, 0xfd, 0xfc, 0xf3, 0x0c, + 0x5d, 0x3b, 0x8f, 0xa8, 0xfe, 0xc0, 0xc2, 0x34, 0xeb, 0x77, 0x4f, 0x83, 0x29, 0x58, 0x48, 0x83, + 0x58, 0x6e, 0xd2, 0x38, 0x68, 0x7b, 0x8d, 0xda, 0xfb, 0xa3, 0x70, 0x63, 0xa1, 0xd2, 0xdf, 0x66, + 0x7d, 0x0f, 0x5f, 0x4d, 0x8c, 0x91, 0xba, 0x4d, 0xc8, 0x6c, 0x36, 0x5b, 0x76, 0x92, 0x4e, 0xcd, + 0xc4, 0x3d, 0xbe, 0x6d, 0x99, 0x50, 0x33, 0x16, 0x2a, 0x6d, 0xad, 0x92, 0xbb, 0x2e, 0x54, 0x4a, + 0xe0, 0xa3, 0xce, 0x57, 0x0f, 0xdd, 0x8e, 0x44, 0x7a, 0xf6, 0x64, 0x1d, 0xeb, 0x2f, 0xe3, 0x71, + 0x2f, 0x77, 0xa1, 0xe7, 0xbd, 0xec, 0x16, 0xf2, 0x58, 0x24, 0x94, 0xc7, 0x58, 0xa8, 0x98, 0xc4, + 0xc0, 0xad, 0x47, 0x64, 0xde, 0xf2, 0x1f, 0xbf, 0xc7, 0xa3, 0xf9, 0xe9, 0x4b, 0xe5, 0x6a, 0xd7, + 0x55, 0xda, 0xcd, 0x63, 0xd8, 0x3e, 0x23, 0xbc, 0x1f, 0x1c, 0x95, 0xf1, 0x81, 0x8d, 0x0f, 0x6c, + 0x7c, 0xb0, 0x1f, 0x0c, 0x2f, 0xd8, 0x1e, 0x3b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xb4, + 0x7d, 0x35, 0x40, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Creates a new span. 
+ CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) { + out := new(Span) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*empty.Empty, error) + // Creates a new span. + CreateSpan(context.Context, *Span) (*Span, error) +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) BatchWriteSpans(ctx context.Context, req *BatchWriteSpansRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchWriteSpans not implemented") +} +func (*UnimplementedTraceServiceServer) CreateSpan(ctx context.Context, req *Span) (*Span, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSpan not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchWriteSpansRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Span) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).CreateSpan(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = 
grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudtrace.v2.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BatchWriteSpans", + Handler: _TraceService_BatchWriteSpans_Handler, + }, + { + MethodName: "CreateSpan", + Handler: _TraceService_CreateSpan_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudtrace/v2/tracing.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go new file mode 100644 index 0000000000..c4517de714 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go @@ -0,0 +1,862 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all the conditions are met + // simultaneously. 
This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +var AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", +} + +var AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x)) +} + +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](/monitoring/alerts/). +type AlertPolicy struct { + // Required if the policy exists. The resource name for this policy. The + // format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. + // If `condition_time_series_query_language` is present, it must be the only + // `condition`. 
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` + // How to combine the results of multiple conditions to determine if an + // incident should be opened. + // If `condition_time_series_query_language` is present, this must be + // `COMBINE_UNSPECIFIED`. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Read-only description of how the alert policy is invalid. OK if the alert + // policy is valid. If not OK, the alert policy will not generate incidents. + Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. 
The format of the entries in this field is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. + MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy) Reset() { *m = AlertPolicy{} } +func (m *AlertPolicy) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy) ProtoMessage() {} +func (*AlertPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0} +} + +func (m *AlertPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy.Unmarshal(m, b) +} +func (m *AlertPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy.Marshal(b, m, deterministic) +} +func (m *AlertPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy.Merge(m, src) +} +func (m *AlertPolicy) XXX_Size() int { + return xxx_messageInfo_AlertPolicy.Size(m) +} +func (m *AlertPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy proto.InternalMessageInfo + +func (m *AlertPolicy) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy) GetDisplayName() string { 
+ if m != nil { + return m.DisplayName + } + return "" +} + +func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { + if m != nil { + return m.Documentation + } + return nil +} + +func (m *AlertPolicy) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { + if m != nil { + return m.Combiner + } + return AlertPolicy_COMBINE_UNSPECIFIED +} + +func (m *AlertPolicy) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func (m *AlertPolicy) GetValidity() *status.Status { + if m != nil { + return m.Validity + } + return nil +} + +func (m *AlertPolicy) GetNotificationChannels() []string { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *AlertPolicy) GetCreationRecord() *MutationRecord { + if m != nil { + return m.CreationRecord + } + return nil +} + +func (m *AlertPolicy) GetMutationRecord() *MutationRecord { + if m != nil { + return m.MutationRecord + } + return nil +} + +// A content string and a MIME type that describes the content string's +// format. +type AlertPolicy_Documentation struct { + // The text of the documentation, interpreted according to `mime_type`. + // The content may not exceed 8,192 Unicode characters and may not exceed + // more than 10,240 bytes when encoded in UTF-8 format, whichever is + // smaller. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The format of the `content` field. Presently, only the value + // `"text/markdown"` is supported. See + // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. 
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} } +func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Documentation) ProtoMessage() {} +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 0} +} + +func (m *AlertPolicy_Documentation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Documentation.Unmarshal(m, b) +} +func (m *AlertPolicy_Documentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Documentation.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Documentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Documentation.Merge(m, src) +} +func (m *AlertPolicy_Documentation) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Documentation.Size(m) +} +func (m *AlertPolicy_Documentation) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Documentation.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Documentation proto.InternalMessageInfo + +func (m *AlertPolicy_Documentation) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *AlertPolicy_Documentation) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + // Required if the condition exists. The unique resource name for this + // condition. 
Its format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Stackdriver Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. + Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. 
+ // + // Types that are valid to be assigned to Condition: + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} } +func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition) ProtoMessage() {} +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1} +} + +func (m *AlertPolicy_Condition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition.Merge(m, src) +} +func (m *AlertPolicy_Condition) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition.Size(m) +} +func (m *AlertPolicy_Condition) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition proto.InternalMessageInfo + +func (m *AlertPolicy_Condition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy_Condition) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type AlertPolicy_Condition_ConditionThreshold struct { + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionAbsent struct { + ConditionAbsent 
*AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AlertPolicy_Condition) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + } +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + // A type of trigger. 
+ // + // Types that are valid to be assigned to Type: + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} } +func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 0} +} + +func (m *AlertPolicy_Condition_Trigger) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_Trigger.Merge(m, src) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Size(m) +} +func (m *AlertPolicy_Condition_Trigger) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_Trigger.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_Trigger proto.InternalMessageInfo + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) 
isAlertPolicy_Condition_Trigger_Type() {} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AlertPolicy_Condition_Trigger) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AlertPolicy_Condition_Trigger_Count)(nil), + (*AlertPolicy_Condition_Trigger_Percent)(nil), + } +} + +// A condition type that compares a collection of time series +// against a threshold. +type AlertPolicy_Condition_MetricThreshold struct { + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resrouces). 
Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It + // is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies a time series that should be used as the denominator of a + // ratio that will be compared with the threshold. If a + // `denominator_filter` is specified, the time series specified by the + // `filter` field will be used as the numerator. + // + // The filter must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). 
+ // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. + Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. 
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} } +func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 1} +} + +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Merge(m, src) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Size(m) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricThreshold proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if m != nil { + return m.DenominatorFilter + } + return "" +} + +func (m 
*AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if m != nil { + return m.DenominatorAggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if m != nil { + return m.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if m != nil { + return m.ThresholdValue + } + return 0 +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. +type AlertPolicy_Condition_MetricAbsence struct { + // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that + // identifies which time series should be compared with the threshold. + // + // The filter is similar to the one that is specified in the + // [`ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. 
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resrouces). Multiple aggregations + // are applied in the order specified. + // + // This field is similar to the one in the [`ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It + // is advisable to use the `ListTimeSeries` method when debugging this + // field. + Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // The amount of time that a time series must fail to report new + // data to be considered failing. Currently, only values that + // are a multiple of a minute--e.g. 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. The `Duration.nanos` field is + // ignored. + Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`. 
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} } +func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 2} +} + +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Merge(m, src) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Size(m) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricAbsence proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() 
*AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value) + proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.AlertPolicy.UserLabelsEntry") + proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation") + proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition") + proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger") + proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold") + proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence") +} + +func init() { proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor_014ef0e1a0f00a00) } + +var fileDescriptor_014ef0e1a0f00a00 = []byte{ + // 1094 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0xdb, 0x46, + 0x13, 0x36, 0xa5, 0x58, 0xb2, 0x46, 0xfe, 0x50, 0x36, 0x4e, 0xcc, 0xf0, 0x05, 0x02, 0x39, 0x78, + 0x8b, 0x1a, 0x29, 0x42, 0xa2, 0x56, 0x8b, 0x36, 0x0e, 0x5a, 0x40, 0x96, 0xe4, 0xc8, 0x68, 0x25, + 0x1b, 0xf4, 0x47, 0x81, 0xc2, 0x00, 0xb1, 0xa2, 0xd6, 0x34, 0x5b, 0x72, 0x97, 0x58, 0x52, 0x2e, + 0x54, 0xc3, 0xbf, 0xa2, 0x87, 0xde, 0x7b, 0xec, 0xff, 0xe8, 0xa1, 0xfd, 0x29, 0x39, 0x16, 0x01, + 0x7a, 0x2d, 0xb8, 0x5c, 0x52, 0x94, 0x23, 0x27, 0x91, 0x7b, 0xdb, 0xd9, 0x79, 0xe6, 0x99, 0x9d, + 0x9d, 0x67, 0x87, 0x84, 0xba, 0xc3, 0x98, 0xe3, 0x11, 0xc3, 0x67, 0xd4, 0x8d, 0x18, 0x77, 0xa9, + 0x63, 0x5c, 0x36, 0x0c, 0xec, 0x11, 0x1e, 0xe9, 0x01, 0x67, 0x11, 0x43, 0xeb, 0x09, 
0x42, 0x9f, + 0x20, 0xf4, 0xcb, 0x86, 0xf6, 0x58, 0xc6, 0xe1, 0xc0, 0x35, 0x38, 0x09, 0xd9, 0x88, 0xdb, 0x24, + 0x09, 0xd0, 0x36, 0x67, 0x52, 0xda, 0xcc, 0xf7, 0x19, 0x95, 0x90, 0x67, 0x33, 0x21, 0xfe, 0x28, + 0xc2, 0x91, 0xcb, 0xa8, 0xc5, 0x89, 0xcd, 0xf8, 0x50, 0x62, 0x9f, 0x48, 0xac, 0xb0, 0x06, 0xa3, + 0x73, 0x63, 0x38, 0xe2, 0x02, 0x76, 0x9b, 0xff, 0x27, 0x8e, 0x83, 0x80, 0xf0, 0x50, 0xfa, 0x37, + 0xa4, 0x9f, 0x07, 0xb6, 0x11, 0x46, 0x38, 0x1a, 0x49, 0xc7, 0xd3, 0x7f, 0x1e, 0x40, 0xb5, 0x19, + 0x17, 0x7a, 0xc8, 0x3c, 0xd7, 0x1e, 0x23, 0x04, 0xf7, 0x28, 0xf6, 0x89, 0xaa, 0xd4, 0x95, 0xad, + 0x8a, 0x29, 0xd6, 0x68, 0x13, 0x96, 0x87, 0x6e, 0x18, 0x78, 0x78, 0x6c, 0x09, 0x5f, 0x41, 0xf8, + 0xaa, 0x72, 0xaf, 0x1f, 0x43, 0x4e, 0x60, 0x65, 0xc8, 0xec, 0x91, 0x4f, 0x68, 0x72, 0x7a, 0x75, + 0xa5, 0xae, 0x6c, 0x55, 0xb7, 0x0d, 0x7d, 0xd6, 0xbd, 0xe9, 0xb9, 0x84, 0x7a, 0x3b, 0x1f, 0x66, + 0x4e, 0xb3, 0x20, 0x13, 0xaa, 0xa3, 0x90, 0x70, 0xcb, 0xc3, 0x03, 0xe2, 0x85, 0x6a, 0xad, 0x5e, + 0xdc, 0xaa, 0x6e, 0x7f, 0xfa, 0x7e, 0xd2, 0x93, 0x90, 0xf0, 0x6f, 0x45, 0x4c, 0x87, 0x46, 0x7c, + 0x6c, 0xc2, 0x28, 0xdb, 0x40, 0xdf, 0x00, 0xd8, 0x8c, 0x0e, 0xdd, 0x38, 0x41, 0xa8, 0x2e, 0x0b, + 0xca, 0x4f, 0xde, 0x4f, 0xd9, 0x4a, 0x63, 0xcc, 0x5c, 0x38, 0x3a, 0x82, 0x25, 0x9b, 0xf9, 0x03, + 0x97, 0x12, 0xae, 0x96, 0xea, 0xca, 0xd6, 0xea, 0xf6, 0x17, 0x73, 0x50, 0xb5, 0x64, 0xe8, 0xf1, + 0x38, 0x20, 0x66, 0x46, 0x84, 0x3e, 0x83, 0x32, 0xa1, 0x78, 0xe0, 0x91, 0xa1, 0x7a, 0x5f, 0x5c, + 0xa3, 0x96, 0x72, 0xa6, 0xed, 0xd5, 0x77, 0x19, 0xf3, 0x4e, 0xb1, 0x37, 0x22, 0x66, 0x0a, 0x45, + 0x3a, 0x2c, 0x5d, 0x62, 0xcf, 0x1d, 0xba, 0xd1, 0x58, 0x45, 0x22, 0x0c, 0xa5, 0x61, 0x3c, 0xb0, + 0xf5, 0x23, 0xd1, 0x75, 0x33, 0xc3, 0xa0, 0x06, 0x3c, 0xa4, 0x2c, 0x72, 0xcf, 0x5d, 0x3b, 0xd1, + 0x9b, 0x7d, 0x81, 0x29, 0x8d, 0x6f, 0x79, 0xb5, 0x5e, 0xdc, 0xaa, 0x98, 0xeb, 0x79, 0x67, 0x4b, + 0xfa, 0x50, 0x0f, 0xd6, 0x6c, 0x4e, 0xf2, 0x02, 0x55, 0x41, 0xe4, 0xfa, 0xff, 0xec, 0xb2, 0x7b, + 0x52, 0xcd, 0xa6, 0xc0, 
0x9a, 0xab, 0x69, 0x70, 0x62, 0xc7, 0x74, 0x37, 0xf4, 0xae, 0x56, 0xe7, + 0xa1, 0xf3, 0xa7, 0x6c, 0x6d, 0x0f, 0x56, 0xa6, 0xe4, 0x84, 0x54, 0x28, 0xdb, 0x8c, 0x46, 0x84, + 0x46, 0x52, 0xd0, 0xa9, 0x89, 0xfe, 0x07, 0x15, 0xdf, 0xf5, 0x89, 0x15, 0x8d, 0x83, 0x54, 0xd0, + 0x4b, 0xf1, 0x46, 0xdc, 0x0a, 0xed, 0x0d, 0x40, 0x25, 0x6b, 0x52, 0xf6, 0x24, 0x96, 0xdf, 0xf1, + 0x24, 0x4a, 0x6f, 0x3f, 0x09, 0x0a, 0x0f, 0x32, 0xa1, 0x58, 0xd1, 0x05, 0x27, 0xe1, 0x05, 0xf3, + 0x86, 0xe2, 0x1c, 0xd5, 0xed, 0x97, 0x73, 0xa8, 0x44, 0xef, 0x91, 0x88, 0xbb, 0xf6, 0x71, 0x4a, + 0xd1, 0x5d, 0x30, 0x51, 0xc6, 0x9c, 0xed, 0xa2, 0x73, 0xa8, 0x4d, 0xf2, 0xe1, 0x41, 0x18, 0x17, + 0x5d, 0x10, 0xc9, 0x5e, 0xcc, 0x9f, 0xac, 0x19, 0xc7, 0xdb, 0xa4, 0xbb, 0x60, 0xae, 0x65, 0xa4, + 0x62, 0x2f, 0xd2, 0x3a, 0x50, 0x3e, 0xe6, 0xae, 0xe3, 0x10, 0x8e, 0x1e, 0xc1, 0xa2, 0xcd, 0x46, + 0xf2, 0x72, 0x17, 0xbb, 0x0b, 0x66, 0x62, 0x22, 0x0d, 0xca, 0x01, 0xe1, 0x76, 0x7a, 0x02, 0xa5, + 0xbb, 0x60, 0xa6, 0x1b, 0xbb, 0x25, 0xb8, 0x17, 0xdf, 0xb9, 0xf6, 0x77, 0x11, 0xd6, 0x6e, 0x14, + 0x86, 0x1e, 0x41, 0xe9, 0xdc, 0xf5, 0x22, 0xc2, 0x65, 0x47, 0xa4, 0x85, 0x3a, 0xb0, 0x8c, 0x1d, + 0x87, 0x13, 0x07, 0x27, 0x8f, 0x76, 0x49, 0x3c, 0xda, 0xcd, 0x5b, 0xca, 0x9a, 0x20, 0xcd, 0xa9, + 0x30, 0xf4, 0x1c, 0xd0, 0x90, 0x50, 0xe6, 0xbb, 0x14, 0x47, 0x8c, 0x5b, 0x32, 0x55, 0x45, 0xa4, + 0xba, 0x9f, 0xf3, 0xec, 0x25, 0x59, 0xcf, 0x40, 0xcd, 0xc3, 0xa7, 0x4e, 0x00, 0x1f, 0x7a, 0x82, + 0x8d, 0x1c, 0x45, 0x33, 0x7f, 0x98, 0x76, 0x3c, 0x86, 0xfc, 0x00, 0x73, 0x37, 0x64, 0x54, 0xbd, + 0x27, 0x66, 0xc7, 0x2d, 0xaa, 0x6f, 0x65, 0x38, 0x31, 0x28, 0x72, 0x71, 0xe8, 0x63, 0x58, 0xcb, + 0xa4, 0x65, 0x5d, 0xc6, 0x03, 0x41, 0x5d, 0x8c, 0x6f, 0xdc, 0x5c, 0xcd, 0xb6, 0xc5, 0x98, 0x40, + 0x9f, 0xc3, 0x52, 0xfa, 0xc9, 0x10, 0x62, 0xad, 0x6e, 0x3f, 0x7e, 0x6b, 0xa8, 0xb4, 0x25, 0xc0, + 0xcc, 0xa0, 0xa8, 0x07, 0xe5, 0x28, 0x69, 0xb6, 0x5a, 0x16, 0x51, 0x8d, 0x79, 0xb4, 0x24, 0x75, + 0x62, 0xa6, 0x1c, 0xda, 0x1b, 0x05, 0x56, 0xa6, 0x04, 0x96, 
0x6b, 0xb9, 0xf2, 0xce, 0x96, 0x2f, + 0xde, 0xad, 0xe5, 0xf9, 0xb2, 0x0b, 0x77, 0x2a, 0xbb, 0xf8, 0xdf, 0xcb, 0xde, 0xf9, 0xb5, 0xf0, + 0xba, 0xf9, 0x4b, 0x01, 0xf2, 0xc1, 0x09, 0x1d, 0x0e, 0xdc, 0x50, 0xb7, 0x99, 0x6f, 0xe4, 0x88, + 0x26, 0x83, 0x67, 0x2f, 0xe0, 0xec, 0x07, 0x62, 0x47, 0xa1, 0x71, 0x25, 0x57, 0xd7, 0xc9, 0x4f, + 0x89, 0x00, 0xba, 0x24, 0x34, 0xae, 0x84, 0x69, 0x05, 0x22, 0xf0, 0xda, 0x98, 0x7c, 0x98, 0x8c, + 0xab, 0x6c, 0x7d, 0x8d, 0x0e, 0x19, 0x77, 0x30, 0x75, 0x7f, 0xc6, 0xd2, 0x95, 0x37, 0xef, 0xc6, + 0xd8, 0x3e, 0x67, 0xde, 0x90, 0xf0, 0xd0, 0xb8, 0x4a, 0x16, 0x77, 0x63, 0x51, 0x9e, 0xed, 0x56, + 0xa1, 0x92, 0x6d, 0x68, 0x5f, 0xc1, 0xda, 0x8d, 0x0f, 0x37, 0xaa, 0x41, 0xf1, 0x47, 0x32, 0x96, + 0xda, 0x88, 0x97, 0x68, 0x1d, 0x16, 0x13, 0x9d, 0x27, 0x23, 0x22, 0x31, 0x76, 0x0a, 0x5f, 0x2a, + 0x4f, 0x31, 0x3c, 0x9c, 0xf9, 0x65, 0x45, 0x1b, 0xf0, 0xa0, 0x75, 0xd0, 0xdb, 0xdd, 0xef, 0x77, + 0xac, 0x93, 0xfe, 0xd1, 0x61, 0xa7, 0xb5, 0xbf, 0xb7, 0xdf, 0x69, 0xd7, 0x16, 0x50, 0x19, 0x8a, + 0xcd, 0x7e, 0xbb, 0xa6, 0xa0, 0x12, 0x14, 0x0e, 0xcc, 0x5a, 0x01, 0x3d, 0x01, 0xad, 0xd9, 0x6f, + 0x5b, 0xdf, 0xed, 0x1f, 0x77, 0xad, 0x5e, 0xf3, 0xb8, 0xd5, 0xdd, 0xef, 0xbf, 0xb2, 0xcc, 0xce, + 0xd1, 0xc1, 0x89, 0xd9, 0xea, 0xd4, 0x8a, 0x3b, 0x7f, 0x2a, 0xaf, 0x9b, 0x7f, 0x28, 0xf0, 0xd1, + 0x07, 0x35, 0x12, 0x19, 0x73, 0xf6, 0x0f, 0xbd, 0xb8, 0x73, 0xa3, 0xd0, 0xf3, 0xb9, 0x3a, 0x12, + 0x5f, 0xfd, 0x6f, 0x0a, 0xa8, 0x36, 0xf3, 0x67, 0x0a, 0x7b, 0x17, 0x92, 0x3a, 0xe2, 0x27, 0x72, + 0xa8, 0x7c, 0xff, 0xb5, 0xc4, 0x38, 0xcc, 0xc3, 0xd4, 0xd1, 0x19, 0x77, 0x0c, 0x87, 0x50, 0xf1, + 0x80, 0x8c, 0x49, 0xfd, 0xd3, 0x3f, 0xb2, 0x2f, 0x27, 0xd6, 0xef, 0x05, 0xed, 0x55, 0x42, 0xd0, + 0xf2, 0xd8, 0x68, 0xa8, 0xf7, 0x26, 0xa9, 0x4e, 0x1b, 0x7f, 0xa5, 0xce, 0x33, 0xe1, 0x3c, 0x9b, + 0x38, 0xcf, 0x4e, 0x1b, 0x83, 0x92, 0x48, 0xd2, 0xf8, 0x37, 0x00, 0x00, 0xff, 0xff, 0xde, 0xc1, + 0xf6, 0xc0, 0xa2, 0x0b, 0x00, 0x00, +} diff --git 
a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go new file mode 100644 index 0000000000..5279f0182a --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go @@ -0,0 +1,707 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + // Required. The project in which to create the alerting policy. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. The alerting + // policy that is returned will have a name that contains a normalized + // representation of this name as a prefix but adds a suffix of the form + // `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the + // container. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The requested alerting policy. You should omit the `name` field in this + // policy. The name will be returned in the new policy, including + // a new `[ALERT_POLICY_ID]` value. + AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} } +func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAlertPolicyRequest) ProtoMessage() {} +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{0} +} + +func (m *CreateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *CreateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *CreateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateAlertPolicyRequest.Merge(m, src) +} +func (m *CreateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_CreateAlertPolicyRequest.Size(m) +} +func (m *CreateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateAlertPolicyRequest proto.InternalMessageInfo + +func (m *CreateAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. +type GetAlertPolicyRequest struct { + // Required. The alerting policy to retrieve. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} } +func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetAlertPolicyRequest) ProtoMessage() {} +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{1} +} + +func (m *GetAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAlertPolicyRequest.Unmarshal(m, b) +} +func (m *GetAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *GetAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAlertPolicyRequest.Merge(m, src) +} +func (m *GetAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_GetAlertPolicyRequest.Size(m) +} +func (m *GetAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAlertPolicyRequest proto.InternalMessageInfo + +func (m *GetAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + // Required. The project whose alert policies are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} } +func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesRequest) ProtoMessage() {} +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{2} +} + +func (m *ListAlertPoliciesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesRequest.Unmarshal(m, b) +} +func (m *ListAlertPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesRequest.Marshal(b, m, deterministic) +} +func (m *ListAlertPoliciesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesRequest.Merge(m, src) +} +func (m *ListAlertPoliciesRequest) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesRequest.Size(m) +} +func (m *ListAlertPoliciesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesRequest proto.InternalMessageInfo + +func (m *ListAlertPoliciesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAlertPoliciesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. 
+type ListAlertPoliciesResponse struct { + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} } +func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesResponse) ProtoMessage() {} +func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{3} +} + +func (m *ListAlertPoliciesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesResponse.Unmarshal(m, b) +} +func (m *ListAlertPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesResponse.Marshal(b, m, deterministic) +} +func (m *ListAlertPoliciesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesResponse.Merge(m, src) +} +func (m *ListAlertPoliciesResponse) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesResponse.Size(m) +} +func (m *ListAlertPoliciesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesResponse proto.InternalMessageInfo + +func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if m != nil { + return m.AlertPolicies + } + return nil +} + +func (m *ListAlertPoliciesResponse) GetNextPageToken() string { + if m != nil { + 
return m.NextPageToken + } + return "" +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // + The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // + Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. 
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} } +func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAlertPolicyRequest) ProtoMessage() {} +func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{4} +} + +func (m *UpdateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *UpdateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *UpdateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateAlertPolicyRequest.Merge(m, src) +} +func (m *UpdateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_UpdateAlertPolicyRequest.Size(m) +} +func (m *UpdateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateAlertPolicyRequest proto.InternalMessageInfo + +func (m *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + // Required. The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} } +func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAlertPolicyRequest) ProtoMessage() {} +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{5} +} + +func (m *DeleteAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteAlertPolicyRequest.Unmarshal(m, b) +} +func (m *DeleteAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *DeleteAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteAlertPolicyRequest.Merge(m, src) +} +func (m *DeleteAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_DeleteAlertPolicyRequest.Size(m) +} +func (m *DeleteAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteAlertPolicyRequest proto.InternalMessageInfo + +func (m *DeleteAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest") + proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest") + proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest") + proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse") + proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest") + proto.RegisterType((*DeleteAlertPolicyRequest)(nil), 
"google.monitoring.v3.DeleteAlertPolicyRequest") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor_c45362b2a456d1bf) +} + +var fileDescriptor_c45362b2a456d1bf = []byte{ + // 806 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x41, 0x4f, 0xdb, 0x48, + 0x14, 0x96, 0x13, 0x60, 0x61, 0x58, 0x58, 0x65, 0xb4, 0xcb, 0x3a, 0x66, 0x57, 0x4d, 0x53, 0x51, + 0x10, 0x05, 0x5b, 0x4a, 0x7a, 0x02, 0xb5, 0x92, 0x43, 0x5b, 0x5a, 0xa9, 0x48, 0x51, 0x68, 0x39, + 0x20, 0xaa, 0x68, 0xe2, 0x4c, 0xcc, 0x14, 0xc7, 0xe3, 0xda, 0x93, 0xd0, 0x50, 0x71, 0xe9, 0xad, + 0x87, 0x1e, 0xaa, 0x4a, 0x3d, 0xf6, 0xd0, 0x13, 0xf4, 0xde, 0x3f, 0x41, 0x7b, 0x6a, 0x6f, 0x9c, + 0x38, 0xf4, 0x57, 0xf4, 0x54, 0x79, 0xec, 0x90, 0x49, 0x62, 0x0b, 0xc3, 0x2d, 0xf3, 0xde, 0x37, + 0x6f, 0xbe, 0xf9, 0xde, 0xf7, 0x26, 0x06, 0x0b, 0x26, 0xa5, 0xa6, 0x85, 0xb5, 0x26, 0xb5, 0x09, + 0xa3, 0x2e, 0xb1, 0x4d, 0xad, 0x5d, 0xd4, 0x90, 0x85, 0x5d, 0x56, 0xf5, 0xb0, 0xdb, 0x26, 0x06, + 0x56, 0x1d, 0x97, 0x32, 0x0a, 0xff, 0x0e, 0x90, 0x6a, 0x0f, 0xa9, 0xb6, 0x8b, 0xca, 0x7f, 0xe1, + 0x7e, 0xe4, 0x10, 0x0d, 0xd9, 0x36, 0x65, 0x88, 0x11, 0x6a, 0x7b, 0xc1, 0x1e, 0xe5, 0x5f, 0x21, + 0x6b, 0x58, 0x04, 0xdb, 0x2c, 0x4c, 0x5c, 0x13, 0x12, 0x0d, 0x82, 0xad, 0x7a, 0xb5, 0x86, 0x77, + 0x51, 0x9b, 0x50, 0x37, 0x04, 0x64, 0x05, 0x80, 0x8b, 0x3d, 0xda, 0x72, 0xbb, 0x44, 0x94, 0x5c, + 0x3c, 0xe5, 0x10, 0x31, 0x1b, 0x22, 0xf8, 0xaa, 0xd6, 0x6a, 0x68, 0xb8, 0xe9, 0xb0, 0xce, 0xc0, + 0xf6, 0xf3, 0x64, 0x70, 0x7e, 0x13, 0x79, 0x7b, 0x01, 0x22, 0x7f, 0x24, 0x01, 0x79, 0xcd, 0xc5, + 0x88, 0x61, 0xdd, 0x2f, 0x5a, 0xa6, 0x16, 0x31, 0x3a, 0x15, 0xfc, 0xa2, 0x85, 0x3d, 0x06, 0x75, + 0x30, 0x62, 0xa3, 0x26, 0x96, 0xd3, 0x39, 0x69, 0x61, 0xa2, 0xb4, 0x7c, 0xa6, 0xa7, 0x7e, 0xe9, + 0xf3, 0x70, 0x4e, 0x50, 0x25, 0xa8, 0x8f, 0x1c, 0xe2, 0xa9, 0x06, 0x6d, 0x6a, 0x62, 0x0d, 0xbe, + 0x15, 0x3e, 0x02, 0x7f, 0x06, 0x02, 0x3b, 0x3c, 0x2a, 0xa7, 
0x72, 0xd2, 0xc2, 0x64, 0xe1, 0xba, + 0x1a, 0x25, 0xb0, 0x2a, 0x6c, 0x2f, 0xa5, 0xcf, 0xf4, 0x54, 0x65, 0x12, 0xf5, 0x22, 0xf9, 0x6d, + 0xf0, 0xcf, 0x3a, 0x66, 0x49, 0x69, 0x82, 0xcb, 0xd0, 0xcc, 0x7f, 0x93, 0x80, 0xfc, 0x98, 0x78, + 0x42, 0x75, 0x82, 0xbd, 0xc1, 0xfa, 0x23, 0x57, 0x97, 0x61, 0x06, 0x8c, 0x35, 0x88, 0xc5, 0xb0, + 0x2b, 0x8f, 0xfa, 0x45, 0x2a, 0xe1, 0x0a, 0x66, 0xc1, 0x38, 0x75, 0xeb, 0xd8, 0xad, 0xd6, 0x3a, + 0xf2, 0x18, 0xcf, 0xfc, 0xc1, 0xd7, 0xa5, 0x0e, 0x9c, 0x05, 0x13, 0x0e, 0x32, 0x71, 0xd5, 0x23, + 0x07, 0x98, 0xcb, 0x36, 0x5a, 0x19, 0xf7, 0x03, 0x9b, 0xe4, 0x00, 0xc3, 0xff, 0x01, 0xe0, 0x49, + 0x46, 0xf7, 0xb0, 0x1d, 0x5c, 0xbc, 0xc2, 0xe1, 0x4f, 0xfc, 0x40, 0xfe, 0xad, 0x04, 0xb2, 0x11, + 0xd7, 0xf1, 0x1c, 0x6a, 0x7b, 0x18, 0x3e, 0x04, 0xd3, 0x42, 0x4f, 0x08, 0xf6, 0xe4, 0x74, 0x2e, + 0x9d, 0xa8, 0x2b, 0x95, 0x29, 0x24, 0x56, 0x84, 0x37, 0xc1, 0x5f, 0x36, 0x7e, 0xc9, 0xaa, 0x02, + 0x97, 0x14, 0xe7, 0x32, 0xe5, 0x87, 0xcb, 0xe7, 0x7c, 0x3e, 0x49, 0x40, 0x7e, 0xea, 0xd4, 0xa3, + 0x5d, 0xb6, 0x0a, 0x26, 0x5b, 0x3c, 0xc7, 0x7d, 0x19, 0x3a, 0x44, 0xe9, 0x72, 0xe9, 0x5a, 0x57, + 0x7d, 0xe0, 0x5b, 0x77, 0x03, 0x79, 0x7b, 0x15, 0x10, 0xc0, 0xfd, 0xdf, 0x43, 0xfe, 0x4a, 0x5f, + 0xdd, 0x5f, 0xcf, 0x80, 0x7c, 0x0f, 0x5b, 0x38, 0xf9, 0x24, 0x5c, 0xca, 0x62, 0x85, 0x8f, 0xe3, + 0x00, 0x0a, 0xd1, 0xcd, 0xe0, 0xc1, 0x81, 0x47, 0x12, 0xc8, 0x0c, 0xb5, 0x0a, 0xaa, 0xd1, 0x17, + 0x88, 0xb3, 0xa8, 0xa2, 0x25, 0xc6, 0x07, 0x1e, 0xc8, 0x17, 0x4e, 0x75, 0x4e, 0xeb, 0xf5, 0x8f, + 0x9f, 0xef, 0x53, 0x73, 0xf0, 0x86, 0xff, 0xac, 0xbc, 0xf2, 0x03, 0x77, 0x1c, 0x97, 0x3e, 0xc7, + 0x06, 0xf3, 0xb4, 0xc5, 0x43, 0xad, 0xbf, 0xdb, 0x1f, 0x24, 0x30, 0xdd, 0x3f, 0x81, 0xf0, 0x56, + 0xf4, 0xb9, 0x91, 0x73, 0xaa, 0x5c, 0xdc, 0x95, 0xfc, 0x6d, 0x91, 0xd6, 0x3c, 0x9c, 0x8b, 0xa2, + 0xd5, 0xcf, 0x4a, 0x5b, 0x3c, 0x84, 0x5f, 0x24, 0x90, 0x19, 0x7a, 0xc4, 0xe2, 0x34, 0x8c, 0x7b, + 0xed, 0x92, 0xd0, 0xdb, 0x38, 0xd5, 0x33, 0x3e, 0x9b, 0x25, 0xd1, 0x73, 0x9c, 0x6b, 0x31, 0x9f, + 
0x44, 0xc2, 0x95, 0x3e, 0xb3, 0xc2, 0x77, 0x12, 0xc8, 0x0c, 0x59, 0x2e, 0x8e, 0x77, 0x9c, 0x37, + 0x95, 0x99, 0xa1, 0x51, 0xb9, 0xef, 0xff, 0x05, 0x0c, 0x68, 0xb9, 0x98, 0x50, 0xcb, 0xaf, 0x12, + 0xc8, 0x0c, 0x8d, 0x6a, 0x1c, 0xa7, 0xb8, 0x99, 0x4e, 0xa2, 0xa5, 0x71, 0xaa, 0xcb, 0xc2, 0xdc, + 0x0f, 0x4b, 0x5a, 0x2a, 0x14, 0x38, 0x65, 0x31, 0xa1, 0x5e, 0xc4, 0xbf, 0x5f, 0x61, 0xe5, 0x58, + 0x3a, 0xd1, 0xb3, 0xb1, 0x53, 0xfa, 0x5d, 0x7f, 0x23, 0xed, 0x32, 0xe6, 0x78, 0x2b, 0x9a, 0xb6, + 0xbf, 0xbf, 0x3f, 0x38, 0xc3, 0xa8, 0xc5, 0x76, 0x35, 0xc3, 0xa2, 0xad, 0xfa, 0xb2, 0x63, 0x21, + 0xd6, 0xa0, 0x6e, 0x73, 0xe9, 0x22, 0x78, 0xef, 0xac, 0x4b, 0x40, 0x55, 0x17, 0xa3, 0x7a, 0xe9, + 0x58, 0x02, 0xb2, 0x41, 0x9b, 0x91, 0xca, 0x95, 0x32, 0x5c, 0xba, 0xf0, 0xd1, 0x28, 0xfb, 0x8d, + 0x2e, 0x4b, 0xdb, 0x77, 0x43, 0xa8, 0x49, 0x2d, 0x64, 0x9b, 0x2a, 0x75, 0x4d, 0xcd, 0xc4, 0x36, + 0xb7, 0x81, 0xd6, 0x3b, 0xb1, 0xff, 0xe3, 0x61, 0xb5, 0xb7, 0xfa, 0x9c, 0x52, 0xd6, 0x83, 0x02, + 0x6b, 0xfe, 0x25, 0xd5, 0x8d, 0xde, 0x89, 0x5b, 0xc5, 0x93, 0x6e, 0x72, 0x87, 0x27, 0x77, 0x7a, + 0xc9, 0x9d, 0xad, 0x62, 0x6d, 0x8c, 0x1f, 0x52, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x54, 0x73, + 0x76, 0x58, 0x53, 0x09, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the project. 
+ ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. 
Returns the + // updated alerting policy. + UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations. +type UnimplementedAlertPolicyServiceServer struct { +} + +func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(ctx context.Context, req *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(ctx context.Context, req *GetAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(ctx context.Context, req *CreateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(ctx context.Context, req *DeleteAlertPolicyRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented") +} +func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(ctx context.Context, req *UpdateAlertPolicyRequest) (*AlertPolicy, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented") +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: 
_AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go new file mode 100644 index 0000000000..31e278c90e --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -0,0 +1,854 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/common.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + distribution "google.golang.org/genproto/googleapis/api/distribution" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Specifies an ordering relationship on two arguments, called `left` and +// `right`. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // True if the left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // True if the left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // True if the left argument is less than the right argument. 
+ ComparisonType_COMPARISON_LT ComparisonType = 3 + // True if the left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // True if the left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // True if the left argument is not equal to the right argument. + ComparisonType_COMPARISON_NE ComparisonType = 6 +) + +var ComparisonType_name = map[int32]string{ + 0: "COMPARISON_UNSPECIFIED", + 1: "COMPARISON_GT", + 2: "COMPARISON_GE", + 3: "COMPARISON_LT", + 4: "COMPARISON_LE", + 5: "COMPARISON_EQ", + 6: "COMPARISON_NE", +} + +var ComparisonType_value = map[string]int32{ + "COMPARISON_UNSPECIFIED": 0, + "COMPARISON_GT": 1, + "COMPARISON_GE": 2, + "COMPARISON_LT": 3, + "COMPARISON_LE": 4, + "COMPARISON_EQ": 5, + "COMPARISON_NE": 6, +} + +func (x ComparisonType) String() string { + return proto.EnumName(ComparisonType_name, int32(x)) +} + +func (ComparisonType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{0} +} + +// The tier of service for a Workspace. Please see the +// [service tiers +// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more +// details. +type ServiceTier int32 // Deprecated: Do not use. +const ( + // An invalid sentinel value, used to indicate that a tier has not + // been provided explicitly. + ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0 + // The Stackdriver Basic tier, a free tier of service that provides basic + // features, a moderate allotment of logs, and access to built-in metrics. + // A number of features are not available in this tier. For more details, + // see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). 
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1 + // The Stackdriver Premium tier, a higher, more expensive tier of service + // that provides access to all Stackdriver features, lets you use Stackdriver + // with AWS accounts, and has a larger allotments for logs and metrics. For + // more details, see [the service tiers + // documentation](https://cloud.google.com/monitoring/workspaces/tiers). + ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2 +) + +var ServiceTier_name = map[int32]string{ + 0: "SERVICE_TIER_UNSPECIFIED", + 1: "SERVICE_TIER_BASIC", + 2: "SERVICE_TIER_PREMIUM", +} + +var ServiceTier_value = map[string]int32{ + "SERVICE_TIER_UNSPECIFIED": 0, + "SERVICE_TIER_BASIC": 1, + "SERVICE_TIER_PREMIUM": 2, +} + +func (x ServiceTier) String() string { + return proto.EnumName(ServiceTier_name, int32(x)) +} + +func (ServiceTier) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{1} +} + +// The `Aligner` specifies the operation that will be applied to the data +// points in each alignment period in a time series. Except for +// `ALIGN_NONE`, which specifies that no operation be applied, each alignment +// operation replaces the set of data values in each alignment period with +// a single value: the result of applying the operation to the data values. +// An aligned time series has a single data value at the end of each +// `alignment_period`. +// +// An alignment operation can change the data type of the values, too. For +// example, if you apply a counting operation to boolean values, the data +// `value_type` in the original time series is `BOOLEAN`, but the `value_type` +// in the aligned result is `INT64`. +type Aggregation_Aligner int32 + +const ( + // No alignment. Raw data is returned. Not valid if cross-series reduction + // is requested. The `value_type` of the result is the same as the + // `value_type` of the input. 
+ Aggregation_ALIGN_NONE Aggregation_Aligner = 0 + // Align and convert to + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA]. + // The output is `delta = y1 - y0`. + // + // This alignment is valid for + // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and + // `DELTA` metrics. If the selected alignment period results in periods + // with no data, then the aligned value for such a period is created by + // interpolation. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_DELTA Aggregation_Aligner = 1 + // Align and convert to a rate. The result is computed as + // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time". + // Think of this aligner as providing the slope of the line that passes + // through the value at the start and at the end of the `alignment_period`. + // + // This aligner is valid for `CUMULATIVE` + // and `DELTA` metrics with numeric values. If the selected alignment + // period results in periods with no data, then the aligned value for + // such a period is created by interpolation. The output is a `GAUGE` + // metric with `value_type` `DOUBLE`. + // + // If, by "rate", you mean "percentage change", see the + // `ALIGN_PERCENT_CHANGE` aligner instead. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the alignment + // period boundary. This aligner is valid for `GAUGE` metrics with + // numeric values. The `value_type` of the aligned result is the same as the + // `value_type` of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by moving the most recent data point before the end of the + // alignment period to the boundary at the end of the alignment + // period. This aligner is valid for `GAUGE` metrics. The `value_type` of + // the aligned result is the same as the `value_type` of the input. 
+ Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align the time series by returning the minimum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align the time series by returning the maximum value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is the same as + // the `value_type` of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align the time series by returning the mean value in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric values. The `value_type` of the aligned result is `DOUBLE`. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align the time series by returning the number of values in each alignment + // period. This aligner is valid for `GAUGE` and `DELTA` metrics with + // numeric or Boolean values. The `value_type` of the aligned result is + // `INT64`. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align the time series by returning the sum of the values in each + // alignment period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with numeric and distribution values. The `value_type` of the + // aligned result is the same as the `value_type` of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align the time series by returning the standard deviation of the values + // in each alignment period. This aligner is valid for `GAUGE` and + // `DELTA` metrics with numeric values. The `value_type` of the output is + // `DOUBLE`. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align the time series by returning the number of `True` values in + // each alignment period. 
This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align the time series by returning the number of `False` values in + // each alignment period. This aligner is valid for `GAUGE` metrics with + // Boolean values. The `value_type` of the output is `INT64`. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align the time series by returning the ratio of the number of `True` + // values to the total number of values in each alignment period. This + // aligner is valid for `GAUGE` metrics with Boolean values. The output + // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 99th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 95th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 50th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. 
The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align the time series by using [percentile + // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting + // data point in each alignment period is the 5th percentile of all data + // points in the period. This aligner is valid for `GAUGE` and `DELTA` + // metrics with distribution values. The output is a `GAUGE` metric with + // `value_type` `DOUBLE`. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This aligner is valid for + // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns + // `((current - previous)/previous) * 100`, where the value of `previous` is + // determined based on the `alignment_period`. + // + // If the values of `current` and `previous` are both 0, then the returned + // value is 0. If only `previous` is 0, the returned value is infinity. + // + // A 10-minute moving mean is computed at each point of the alignment period + // prior to the above calculation to smooth the metric and prevent false + // positives from very short-lived spikes. The moving mean is only + // applicable for data whose values are `>= 0`. Any values `< 0` are + // treated as a missing datapoint, and are ignored. While `DELTA` + // metrics are accepted by this alignment, special care should be taken that + // the values for the metric will always be positive. The output is a + // `GAUGE` metric with `value_type` `DOUBLE`. 
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +var Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", +} + +var Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, +} + +func (x Aggregation_Aligner) String() string { + return proto.EnumName(Aggregation_Aligner_name, int32(x)) +} + +func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2, 0} +} + +// A Reducer operation describes how to aggregate data points from multiple +// time series into a single time series, where the value of each data point +// in the resulting series is a function of all the already aligned values in +// the input time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the `Aligner` is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean value across time series for each + // alignment period. 
This reducer is valid for + // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and + // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with + // numeric or distribution values. The `value_type` of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum value across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric values. The `value_type` of the output is the same as the + // `value_type` of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics + // with numeric and distribution values. The `value_type` of the output is + // the same as the `value_type` of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics with numeric or distribution values. The `value_type` + // of the output is `DOUBLE`. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the number of data points across time series + // for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of numeric, Boolean, distribution, and string + // `value_type`. The `value_type` of the output is `INT64`. + Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the number of `True`-valued data points across time + // series for each alignment period. 
This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the number of `False`-valued data points across time + // series for each alignment period. This reducer is valid for `DELTA` and + // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output + // is `INT64`. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the ratio of the number of `True`-valued data points + // to the total number of data points for each alignment period. This + // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`. + // The output value is in the range [0.0, 1.0] and has `value_type` + // `DOUBLE`. + Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing the [99th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing the [95th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing the [50th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. 
+ Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing the [5th + // percentile](https://en.wikipedia.org/wiki/Percentile) of data points + // across time series for each alignment period. This reducer is valid for + // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value + // of the output is `DOUBLE`. + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +var Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", +} + +var Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, +} + +func (x Aggregation_Reducer) String() string { + return proto.EnumName(Aggregation_Reducer_name, int32(x)) +} + +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2, 1} +} + +// A single strongly-typed value. +type TypedValue struct { + // The typed value field. 
+ // + // Types that are valid to be assigned to Value: + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypedValue) Reset() { *m = TypedValue{} } +func (m *TypedValue) String() string { return proto.CompactTextString(m) } +func (*TypedValue) ProtoMessage() {} +func (*TypedValue) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{0} +} + +func (m *TypedValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypedValue.Unmarshal(m, b) +} +func (m *TypedValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypedValue.Marshal(b, m, deterministic) +} +func (m *TypedValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedValue.Merge(m, src) +} +func (m *TypedValue) XXX_Size() int { + return xxx_messageInfo_TypedValue.Size(m) +} +func (m *TypedValue) XXX_DiscardUnknown() { + xxx_messageInfo_TypedValue.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedValue proto.InternalMessageInfo + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type TypedValue_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type TypedValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type TypedValue_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type TypedValue_DistributionValue struct { + DistributionValue *distribution.Distribution 
`protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} + +func (*TypedValue_Int64Value) isTypedValue_Value() {} + +func (*TypedValue_DoubleValue) isTypedValue_Value() {} + +func (*TypedValue_StringValue) isTypedValue_Value() {} + +func (*TypedValue_DistributionValue) isTypedValue_Value() {} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *TypedValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *TypedValue) GetInt64Value() int64 { + if x, ok := m.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *TypedValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *TypedValue) GetStringValue() string { + if x, ok := m.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *TypedValue) GetDistributionValue() *distribution.Distribution { + if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TypedValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } +} + +// A closed time interval. It extends from the start time to the end time, and includes both: `[startTime, endTime]`. Valid time intervals depend on the [`MetricKind`](/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) of the metric value. In no case can the end time be earlier than the start time. 
+// +// * For a `GAUGE` metric, the `startTime` value is technically optional; if +// no value is specified, the start time defaults to the value of the +// end time, and the interval represents a single point in time. If both +// start and end times are specified, they must be identical. Such an +// interval is valid only for `GAUGE` metrics, which are point-in-time +// measurements. +// +// * For `DELTA` and `CUMULATIVE` metrics, the start time must be earlier +// than the end time. +// +// * In all cases, the start time of the next interval must be +// at least a microsecond after the end time of the previous interval. +// Because the interval is closed, if the start time of a new interval +// is the same as the end time of the previous interval, data written +// at the new start time could overwrite data written at the previous +// end time. +type TimeInterval struct { + // Required. The end of the time interval. + EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. 
+ StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{1} +} + +func (m *TimeInterval) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeInterval.Unmarshal(m, b) +} +func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic) +} +func (m *TimeInterval) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeInterval.Merge(m, src) +} +func (m *TimeInterval) XXX_Size() int { + return xxx_messageInfo_TimeInterval.Size(m) +} +func (m *TimeInterval) XXX_DiscardUnknown() { + xxx_messageInfo_TimeInterval.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeInterval proto.InternalMessageInfo + +func (m *TimeInterval) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TimeInterval) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide a different view of +// the data. Aggregation of time series is done in two steps. First, each time +// series in the set is _aligned_ to the same time interval boundaries, then the +// set of time series is optionally _reduced_ in number. +// +// Alignment consists of applying the `per_series_aligner` operation +// to each time series after its data has been divided into regular +// `alignment_period` time intervals. 
This process takes _all_ of the data +// points in an alignment period, applies a mathematical transformation such as +// averaging, minimum, maximum, delta, etc., and converts them into a single +// data point per period. +// +// Reduction is when the aligned and transformed time series can optionally be +// combined, reducing the number of time series through similar mathematical +// transformations. Reduction involves applying a `cross_series_reducer` to +// all the time series, optionally sorting the time series into subsets with +// `group_by_fields`, and applying the reducer to each subset. +// +// The raw time series data can contain a huge amount of information from +// multiple sources. Alignment and reduction transforms this mass of data into +// a more manageable and representative collection of data, for example "the +// 95% latency across the average of all tasks in a cluster". This +// representative data can be more easily graphed and comprehended, and the +// individual time series data is still available for later drilldown. For more +// details, see [Aggregating Time +// Series](/monitoring/api/v3/metrics#aggregating_time_series). +type Aggregation struct { + // The `alignment_period` specifies a time interval, in seconds, that is used + // to divide the data in all the + // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of + // time. This will be done before the per-series aligner can be applied to + // the data. + // + // The value must be at least 60 seconds. If a per-series aligner other than + // `ALIGN_NONE` is specified, this field is required or an error is returned. + // If no per-series aligner is specified, or the aligner `ALIGN_NONE` is + // specified, then this field is ignored. 
+ AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // An `Aligner` describes how to bring the data points in a single + // time series into temporal alignment. Except for `ALIGN_NONE`, all + // alignments cause all the data points in an `alignment_period` to be + // mathematically grouped together, resulting in a single data point for + // each `alignment_period` with end timestamp at the end of the period. + // + // Not all alignment operations may be applied to all time series. The valid + // choices depend on the `metric_kind` and `value_type` of the original time + // series. Alignment can change the `metric_kind` or the `value_type` of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. If `cross_series_reducer` is specified, then + // `per_series_aligner` must be specified and not equal to `ALIGN_NONE` + // and `alignment_period` must be specified; otherwise, an error is + // returned. + PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"` + // The reduction operation to be used to combine time series into a single + // time series, where the value of each data point in the resulting series is + // a function of all the already aligned values in the input time series. + // + // Not all reducer operations can be applied to all time series. The valid + // choices depend on the `metric_kind` and the `value_type` of the original + // time series. Reduction can yield a time series with a different + // `metric_kind` or `value_type` than the input time series. + // + // Time series data must first be aligned (see `per_series_aligner`) in order + // to perform cross-time series reduction. 
If `cross_series_reducer` is + // specified, then `per_series_aligner` must be specified, and must not be + // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an + // error is returned. + CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"` + // The set of fields to preserve when `cross_series_reducer` is + // specified. The `group_by_fields` determine how the time series are + // partitioned into subsets prior to applying the aggregation + // operation. Each subset contains time series that have the same + // value for each of the grouping fields. Each individual time + // series is a member of exactly one subset. The + // `cross_series_reducer` is applied to each subset of time series. + // It is not possible to reduce across different resource types, so + // this field implicitly contains `resource.type`. Fields not + // specified in `group_by_fields` are aggregated away. If + // `group_by_fields` is not specified and all the time series have + // the same resource type, then the time series are aggregated into + // a single output time series. If `cross_series_reducer` is not + // defined, this field is ignored. 
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Aggregation) Reset() { *m = Aggregation{} } +func (m *Aggregation) String() string { return proto.CompactTextString(m) } +func (*Aggregation) ProtoMessage() {} +func (*Aggregation) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2} +} + +func (m *Aggregation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Aggregation.Unmarshal(m, b) +} +func (m *Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Aggregation.Marshal(b, m, deterministic) +} +func (m *Aggregation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Aggregation.Merge(m, src) +} +func (m *Aggregation) XXX_Size() int { + return xxx_messageInfo_Aggregation.Size(m) +} +func (m *Aggregation) XXX_DiscardUnknown() { + xxx_messageInfo_Aggregation.DiscardUnknown(m) +} + +var xxx_messageInfo_Aggregation proto.InternalMessageInfo + +func (m *Aggregation) GetAlignmentPeriod() *duration.Duration { + if m != nil { + return m.AlignmentPeriod + } + return nil +} + +func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if m != nil { + return m.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if m != nil { + return m.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (m *Aggregation) GetGroupByFields() []string { + if m != nil { + return m.GroupByFields + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value) + proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, 
Aggregation_Aligner_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value) + proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue") + proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval") + proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation") +} + +func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor_013c57c1dcbb8d65) } + +var fileDescriptor_013c57c1dcbb8d65 = []byte{ + // 946 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0x41, 0x6f, 0xe2, 0xc6, + 0x1b, 0xc6, 0x31, 0x24, 0x21, 0xbc, 0x4e, 0xc2, 0x64, 0x36, 0x9b, 0xe5, 0x8f, 0xfe, 0xed, 0xb2, + 0xa9, 0x54, 0xd1, 0x3d, 0x98, 0x28, 0x29, 0x95, 0xa2, 0x4a, 0x95, 0x1c, 0x33, 0x49, 0x2c, 0x81, + 0xa1, 0x83, 0x49, 0xa3, 0x36, 0x92, 0x65, 0xc2, 0xac, 0x65, 0x09, 0x6c, 0x6b, 0x30, 0x91, 0x72, + 0xeb, 0xbd, 0xdf, 0xa1, 0x97, 0xde, 0x7a, 0xeb, 0xd7, 0xe8, 0x87, 0xe9, 0xa5, 0x5f, 0xa0, 0xf2, + 0x8c, 0xbd, 0x36, 0x94, 0xaa, 0x3d, 0xbe, 0xbf, 0xe7, 0x79, 0xdf, 0x99, 0x79, 0xc6, 0x1a, 0xc3, + 0x3b, 0x2f, 0x0c, 0xbd, 0x39, 0xeb, 0x2c, 0xc2, 0xc0, 0x8f, 0x43, 0xee, 0x07, 0x5e, 0xe7, 0xf9, + 0xb2, 0xf3, 0x14, 0x2e, 0x16, 0x61, 0xa0, 0x45, 0x3c, 0x8c, 0x43, 0x7c, 0x22, 0x2d, 0x5a, 0x6e, + 0xd1, 0x9e, 0x2f, 0x9b, 0x9f, 0xa4, 0x8d, 0x6e, 0xe4, 0x77, 0x66, 0xfe, 0x32, 0xe6, 0xfe, 0x74, + 0x15, 0xfb, 0x59, 0x53, 0xf3, 0xd3, 0x54, 0x16, 0xd5, 0x74, 0xf5, 0xa1, 0x33, 0x5b, 0x71, 0xb7, + 0xa0, 0xbf, 0xdd, 0xd4, 0x63, 0x7f, 0xc1, 0x96, 0xb1, 0xbb, 0x88, 0xa4, 0xe1, 0xec, 0x0f, 0x05, + 0xc0, 0x7e, 0x89, 0xd8, 0xec, 0xde, 0x9d, 0xaf, 0x18, 0x7e, 0x0b, 0x30, 0x0d, 0xc3, 0xb9, 0xf3, + 0x9c, 0x54, 0x0d, 0xa5, 0xa5, 0xb4, 0xf7, 0xef, 0x4a, 0xb4, 0x96, 0x30, 0x69, 0x78, 0x07, 0xaa, + 0x1f, 0xc4, 0x5f, 0x7d, 0x99, 0x3a, 0xca, 0x2d, 0xa5, 0x5d, 0xb9, 0x2b, 0x51, 0x10, 0x50, 0x5a, + 0x3e, 0x83, 0x83, 0x59, 0xb8, 0x9a, 
0xce, 0x59, 0xea, 0xa9, 0xb4, 0x94, 0xb6, 0x72, 0x57, 0xa2, + 0xaa, 0xa4, 0x1f, 0x4d, 0xc9, 0x61, 0x02, 0x2f, 0x35, 0xed, 0xb4, 0x94, 0x76, 0x2d, 0x31, 0x49, + 0x2a, 0x4d, 0x26, 0xe0, 0xe2, 0x99, 0x53, 0xeb, 0x6e, 0x4b, 0x69, 0xab, 0x17, 0x0d, 0x2d, 0xcd, + 0xcb, 0x8d, 0x7c, 0xad, 0x57, 0x70, 0xdd, 0x95, 0xe8, 0x71, 0xb1, 0x4b, 0x8c, 0xba, 0xae, 0xc2, + 0xae, 0xe8, 0x3e, 0xfb, 0x51, 0x81, 0x03, 0xdb, 0x5f, 0x30, 0x33, 0x88, 0x19, 0x7f, 0x76, 0xe7, + 0xb8, 0x0b, 0xfb, 0x2c, 0x98, 0x39, 0x49, 0x30, 0xe2, 0x38, 0xea, 0x45, 0x33, 0x1b, 0x9d, 0xa5, + 0xa6, 0xd9, 0x59, 0x6a, 0xb4, 0xca, 0x82, 0x59, 0x52, 0xe1, 0x2b, 0x80, 0x65, 0xec, 0xf2, 0x58, + 0x36, 0x2a, 0xff, 0xda, 0x58, 0x13, 0xee, 0xa4, 0x3e, 0xfb, 0xb3, 0x0a, 0xaa, 0xee, 0x79, 0x9c, + 0x79, 0xe2, 0xaa, 0x70, 0x0f, 0x90, 0x3b, 0xf7, 0xbd, 0x60, 0xc1, 0x82, 0xd8, 0x89, 0x18, 0xf7, + 0xc3, 0x59, 0x3a, 0xf0, 0x7f, 0x7f, 0x1b, 0xd8, 0x4b, 0xef, 0x97, 0xd6, 0x3f, 0xb6, 0x8c, 0x44, + 0x07, 0xfe, 0x0e, 0x70, 0xc4, 0xb8, 0xb3, 0x64, 0xdc, 0x67, 0x4b, 0x47, 0xa8, 0x8c, 0x8b, 0x13, + 0x1d, 0x5d, 0x7c, 0xa1, 0x6d, 0xfb, 0xb8, 0xb4, 0xc2, 0x26, 0x34, 0x5d, 0x36, 0x50, 0x14, 0x31, + 0x3e, 0x16, 0x33, 0x52, 0x82, 0x7f, 0x80, 0x93, 0x27, 0x1e, 0x2e, 0x97, 0xd9, 0x68, 0xce, 0x66, + 0xab, 0x27, 0xc6, 0xc5, 0x95, 0xfd, 0xa7, 0xd1, 0x54, 0x36, 0x50, 0x2c, 0xc6, 0xc8, 0xe1, 0x29, + 0xc3, 0x9f, 0x43, 0xdd, 0xe3, 0xe1, 0x2a, 0x72, 0xa6, 0x2f, 0xce, 0x07, 0x9f, 0xcd, 0x67, 0xcb, + 0xc6, 0x6e, 0xab, 0xd2, 0xae, 0xd1, 0x43, 0x81, 0xaf, 0x5f, 0x6e, 0x04, 0x3c, 0xfb, 0xa9, 0x02, + 0xd5, 0x6c, 0x43, 0x47, 0x00, 0x7a, 0xdf, 0xbc, 0xb5, 0x1c, 0x6b, 0x68, 0x11, 0x54, 0xc2, 0x75, + 0x50, 0x65, 0xdd, 0x23, 0x7d, 0x5b, 0x47, 0x4a, 0x6e, 0xa0, 0xba, 0x4d, 0x50, 0x19, 0xbf, 0x86, + 0x63, 0x59, 0x9b, 0x96, 0x4d, 0xe8, 0x68, 0xd8, 0x4f, 0x70, 0x05, 0x9f, 0x00, 0x4a, 0xe7, 0x90, + 0x07, 0xdb, 0x19, 0xf6, 0x7b, 0x84, 0xa2, 0x1d, 0x7c, 0x08, 0x35, 0x49, 0x07, 0xa6, 0x85, 0xa0, + 0x50, 0xea, 0x0f, 0x48, 0xcd, 0x47, 0x0f, 0x88, 0x6e, 0xa1, 0x83, 0x7c, 
0x6d, 0x63, 0x38, 0xb1, + 0x6c, 0x74, 0x98, 0xfb, 0xc7, 0x93, 0x01, 0x3a, 0xc2, 0x08, 0x0e, 0xd2, 0xd2, 0xee, 0xf5, 0xc8, + 0x3d, 0xaa, 0xe7, 0xab, 0x8a, 0x0e, 0xc7, 0xa6, 0x13, 0x82, 0x50, 0xbe, 0x45, 0x49, 0x6f, 0xf4, + 0xfe, 0x98, 0xa0, 0x06, 0x7e, 0x03, 0xaf, 0x24, 0xbe, 0xa1, 0xba, 0x61, 0x9b, 0x43, 0x4b, 0xfa, + 0x8f, 0x73, 0x61, 0x44, 0xa8, 0x41, 0x2c, 0xdb, 0xec, 0x13, 0xe7, 0xea, 0x0a, 0xe1, 0xed, 0x42, + 0x17, 0xbd, 0xda, 0x2a, 0x74, 0xcf, 0xd1, 0xc9, 0x56, 0xe1, 0xbc, 0x8b, 0x5e, 0xe3, 0x06, 0x9c, + 0xac, 0x09, 0x8e, 0x71, 0xa7, 0x5b, 0xb7, 0x04, 0xbd, 0x39, 0xfb, 0xad, 0x0c, 0xd5, 0xec, 0x06, + 0xeb, 0xa0, 0x52, 0xd2, 0x9b, 0x18, 0xa4, 0x70, 0x1d, 0x29, 0x10, 0x19, 0x89, 0xeb, 0xc8, 0x80, + 0x69, 0xa1, 0x72, 0xb1, 0xd6, 0x1f, 0x50, 0xa5, 0x50, 0x27, 0x99, 0xed, 0xe0, 0x63, 0x38, 0xcc, + 0x6a, 0x19, 0xda, 0x6e, 0x12, 0x63, 0x8a, 0x64, 0xce, 0x7b, 0x49, 0x60, 0x45, 0x22, 0x73, 0xa9, + 0xe2, 0x53, 0xc0, 0x6b, 0x58, 0x06, 0x59, 0x4f, 0xce, 0x92, 0xf2, 0xf5, 0x24, 0xf7, 0x0b, 0xca, + 0x7a, 0x94, 0xb5, 0x7f, 0x50, 0xba, 0x08, 0xb6, 0x2b, 0xdd, 0x73, 0xa4, 0x6e, 0x57, 0xce, 0xbb, + 0xe8, 0xe0, 0xfd, 0xcf, 0x0a, 0x1c, 0x19, 0xe1, 0x22, 0x72, 0xb9, 0xbf, 0x0c, 0x83, 0xe4, 0xcd, + 0xc5, 0x4d, 0x38, 0x35, 0x86, 0x83, 0x91, 0x4e, 0xcd, 0xf1, 0xd0, 0x72, 0x26, 0xd6, 0x78, 0x44, + 0x0c, 0xf3, 0xc6, 0x24, 0x3d, 0x54, 0x4a, 0x42, 0x28, 0x68, 0xb7, 0x36, 0x52, 0x36, 0x51, 0xf2, + 0x65, 0xaf, 0xa3, 0xbe, 0x8d, 0x2a, 0x9b, 0x88, 0xc8, 0x40, 0x0b, 0x88, 0x7c, 0x8b, 0x76, 0x37, + 0x90, 0x45, 0xd0, 0xde, 0x7b, 0x17, 0xd4, 0x31, 0xe3, 0xcf, 0xfe, 0x13, 0xb3, 0x7d, 0xc6, 0xf1, + 0xff, 0xa1, 0x31, 0x26, 0xf4, 0xde, 0x34, 0x88, 0x63, 0x9b, 0x84, 0x6e, 0x6c, 0xef, 0x14, 0xf0, + 0x9a, 0x7a, 0xad, 0x8f, 0x4d, 0x03, 0x29, 0xc9, 0xf9, 0xd7, 0xf8, 0x88, 0x92, 0x81, 0x39, 0x19, + 0xa0, 0x72, 0xb3, 0xdc, 0x50, 0xae, 0x7f, 0x51, 0xa0, 0xf1, 0x14, 0x2e, 0xb6, 0x3e, 0x19, 0xd7, + 0xaa, 0x21, 0x7e, 0x87, 0xa3, 0xe4, 0xa9, 0x1b, 0x29, 0xdf, 0x7f, 0x93, 0x9a, 0xbc, 0x70, 0xee, + 0x06, 0x9e, 
0x16, 0x72, 0xaf, 0xe3, 0xb1, 0x40, 0x3c, 0x84, 0x1d, 0x29, 0xb9, 0x91, 0xbf, 0x5c, + 0xff, 0xa3, 0x7e, 0x9d, 0x57, 0xbf, 0x96, 0x9b, 0xb7, 0x72, 0x80, 0x31, 0x0f, 0x57, 0x33, 0x6d, + 0x90, 0xaf, 0x75, 0x7f, 0xf9, 0x7b, 0x26, 0x3e, 0x0a, 0xf1, 0x31, 0x17, 0x1f, 0xef, 0x2f, 0xa7, + 0x7b, 0x62, 0x91, 0xcb, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x70, 0xc6, 0x75, 0xb5, 0x07, + 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go new file mode 100644 index 0000000000..fc158f1962 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/dropped_labels.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A set of (label, value) pairs which were dropped during aggregation, attached +// to google.api.Distribution.Exemplars in google.api.Distribution values during +// aggregation. +// +// These values are used in combination with the label values that remain on the +// aggregated Distribution timeseries to construct the full label set for the +// exemplar values. 
The resulting full label set may be used to identify the +// specific task/job/instance (for example) which may be contributing to a +// long-tail, while allowing the storage savings of only storing aggregated +// distribution values for a large group. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + // Map from label to its value, for all labels dropped in any aggregation. + Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DroppedLabels) Reset() { *m = DroppedLabels{} } +func (m *DroppedLabels) String() string { return proto.CompactTextString(m) } +func (*DroppedLabels) ProtoMessage() {} +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return fileDescriptor_15749142c06d7f43, []int{0} +} + +func (m *DroppedLabels) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DroppedLabels.Unmarshal(m, b) +} +func (m *DroppedLabels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DroppedLabels.Marshal(b, m, deterministic) +} +func (m *DroppedLabels) XXX_Merge(src proto.Message) { + xxx_messageInfo_DroppedLabels.Merge(m, src) +} +func (m *DroppedLabels) XXX_Size() int { + return xxx_messageInfo_DroppedLabels.Size(m) +} +func (m *DroppedLabels) XXX_DiscardUnknown() { + xxx_messageInfo_DroppedLabels.DiscardUnknown(m) +} + +var xxx_messageInfo_DroppedLabels proto.InternalMessageInfo + +func (m *DroppedLabels) GetLabel() map[string]string { + if m != nil { + return m.Label + } + return nil +} + +func init() { + proto.RegisterType((*DroppedLabels)(nil), 
"google.monitoring.v3.DroppedLabels") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.DroppedLabels.LabelEntry") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/dropped_labels.proto", fileDescriptor_15749142c06d7f43) +} + +var fileDescriptor_15749142c06d7f43 = []byte{ + // 246 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x4f, 0x29, 0xca, 0x2f, 0x28, 0x48, 0x4d, 0x89, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0x29, 0xd6, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd5, 0x43, 0x28, 0xd5, 0x2b, 0x33, 0x56, + 0xea, 0x67, 0xe4, 0xe2, 0x75, 0x81, 0x28, 0xf7, 0x01, 0xab, 0x16, 0x72, 0xe1, 0x62, 0x05, 0xeb, + 0x93, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0xd2, 0xd3, 0xc3, 0xa6, 0x4f, 0x0f, 0x45, 0x8f, 0x1e, + 0x98, 0x72, 0xcd, 0x2b, 0x29, 0xaa, 0x0c, 0x82, 0x68, 0x96, 0xb2, 0xe0, 0xe2, 0x42, 0x08, 0x0a, + 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, + 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, + 0x82, 0xd1, 0x69, 0x15, 0x23, 0x97, 0x44, 0x72, 0x7e, 0x2e, 0x56, 0x6b, 0x9d, 0x84, 0x50, 0xec, + 0x0d, 0x00, 0x79, 0x2c, 0x80, 0x31, 0xca, 0x0e, 0xaa, 0x36, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, + 0x2f, 0xbf, 0x28, 0x5d, 0x3f, 0x3d, 0x35, 0x0f, 0xec, 0x6d, 0x7d, 0x88, 0x54, 0x62, 0x41, 0x66, + 0x31, 0x6a, 0x20, 0x59, 0x23, 0x78, 0xab, 0x98, 0xa4, 0xdc, 0x21, 0x06, 0x38, 0xe7, 0xe4, 0x97, + 0xa6, 0xe8, 0xf9, 0x22, 0xac, 0x0c, 0x33, 0x3e, 0x05, 0x93, 0x8c, 0x01, 0x4b, 0xc6, 0x20, 0x24, + 0x63, 0xc2, 0x8c, 0x93, 0xd8, 0xc0, 0x96, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x65, 0x9b, + 0x10, 0x62, 0x88, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go 
b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go new file mode 100644 index 0000000000..fb2ec76e48 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/group.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. 
+// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + // Output only. The name of this group. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `{group_id}` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. + // The format is `"projects/{project_id_or_number}/groups/{group_id}"`. + // For groups with no parent, `parentName` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this + // group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. 
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_907e30c1f087271d, []int{0} +} + +func (m *Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Group.Unmarshal(m, b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return xxx_messageInfo_Group.Size(m) +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Group) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Group) GetParentName() string { + if m != nil { + return m.ParentName + } + return "" +} + +func (m *Group) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *Group) GetIsCluster() bool { + if m != nil { + return m.IsCluster + } + return false +} + +func init() { + proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group") +} + +func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor_907e30c1f087271d) } + +var fileDescriptor_907e30c1f087271d = []byte{ + // 349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4a, 0x2b, 0x31, + 0x14, 0xc6, 0x49, 0xef, 0x6d, 0xb9, 0x4d, 0xef, 0x6a, 0xb8, 0x5c, 0x6a, 0x41, 0xda, 0xba, 0x2a, + 0x0a, 
0xc9, 0x62, 0x76, 0x0a, 0x82, 0xed, 0xa2, 0x2b, 0xa5, 0x74, 0xd1, 0x85, 0x14, 0x4a, 0x9c, + 0xa6, 0x21, 0x92, 0xc9, 0x09, 0xc9, 0x4c, 0x41, 0x87, 0x3e, 0x86, 0x0b, 0xb7, 0x2e, 0x5d, 0xfa, + 0x18, 0x3e, 0x8a, 0x4f, 0x21, 0x93, 0x8c, 0x4c, 0xa5, 0xae, 0xe6, 0x9c, 0xf3, 0xfd, 0x72, 0xfe, + 0x7c, 0x83, 0x07, 0x02, 0x40, 0x28, 0x4e, 0x53, 0xd0, 0x32, 0x03, 0x2b, 0xb5, 0xa0, 0xdb, 0x98, + 0x0a, 0x0b, 0xb9, 0x21, 0xc6, 0x42, 0x06, 0xd1, 0xbf, 0x40, 0x90, 0x9a, 0x20, 0xdb, 0xb8, 0x77, + 0x54, 0xbd, 0x63, 0x46, 0x52, 0xcb, 0x1d, 0xe4, 0x36, 0xe1, 0xe1, 0xc1, 0xc9, 0x5b, 0x03, 0x37, + 0xa7, 0x65, 0x83, 0x28, 0xc2, 0xbf, 0x35, 0x4b, 0x79, 0x17, 0x0d, 0xd0, 0xa8, 0x3d, 0xf7, 0x71, + 0x34, 0xc4, 0x7f, 0xd7, 0xd2, 0x19, 0xc5, 0x1e, 0x56, 0x5e, 0x6b, 0x78, 0xad, 0x53, 0xd5, 0x6e, + 0x4a, 0xa4, 0x8f, 0x3b, 0x86, 0x59, 0xae, 0xb3, 0x40, 0xfc, 0xf2, 0x04, 0x0e, 0x25, 0x0f, 0xfc, + 0xc7, 0xad, 0x8d, 0x54, 0x19, 0xb7, 0xdd, 0xa6, 0xd7, 0xaa, 0x2c, 0x3a, 0xc6, 0x58, 0xba, 0x55, + 0xa2, 0x72, 0x57, 0x6a, 0xad, 0x01, 0x1a, 0xfd, 0x99, 0xb7, 0xa5, 0x9b, 0x84, 0xc2, 0xf9, 0x33, + 0xfa, 0xb8, 0x7a, 0x42, 0xb8, 0xbf, 0x77, 0x49, 0xb8, 0x82, 0x19, 0xe9, 0x48, 0x02, 0x29, 0x0d, + 0x6b, 0x0f, 0x8d, 0x85, 0x7b, 0x9e, 0x64, 0x8e, 0x16, 0x55, 0xb4, 0x0b, 0x8e, 0x38, 0x5a, 0xf8, + 0xef, 0x2e, 0x3a, 0x03, 0x2b, 0x98, 0x96, 0x8f, 0x2c, 0x93, 0xa0, 0x1d, 0x2d, 0xf6, 0xd3, 0x03, + 0xb8, 0xbf, 0x01, 0xb5, 0xe6, 0xd6, 0xd1, 0x22, 0x04, 0x07, 0x00, 0x3a, 0x1d, 0xbf, 0x20, 0xdc, + 0x4d, 0x20, 0x25, 0x3f, 0x99, 0x3d, 0xc6, 0x7e, 0xaf, 0x59, 0xe9, 0xee, 0x0c, 0xdd, 0x5e, 0x56, + 0x8c, 0x00, 0xc5, 0xb4, 0x20, 0x60, 0x05, 0x15, 0x5c, 0x7b, 0xef, 0x69, 0x7d, 0xcf, 0xf7, 0x3f, + 0x7a, 0x51, 0x67, 0xaf, 0x8d, 0xde, 0x34, 0x34, 0x98, 0x28, 0xc8, 0xd7, 0xe4, 0xba, 0x1e, 0xb5, + 0x88, 0xdf, 0xbf, 0xc4, 0xa5, 0x17, 0x97, 0xb5, 0xb8, 0x5c, 0xc4, 0x77, 0x2d, 0x3f, 0x24, 0xfe, + 0x0c, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xbb, 0x3f, 0x33, 0x35, 0x02, 0x00, 0x00, +} diff --git 
a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go new file mode 100644 index 0000000000..c496d0f24d --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go @@ -0,0 +1,931 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/group_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The `ListGroup` request. +type ListGroupsRequest struct { + // Required. The project whose groups are to be listed. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // An optional filter consisting of a single group name. The filters limit + // the groups returned based on their parent-child relationship with the + // specified group. If no filter is specified, all groups are returned. 
+ // + // Types that are valid to be assigned to Filter: + // *ListGroupsRequest_ChildrenOfGroup + // *ListGroupsRequest_AncestorsOfGroup + // *ListGroupsRequest_DescendantsOfGroup + Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} } +func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupsRequest) ProtoMessage() {} +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{0} +} + +func (m *ListGroupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsRequest.Unmarshal(m, b) +} +func (m *ListGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsRequest.Marshal(b, m, deterministic) +} +func (m *ListGroupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsRequest.Merge(m, src) +} +func (m *ListGroupsRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupsRequest.Size(m) +} +func (m *ListGroupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsRequest proto.InternalMessageInfo + +func (m *ListGroupsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type isListGroupsRequest_Filter 
interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ListGroupsRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } +} + +// The `ListGroups` response. +type ListGroupsResponse struct { + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} } +func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupsResponse) ProtoMessage() {} +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{1} +} + +func (m *ListGroupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsResponse.Unmarshal(m, b) +} +func (m *ListGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsResponse.Marshal(b, m, deterministic) +} +func (m *ListGroupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsResponse.Merge(m, src) +} +func (m *ListGroupsResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupsResponse.Size(m) +} +func (m *ListGroupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsResponse proto.InternalMessageInfo + +func (m *ListGroupsResponse) GetGroup() []*Group { + if m != nil { + return m.Group + } + return nil +} + +func 
(m *ListGroupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + // Required. The group to retrieve. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } +func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } +func (*GetGroupRequest) ProtoMessage() {} +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{2} +} + +func (m *GetGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetGroupRequest.Unmarshal(m, b) +} +func (m *GetGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetGroupRequest.Marshal(b, m, deterministic) +} +func (m *GetGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGroupRequest.Merge(m, src) +} +func (m *GetGroupRequest) XXX_Size() int { + return xxx_messageInfo_GetGroupRequest.Size(m) +} +func (m *GetGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGroupRequest proto.InternalMessageInfo + +func (m *GetGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + // Required. The project in which to create the group. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Required. A group definition. It is an error to define the `name` field because + // the system assigns the name. 
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} } +func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*CreateGroupRequest) ProtoMessage() {} +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{3} +} + +func (m *CreateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateGroupRequest.Unmarshal(m, b) +} +func (m *CreateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateGroupRequest.Marshal(b, m, deterministic) +} +func (m *CreateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateGroupRequest.Merge(m, src) +} +func (m *CreateGroupRequest) XXX_Size() int { + return xxx_messageInfo_CreateGroupRequest.Size(m) +} +func (m *CreateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateGroupRequest proto.InternalMessageInfo + +func (m *CreateGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *CreateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + // Required. The new definition of the group. All fields of the existing group, + // excepting `name`, are replaced with the corresponding fields of this group. 
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } +func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGroupRequest) ProtoMessage() {} +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{4} +} + +func (m *UpdateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateGroupRequest.Unmarshal(m, b) +} +func (m *UpdateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateGroupRequest.Marshal(b, m, deterministic) +} +func (m *UpdateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGroupRequest.Merge(m, src) +} +func (m *UpdateGroupRequest) XXX_Size() int { + return xxx_messageInfo_UpdateGroupRequest.Size(m) +} +func (m *UpdateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateGroupRequest proto.InternalMessageInfo + +func (m *UpdateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *UpdateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. The default behavior is to be able to delete a +// single group without any descendants. +type DeleteGroupRequest struct { + // Required. The group to delete. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If this field is true, then the request means to delete a group with all + // its descendants. Otherwise, the request means to delete a group only when + // it has no descendants. The default value is false. + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} } +func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteGroupRequest) ProtoMessage() {} +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{5} +} + +func (m *DeleteGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteGroupRequest.Unmarshal(m, b) +} +func (m *DeleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteGroupRequest.Marshal(b, m, deterministic) +} +func (m *DeleteGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteGroupRequest.Merge(m, src) +} +func (m *DeleteGroupRequest) XXX_Size() int { + return xxx_messageInfo_DeleteGroupRequest.Size(m) +} +func (m *DeleteGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteGroupRequest proto.InternalMessageInfo + +func (m *DeleteGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteGroupRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +// The `ListGroupMembers` request. +type ListGroupMembersRequest struct { + // Required. The group whose members are listed. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `next_page_token` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list + // filter](https://cloud.google.com/monitoring/api/learn_more#filtering) + // describing the members to be returned. The filter may reference the type, + // labels, and metadata of monitored resources that comprise the group. For + // example, to return only resources representing Compute Engine VM instances, + // use this filter: + // + // `resource.type = "gce_instance"` + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. 
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} } +func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersRequest) ProtoMessage() {} +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{6} +} + +func (m *ListGroupMembersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersRequest.Unmarshal(m, b) +} +func (m *ListGroupMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersRequest.Marshal(b, m, deterministic) +} +func (m *ListGroupMembersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersRequest.Merge(m, src) +} +func (m *ListGroupMembersRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersRequest.Size(m) +} +func (m *ListGroupMembersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersRequest proto.InternalMessageInfo + +func (m *ListGroupMembersRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListGroupMembersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupMembersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListGroupMembersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListGroupMembersRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + // A set of monitored resources in the group. 
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} } +func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersResponse) ProtoMessage() {} +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{7} +} + +func (m *ListGroupMembersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersResponse.Unmarshal(m, b) +} +func (m *ListGroupMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersResponse.Marshal(b, m, deterministic) +} +func (m *ListGroupMembersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersResponse.Merge(m, src) +} +func (m *ListGroupMembersResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersResponse.Size(m) +} +func (m *ListGroupMembersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersResponse proto.InternalMessageInfo + +func (m *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if m != nil { + return m.Members + } + return nil +} + +func (m 
*ListGroupMembersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListGroupMembersResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func init() { + proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest") + proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse") + proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest") + proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest") + proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest") + proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest") + proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest") + proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor_21ad21d0ed55c55a) +} + +var fileDescriptor_21ad21d0ed55c55a = []byte{ + // 999 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0xed, 0x24, 0xb5, 0xc7, 0xad, 0xd2, 0x8c, 0xaa, 0xd6, 0xdd, 0xa4, 0xad, 0xb3, 0xfd, + 0x67, 0x85, 0x66, 0x57, 0xc4, 0xe2, 0x8f, 0x1a, 0x51, 0xc9, 0x2e, 0x28, 0x20, 0x51, 0x1a, 0x6d, + 0x43, 0x85, 0x50, 0x24, 0x6b, 0xb2, 0x7e, 0x76, 0x06, 0x76, 0x67, 0xb6, 0xb3, 0x63, 0x87, 0x14, + 0x85, 0x03, 0x37, 0x24, 0x84, 0x84, 0xb8, 0xa0, 0x7e, 0x02, 0xca, 0x81, 0x0f, 0xd2, 0x23, 0xdc, + 0x72, 0xea, 0x81, 0x13, 0x1f, 0xa1, 0x27, 0xb4, 0xb3, 0x3b, 0xf1, 0xc6, 0x7f, 0x93, 0x48, 0x1c, + 0xf7, 0xbd, 0xdf, 0xbc, 0xdf, 0x7b, 0x6f, 0xde, 0xfb, 0xcd, 0xa2, 0x6a, 0x87, 0xf3, 0x8e, 0x0f, + 0x4e, 0xc0, 0x19, 0x95, 0x5c, 0x50, 0xd6, 0x71, 0x7a, 0x35, 0xa7, 0x23, 0x78, 0x37, 0x6c, 0x46, 
+ 0x20, 0x7a, 0xd4, 0x03, 0x3b, 0x14, 0x5c, 0x72, 0x7c, 0x29, 0x41, 0xda, 0x7d, 0xa4, 0xdd, 0xab, + 0x99, 0x4b, 0xe9, 0x79, 0x12, 0x52, 0x87, 0x30, 0xc6, 0x25, 0x91, 0x94, 0xb3, 0x28, 0x39, 0x63, + 0x5e, 0xc9, 0x78, 0x3d, 0x9f, 0x02, 0x93, 0xa9, 0xe3, 0x46, 0xc6, 0xd1, 0xa6, 0xe0, 0xb7, 0x9a, + 0x3b, 0xb0, 0x4b, 0x7a, 0x94, 0x8b, 0x14, 0x70, 0x33, 0x03, 0x48, 0x19, 0xa1, 0xd5, 0x14, 0x10, + 0xf1, 0xae, 0xd0, 0x29, 0x99, 0x57, 0x33, 0xa0, 0x01, 0xd7, 0xf2, 0xc8, 0xba, 0x3c, 0x1e, 0x04, + 0x9c, 0xa5, 0x90, 0xca, 0xf8, 0xd2, 0x53, 0xc4, 0x62, 0x8a, 0x50, 0x5f, 0x3b, 0xdd, 0xb6, 0x03, + 0x41, 0x28, 0xf7, 0x13, 0xa7, 0xf5, 0x22, 0x8f, 0x16, 0x3e, 0xa3, 0x91, 0xdc, 0x88, 0x0f, 0x44, + 0x2e, 0x3c, 0xeb, 0x42, 0x24, 0xf1, 0x3a, 0x9a, 0x61, 0x24, 0x80, 0xf2, 0xb9, 0x8a, 0x51, 0x2d, + 0x36, 0xee, 0xbe, 0xae, 0xe7, 0xde, 0xd4, 0x97, 0xf1, 0x8d, 0x4c, 0xd3, 0x92, 0x98, 0x24, 0xa4, + 0x91, 0xed, 0xf1, 0xc0, 0x51, 0xc7, 0x5d, 0x75, 0x08, 0xbb, 0x68, 0xc1, 0xdb, 0xa5, 0x7e, 0x4b, + 0x00, 0x6b, 0xf2, 0x76, 0x53, 0xa5, 0x52, 0xce, 0xa9, 0x48, 0xb7, 0xde, 0xd4, 0x97, 0xd1, 0xb4, + 0x30, 0x9f, 0xbc, 0xe5, 0xce, 0xeb, 0x00, 0x8f, 0xdb, 0xca, 0x84, 0xb7, 0x10, 0x26, 0xcc, 0x83, + 0x48, 0x72, 0x11, 0xf5, 0x83, 0xe6, 0x4f, 0x15, 0xf4, 0xe2, 0x51, 0x04, 0x1d, 0xf5, 0x4b, 0x74, + 0xa9, 0x05, 0x91, 0x07, 0xac, 0x45, 0x98, 0xcc, 0xc4, 0x9d, 0x39, 0x55, 0x5c, 0x9c, 0x89, 0xa1, + 0x23, 0x2f, 0xa2, 0x62, 0x48, 0x3a, 0xd0, 0x8c, 0xe8, 0x73, 0x28, 0xcf, 0x56, 0x8c, 0xea, 0xac, + 0x5b, 0x88, 0x0d, 0x4f, 0xe8, 0x73, 0xc0, 0xd7, 0x10, 0x52, 0x4e, 0xc9, 0xbf, 0x01, 0x56, 0x9e, + 0x8b, 0xc9, 0x5c, 0x05, 0xdf, 0x8a, 0x0d, 0x8d, 0x02, 0x9a, 0x6b, 0x53, 0x5f, 0x82, 0xb0, 0x38, + 0xc2, 0xd9, 0xbb, 0x89, 0x42, 0xce, 0x22, 0xc0, 0xef, 0xa0, 0xd9, 0x24, 0x4d, 0xa3, 0x92, 0xaf, + 0x96, 0xd6, 0x16, 0xed, 0x51, 0x23, 0x6d, 0x27, 0x37, 0x92, 0x20, 0xf1, 0x1d, 0x34, 0xcf, 0xe0, + 0x5b, 0xd9, 0xcc, 0xd0, 0xaa, 0x0b, 0x71, 0x2f, 0xc4, 0xe6, 0x4d, 0x4d, 0x6d, 0x7d, 0x8e, 0xe6, + 0x37, 0x20, 0xe1, 0x1b, 0x1c, 0x85, 
0x7c, 0x76, 0x14, 0xd0, 0xc9, 0x46, 0xc1, 0xfa, 0xd3, 0x40, + 0xf8, 0xa1, 0x00, 0x22, 0x61, 0x64, 0xcc, 0x99, 0xb3, 0x8c, 0xd7, 0x7b, 0xba, 0xfc, 0xb8, 0x82, + 0xc9, 0xe5, 0x37, 0xf2, 0xaf, 0xeb, 0x39, 0xdd, 0x83, 0x9b, 0xe8, 0x42, 0x8f, 0xf8, 0xb4, 0x45, + 0x24, 0x34, 0x39, 0xf3, 0xf7, 0x55, 0x45, 0x05, 0xf7, 0xbc, 0x36, 0x3e, 0x66, 0xfe, 0xbe, 0xf5, + 0x0c, 0xe1, 0x2f, 0xc2, 0xd6, 0x60, 0xbe, 0xff, 0x2b, 0x25, 0x47, 0xf8, 0x23, 0xf0, 0x61, 0x4c, + 0x8b, 0xce, 0xd2, 0x76, 0xbc, 0x84, 0x8a, 0x02, 0xbc, 0xae, 0x88, 0x68, 0x2f, 0x69, 0x72, 0xc1, + 0xed, 0x1b, 0xac, 0x7f, 0x0d, 0x74, 0xe5, 0x68, 0xac, 0x1e, 0x41, 0xb0, 0x03, 0x62, 0xf2, 0xe2, + 0x9f, 0x94, 0xf6, 0xd8, 0xd0, 0xe7, 0x27, 0x0e, 0xfd, 0xcc, 0xc0, 0xd0, 0xe3, 0xcb, 0x7a, 0xe8, + 0xd5, 0xb6, 0x14, 0xdd, 0xf4, 0x0b, 0x3f, 0x40, 0x05, 0xca, 0x24, 0x88, 0x1e, 0xf1, 0xd5, 0xa6, + 0x94, 0xd6, 0xac, 0xd1, 0xdd, 0xdf, 0xa2, 0x01, 0x7c, 0x9a, 0x22, 0xdd, 0xa3, 0x33, 0xd6, 0x0b, + 0x03, 0x95, 0x87, 0x8b, 0x4d, 0x37, 0xe9, 0x7d, 0x74, 0x2e, 0x48, 0x4c, 0xe9, 0x2e, 0x5d, 0xd3, + 0xb1, 0x49, 0x48, 0xed, 0x47, 0x5a, 0xb0, 0xdd, 0x54, 0x94, 0x5d, 0x8d, 0x3e, 0xe9, 0x3e, 0xc5, + 0x45, 0x4b, 0x2e, 0x89, 0x9f, 0x6d, 0x49, 0x51, 0x59, 0xe2, 0x9e, 0xac, 0xfd, 0x56, 0x40, 0xe7, + 0x55, 0x62, 0x4f, 0x92, 0x37, 0x0a, 0xff, 0x64, 0x20, 0xd4, 0xdf, 0x78, 0x7c, 0x77, 0x74, 0xa9, + 0x43, 0x7a, 0x6d, 0x56, 0xa7, 0x03, 0x93, 0x92, 0xad, 0xb7, 0x0f, 0xeb, 0xea, 0xb2, 0x7e, 0xf8, + 0xfb, 0x9f, 0x5f, 0x73, 0xd7, 0xf1, 0x52, 0xfc, 0x58, 0x7c, 0x17, 0x1b, 0x3e, 0x0c, 0x05, 0xff, + 0x1a, 0x3c, 0x19, 0x39, 0x2b, 0x07, 0xc9, 0xf3, 0x11, 0xe1, 0x03, 0x54, 0xd0, 0x72, 0x80, 0x6f, + 0x8f, 0x19, 0xfa, 0xe3, 0x72, 0x61, 0x4e, 0xda, 0x0d, 0x6b, 0x35, 0x4b, 0x5e, 0xc1, 0xd7, 0x47, + 0x91, 0xa7, 0xdc, 0xce, 0xca, 0x01, 0xfe, 0xd9, 0x40, 0xa5, 0x8c, 0x7a, 0xe0, 0x31, 0x55, 0x0e, + 0x0b, 0xcc, 0xe4, 0x2c, 0x3e, 0x38, 0xac, 0xa3, 0x98, 0xf4, 0x9e, 0x62, 0x52, 0xb9, 0xdc, 0xb6, + 0x26, 0x36, 0xe2, 0x7e, 0xba, 0xcf, 0xbf, 0x18, 0xa8, 0x94, 0x91, 0x87, 
0x71, 0x09, 0x0d, 0x2b, + 0xc8, 0xe4, 0x84, 0xd6, 0x0f, 0xeb, 0xb3, 0xfd, 0x5c, 0x56, 0xcd, 0x5b, 0x2a, 0x97, 0xe4, 0x09, + 0x1f, 0xdb, 0x1d, 0x9d, 0xd3, 0xf7, 0xa8, 0x94, 0x91, 0x8f, 0x71, 0x29, 0x0d, 0x2b, 0x8c, 0x79, + 0x59, 0x23, 0xf5, 0x7f, 0x81, 0xfd, 0x71, 0xfc, 0x5f, 0x30, 0x70, 0x49, 0x2b, 0xd3, 0x2e, 0xe9, + 0x77, 0x03, 0x5d, 0x1c, 0x5c, 0x30, 0xbc, 0x3a, 0x65, 0x1e, 0x8f, 0xab, 0x8e, 0x69, 0x9f, 0x14, + 0x9e, 0x0e, 0xf1, 0xbb, 0xd9, 0x14, 0xab, 0xf8, 0xce, 0xe4, 0x14, 0x9d, 0x74, 0x6b, 0xcd, 0x97, + 0xc6, 0xab, 0xfa, 0xd5, 0xb1, 0x52, 0xf6, 0x57, 0xfd, 0x47, 0x63, 0x57, 0xca, 0x30, 0xba, 0xef, + 0x38, 0x7b, 0x7b, 0x7b, 0x83, 0x42, 0x47, 0xba, 0x72, 0xd7, 0xf1, 0x7c, 0xde, 0x6d, 0xad, 0x86, + 0x3e, 0x91, 0x6d, 0x2e, 0x82, 0x7b, 0xd3, 0xe0, 0x7d, 0xae, 0x53, 0x40, 0x6d, 0x01, 0xa4, 0xd5, + 0x78, 0x69, 0xa0, 0xb2, 0xc7, 0x83, 0x91, 0x8d, 0x69, 0x2c, 0x64, 0x45, 0x63, 0x33, 0xbe, 0xbe, + 0x4d, 0xe3, 0xab, 0x07, 0x29, 0xb4, 0xc3, 0x7d, 0xc2, 0x3a, 0x36, 0x17, 0x1d, 0xa7, 0x03, 0x4c, + 0x5d, 0xae, 0xd3, 0x67, 0x3c, 0xfe, 0x9f, 0xb8, 0xde, 0xff, 0xfa, 0x23, 0x67, 0x6e, 0x24, 0x01, + 0x1e, 0xc6, 0x45, 0x6a, 0xf5, 0x8b, 0x19, 0x9f, 0xd6, 0x5e, 0x69, 0xe7, 0xb6, 0x72, 0x6e, 0xf7, + 0x9d, 0xdb, 0x4f, 0x6b, 0x3b, 0x73, 0x8a, 0xa4, 0xf6, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3c, + 0x8b, 0xf0, 0x28, 0x86, 0x0b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. 
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedGroupServiceServer struct { +} + +func (*UnimplementedGroupServiceServer) ListGroups(ctx context.Context, req *ListGroupsRequest) (*ListGroupsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented") +} +func (*UnimplementedGroupServiceServer) GetGroup(ctx context.Context, req *GetGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented") +} +func (*UnimplementedGroupServiceServer) CreateGroup(ctx context.Context, req *CreateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) UpdateGroup(ctx context.Context, req *UpdateGroupRequest) (*Group, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented") +} +func (*UnimplementedGroupServiceServer) DeleteGroup(ctx context.Context, req *DeleteGroupRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented") +} +func (*UnimplementedGroupServiceServer) ListGroupMembers(ctx context.Context, req *ListGroupMembersRequest) (*ListGroupMembersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented") +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: 
_GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go new file mode 100644 index 0000000000..be83f6d02e --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/distribution" + _ "google.golang.org/genproto/googleapis/api/label" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A single data point in a time series. +type Point struct { + // The time interval to which the data point applies. For `GAUGE` metrics, + // the start time is optional, but if it is supplied, it must equal the + // end time. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. 
For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_c76199a3d2c4c21e, []int{0} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *Point) GetValue() *TypedValue { + if m != nil { + return m.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + // The associated metric. 
A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. Custom metrics can use only certain + // monitored resource types in their time series data. + Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` + // Output only. The associated monitored resource metadata. When reading a + // a timeseries, this field will include metadata labels that are explicitly + // named in the reduction. When creating a timeseries, this field is ignored. + Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The metric kind of the time series. When listing time series, this metric + // kind might be different from the metric kind of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the metric kind of the associated metric. If the associated + // metric's descriptor must be auto-created, then this field specifies the + // metric kind of the new descriptor and must be either `GAUGE` (the default) + // or `CUMULATIVE`. + MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // The value type of the time series. When listing time series, this value + // type might be different from the value type of the associated metric if + // this time series is an alignment or reduction of other time series. + // + // When creating a time series, this field is optional. If present, it must be + // the same as the type of the data in the `points` field. 
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The data points of this time series. When listing time series, points are + // returned in reverse time order. + // + // When creating a time series, this field must contain exactly one point and + // the point's type must be the same as the value type of the associated + // metric. If the associated metric's descriptor must be auto-created, then + // the value type of the descriptor is determined by the point's type, which + // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`. + Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_c76199a3d2c4c21e, []int{1} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetMetric() *metric.Metric { + if m != nil { + return m.Metric + } + return nil +} + +func (m *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m 
*TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +func init() { + proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point") + proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries") +} + +func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor_c76199a3d2c4c21e) } + +var fileDescriptor_c76199a3d2c4c21e = []byte{ + // 441 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x49, 0xd7, 0xd6, 0x3a, 0x05, 0x2f, 0x06, 0xd1, 0x50, 0x59, 0xa8, 0x15, 0xb5, 0x78, + 0x91, 0x40, 0x03, 0x82, 0x08, 0x0b, 0xae, 0x8a, 0x8a, 0x2c, 0x94, 0x51, 0x7a, 0x21, 0x85, 0x32, + 0x4d, 0x0e, 0xe1, 0x60, 0xe6, 0x0f, 0x93, 0x69, 0x60, 0xaf, 0x7c, 0x18, 0xef, 0x7c, 0x14, 0x9f, + 0xc9, 0x0b, 0xc9, 0xcc, 0xa4, 0xed, 0x62, 0xb7, 0x77, 0xc9, 0x7c, 0xbf, 0xef, 0x7c, 0x67, 0xce, + 0x1c, 0xf2, 0xa4, 0x54, 0xaa, 0xac, 0x20, 0x15, 0x4a, 0xa2, 0x55, 0x06, 0x65, 0x99, 0x36, 0x59, + 0x2a, 0xc0, 0x1a, 0xcc, 0x13, 0x6d, 0x94, 0x55, 0xf4, 0x81, 0x47, 0x92, 0x3d, 0x92, 0x34, 0xd9, + 0xf8, 0x3c, 0x18, 0xb9, 0xc6, 0xb4, 0xc0, 0xda, 0x1a, 0xdc, 0x6c, 0x2d, 0x2a, 0xe9, 0x4d, 0xe3, + 0x87, 0x07, 0x72, 0xc5, 0x37, 0x50, 0x85, 0xf3, 0x47, 0x07, 0xe7, 0x87, 0x29, 0xe3, 0xa7, 0x87, + 0x82, 0x4f, 0x82, 0x62, 0x6d, 0xa0, 0x56, 0x5b, 0x93, 0x43, 0x80, 0x8e, 0x77, 0x9b, 0x2b, 0x21, + 0xba, 0xe0, 0xe9, 0x4f, 0xd2, 0x5f, 0x28, 0x94, 0x96, 0x5e, 0x90, 
0x21, 0x4a, 0x0b, 0xa6, 0xe1, + 0x55, 0x1c, 0x4d, 0xa2, 0xd9, 0x68, 0x3e, 0x4d, 0x8e, 0xdd, 0x24, 0xf9, 0x86, 0x02, 0x3e, 0x07, + 0x92, 0xed, 0x3c, 0xf4, 0x15, 0xe9, 0x37, 0xbc, 0xda, 0x42, 0xdc, 0x73, 0xe6, 0xc9, 0x2d, 0xe6, + 0x6b, 0x0d, 0xc5, 0xb2, 0xe5, 0x98, 0xc7, 0xa7, 0x7f, 0x7b, 0x84, 0xb4, 0x25, 0xbf, 0x82, 0x41, + 0xa8, 0xe9, 0x4b, 0x32, 0xf0, 0xf7, 0x0c, 0x4d, 0xd0, 0xae, 0x0e, 0xd7, 0x98, 0x5c, 0x39, 0x85, + 0x05, 0x82, 0xbe, 0x26, 0xc3, 0xee, 0xc2, 0x21, 0xf5, 0xfc, 0x06, 0xdd, 0x8d, 0x85, 0x05, 0x88, + 0xed, 0x70, 0xfa, 0x96, 0x0c, 0x05, 0x58, 0x5e, 0x70, 0xcb, 0xe3, 0xbb, 0xce, 0xfa, 0xec, 0xa4, + 0xf5, 0x2a, 0xc0, 0x6c, 0x67, 0xa3, 0x9f, 0xc8, 0xc8, 0xf7, 0xb1, 0xfe, 0x81, 0xb2, 0x88, 0xcf, + 0x26, 0xd1, 0xec, 0xfe, 0xfc, 0xc5, 0xff, 0xed, 0xbe, 0x87, 0x3a, 0x37, 0xa8, 0xad, 0x32, 0xe1, + 0xe0, 0x0b, 0xca, 0x82, 0x11, 0xb1, 0xfb, 0xa6, 0x1f, 0x08, 0x71, 0xb3, 0x58, 0xdb, 0x6b, 0x0d, + 0xf1, 0x1d, 0x57, 0xe8, 0xf9, 0xc9, 0x42, 0x6e, 0x82, 0xed, 0x2c, 0xd9, 0xbd, 0xa6, 0xfb, 0xa4, + 0x19, 0x19, 0xe8, 0xf6, 0x29, 0xeb, 0xb8, 0x3f, 0x39, 0x9b, 0x8d, 0xe6, 0x8f, 0x8f, 0x3f, 0x81, + 0x7b, 0x6e, 0x16, 0xd0, 0xcb, 0x5f, 0x11, 0x89, 0x73, 0x25, 0x8e, 0xa2, 0x97, 0x23, 0x1f, 0xbc, + 0x68, 0x37, 0x65, 0x11, 0x7d, 0xbf, 0x08, 0x50, 0xa9, 0x2a, 0x2e, 0xcb, 0x44, 0x99, 0x32, 0x2d, + 0x41, 0xba, 0x3d, 0x4a, 0xbd, 0xc4, 0x35, 0xd6, 0x37, 0xb7, 0xed, 0xcd, 0xfe, 0xef, 0x77, 0x6f, + 0xfc, 0xd1, 0x17, 0x78, 0x57, 0xa9, 0x6d, 0xd1, 0x0d, 0xb9, 0xcd, 0x5a, 0x66, 0x7f, 0x3a, 0x71, + 0xe5, 0xc4, 0xd5, 0x5e, 0x5c, 0x2d, 0xb3, 0xcd, 0xc0, 0x85, 0x64, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x06, 0x37, 0xbb, 0x92, 0x7f, 0x03, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go new file mode 100644 index 0000000000..3f972bed67 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ 
-0,0 +1,1406 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + _ "github.com/golang/protobuf/ptypes/duration" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status1 "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Controls which fields are returned by `ListTimeSeries`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. 
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +var ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", +} + +var ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x)) +} + +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters) + // describing the descriptors to be returned. The filter can reference the + // descriptor's type and labels. For example, the following filter returns + // only Google Compute Engine descriptors that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsRequest) Reset() { + *m = ListMonitoredResourceDescriptorsRequest{} +} +func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{0} +} + +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Marshal(b, m, deterministic) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Merge(m, src) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Size(m) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMonitoredResourceDescriptorsRequest) 
GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsResponse) Reset() { + *m = ListMonitoredResourceDescriptorsResponse{} +} +func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{1} +} + +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Marshal(b, m, deterministic) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Merge(m, src) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Size() int { + return 
xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Size(m) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if m != nil { + return m.ResourceDescriptors + } + return nil +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + // Required. The monitored resource descriptor to get. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] + // + // The `[RESOURCE_TYPE]` is a predefined type, such as + // `cloudsql_database`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonitoredResourceDescriptorRequest{} } +func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{2} +} + +func (m *GetMonitoredResourceDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Marshal(b, m, deterministic) +} +func (m 
*GetMonitoredResourceDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Merge(m, src) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Size(m) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMonitoredResourceDescriptorRequest proto.InternalMessageInfo + +func (m *GetMonitoredResourceDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListMetricDescriptors` request. +type ListMetricDescriptorsRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } +func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsRequest) ProtoMessage() {} +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{3} +} + +func (m *ListMetricDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsRequest.Marshal(b, m, deterministic) +} +func (m *ListMetricDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsRequest.Merge(m, src) +} +func (m *ListMetricDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsRequest.Size(m) +} +func (m *ListMetricDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMetricDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMetricDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. 
+type ListMetricDescriptorsResponse struct { + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. + MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } +func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsResponse) ProtoMessage() {} +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{4} +} + +func (m *ListMetricDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsResponse.Marshal(b, m, deterministic) +} +func (m *ListMetricDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsResponse.Merge(m, src) +} +func (m *ListMetricDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsResponse.Size(m) +} +func (m *ListMetricDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMetricDescriptorsResponse) 
GetMetricDescriptors() []*metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptors + } + return nil +} + +func (m *ListMetricDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + // Required. The metric descriptor on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example value of `[METRIC_ID]` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} } +func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMetricDescriptorRequest) ProtoMessage() {} +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{5} +} + +func (m *GetMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *GetMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMetricDescriptorRequest.Merge(m, src) +} +func (m *GetMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMetricDescriptorRequest.Size(m) +} +func (m *GetMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMetricDescriptorRequest proto.InternalMessageInfo + +func (m *GetMetricDescriptorRequest) GetName() string { + if m != nil { + 
return m.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. +type CreateMetricDescriptorRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new [custom metric](https://cloud.google.com/monitoring/custom-metrics) + // descriptor. + MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} } +func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*CreateMetricDescriptorRequest) ProtoMessage() {} +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{6} +} + +func (m *CreateMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *CreateMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *CreateMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateMetricDescriptorRequest.Merge(m, src) +} +func (m *CreateMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_CreateMetricDescriptorRequest.Size(m) +} +func (m *CreateMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateMetricDescriptorRequest proto.InternalMessageInfo + +func (m *CreateMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m 
*CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + // Required. The metric descriptor on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] + // + // An example of `[METRIC_ID]` is: + // `"custom.googleapis.com/my_test_metric"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} } +func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} +func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{7} +} + +func (m *DeleteMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *DeleteMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *DeleteMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMetricDescriptorRequest.Merge(m, src) +} +func (m *DeleteMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Size(m) +} +func (m *DeleteMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMetricDescriptorRequest proto.InternalMessageInfo + +func (m *DeleteMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListTimeSeries` 
request. +type ListTimeSeriesRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + // Required. A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // that specifies which time series should be returned. The filter must + // specify a single metric type, and can additionally specify metric labels + // and other information. For example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.labels.instance_name = "my-instance-name" + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Required. The time interval for which results should be returned. Only time series + // that contain data points in the specified interval are included + // in the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // Specifies the alignment of data points in individual time series as + // well as how to combine the retrieved time series across specified labels. + // + // By default (if no `aggregation` is explicitly specified), the raw time + // series data is returned. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` + // Unsupported: must be left blank. The points in each time series are + // currently returned in reverse time order (most recent to oldest). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Required. Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. 
If + // `page_size` is empty or more than 100,000 results, the effective + // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the + // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is + // the maximum number of `TimeSeries` returned. + PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} } +func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesRequest) ProtoMessage() {} +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{8} +} + +func (m *ListTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesRequest.Unmarshal(m, b) +} +func (m *ListTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (m *ListTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesRequest.Merge(m, src) +} +func (m *ListTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesRequest.Size(m) +} +func (m *ListTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesRequest proto.InternalMessageInfo + +func (m *ListTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + 
+func (m *ListTimeSeriesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if m != nil { + return m.Aggregation + } + return nil +} + +func (m *ListTimeSeriesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if m != nil { + return m.View + } + return ListTimeSeriesRequest_FULL +} + +func (m *ListTimeSeriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTimeSeriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `page_token` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. 
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} } +func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesResponse) ProtoMessage() {} +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{9} +} + +func (m *ListTimeSeriesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesResponse.Unmarshal(m, b) +} +func (m *ListTimeSeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesResponse.Marshal(b, m, deterministic) +} +func (m *ListTimeSeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesResponse.Merge(m, src) +} +func (m *ListTimeSeriesResponse) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesResponse.Size(m) +} +func (m *ListTimeSeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesResponse proto.InternalMessageInfo + +func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *ListTimeSeriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if m != nil { + return m.ExecutionErrors + } + return nil +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + // Required. The project on which to execute the request. 
The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. + // + // The maximum number of `TimeSeries` objects per `Create` request is 200. + TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} } +func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesRequest) ProtoMessage() {} +func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{10} +} + +func (m *CreateTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesRequest.Unmarshal(m, b) +} +func (m *CreateTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesRequest.Merge(m, src) +} +func (m *CreateTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesRequest.Size(m) +} +func (m *CreateTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesRequest proto.InternalMessageInfo + +func (m *CreateTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + 
return "" +} + +func (m *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +// DEPRECATED. Used to hold per-time-series error status. +type CreateTimeSeriesError struct { + // DEPRECATED. Time series ID that resulted in the `status` error. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` // Deprecated: Do not use. + // DEPRECATED. The status of the requested write operation for `time_series`. + Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` // Deprecated: Do not use. + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} } +func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesError) ProtoMessage() {} +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{11} +} + +func (m *CreateTimeSeriesError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesError.Unmarshal(m, b) +} +func (m *CreateTimeSeriesError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesError.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesError) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesError.Merge(m, src) +} +func (m *CreateTimeSeriesError) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesError.Size(m) +} +func (m *CreateTimeSeriesError) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesError.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesError proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +// Deprecated: Do not use. 
+func (m *CreateTimeSeriesError) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +// Summary of the result of a failed request to write data to a time series. +type CreateTimeSeriesSummary struct { + // The number of points in the request. + TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"` + // The number of points that were successfully written. + SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"` + // The number of points that failed to be written. Order is not guaranteed. + Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesSummary) Reset() { *m = CreateTimeSeriesSummary{} } +func (m *CreateTimeSeriesSummary) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesSummary) ProtoMessage() {} +func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{12} +} + +func (m *CreateTimeSeriesSummary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesSummary.Unmarshal(m, b) +} +func (m *CreateTimeSeriesSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesSummary.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesSummary) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesSummary.Merge(m, src) +} +func (m *CreateTimeSeriesSummary) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesSummary.Size(m) +} +func (m *CreateTimeSeriesSummary) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesSummary.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesSummary 
proto.InternalMessageInfo + +func (m *CreateTimeSeriesSummary) GetTotalPointCount() int32 { + if m != nil { + return m.TotalPointCount + } + return 0 +} + +func (m *CreateTimeSeriesSummary) GetSuccessPointCount() int32 { + if m != nil { + return m.SuccessPointCount + } + return 0 +} + +func (m *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error { + if m != nil { + return m.Errors + } + return nil +} + +// Detailed information about an error category. +type CreateTimeSeriesSummary_Error struct { + // The status of the requested write operation. + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The number of points that couldn't be written because of `status`. + PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesSummary_Error) Reset() { *m = CreateTimeSeriesSummary_Error{} } +func (m *CreateTimeSeriesSummary_Error) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesSummary_Error) ProtoMessage() {} +func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{12, 0} +} + +func (m *CreateTimeSeriesSummary_Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesSummary_Error.Unmarshal(m, b) +} +func (m *CreateTimeSeriesSummary_Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesSummary_Error.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesSummary_Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesSummary_Error.Merge(m, src) +} +func (m *CreateTimeSeriesSummary_Error) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesSummary_Error.Size(m) +} +func (m *CreateTimeSeriesSummary_Error) XXX_DiscardUnknown() { + 
xxx_messageInfo_CreateTimeSeriesSummary_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesSummary_Error proto.InternalMessageInfo + +func (m *CreateTimeSeriesSummary_Error) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *CreateTimeSeriesSummary_Error) GetPointCount() int32 { + if m != nil { + return m.PointCount + } + return 0 +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView", ListTimeSeriesRequest_TimeSeriesView_name, ListTimeSeriesRequest_TimeSeriesView_value) + proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsRequest") + proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsResponse") + proto.RegisterType((*GetMonitoredResourceDescriptorRequest)(nil), "google.monitoring.v3.GetMonitoredResourceDescriptorRequest") + proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "google.monitoring.v3.ListMetricDescriptorsRequest") + proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "google.monitoring.v3.ListMetricDescriptorsResponse") + proto.RegisterType((*GetMetricDescriptorRequest)(nil), "google.monitoring.v3.GetMetricDescriptorRequest") + proto.RegisterType((*CreateMetricDescriptorRequest)(nil), "google.monitoring.v3.CreateMetricDescriptorRequest") + proto.RegisterType((*DeleteMetricDescriptorRequest)(nil), "google.monitoring.v3.DeleteMetricDescriptorRequest") + proto.RegisterType((*ListTimeSeriesRequest)(nil), "google.monitoring.v3.ListTimeSeriesRequest") + proto.RegisterType((*ListTimeSeriesResponse)(nil), "google.monitoring.v3.ListTimeSeriesResponse") + proto.RegisterType((*CreateTimeSeriesRequest)(nil), "google.monitoring.v3.CreateTimeSeriesRequest") + proto.RegisterType((*CreateTimeSeriesError)(nil), "google.monitoring.v3.CreateTimeSeriesError") + proto.RegisterType((*CreateTimeSeriesSummary)(nil), 
"google.monitoring.v3.CreateTimeSeriesSummary") + proto.RegisterType((*CreateTimeSeriesSummary_Error)(nil), "google.monitoring.v3.CreateTimeSeriesSummary.Error") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor_7b3d47b45a293957) +} + +var fileDescriptor_7b3d47b45a293957 = []byte{ + // 1483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcf, 0x73, 0xd3, 0xc6, + 0x17, 0xff, 0xae, 0x9d, 0x84, 0xb0, 0x1e, 0x20, 0x59, 0x20, 0x38, 0x86, 0x80, 0xd1, 0x77, 0x68, + 0x82, 0x31, 0x52, 0xc7, 0x2e, 0x65, 0x08, 0x0d, 0x33, 0xca, 0x0f, 0x68, 0x07, 0x68, 0x53, 0x9b, + 0x32, 0x53, 0xca, 0x8c, 0x47, 0x91, 0x37, 0x62, 0x5b, 0x4b, 0xab, 0xae, 0xd6, 0x09, 0x49, 0x86, + 0x4b, 0xff, 0x80, 0x5e, 0xda, 0x43, 0x7b, 0xeb, 0x99, 0x43, 0x67, 0x7a, 0xa2, 0xc3, 0xa1, 0x87, + 0x9e, 0x3a, 0x4c, 0x0f, 0x9d, 0xf6, 0xd4, 0x4c, 0x0f, 0x1c, 0x38, 0x71, 0xea, 0xb9, 0xa7, 0x8e, + 0x56, 0x92, 0x2d, 0xc9, 0xb2, 0x62, 0x27, 0xd3, 0x9b, 0xa5, 0xf7, 0xf6, 0xbd, 0xcf, 0xfb, 0xbc, + 0x7d, 0xbb, 0x1f, 0x0b, 0x5e, 0x34, 0x28, 0x35, 0x5a, 0x58, 0x31, 0xa9, 0x45, 0x38, 0x65, 0xc4, + 0x32, 0x94, 0x8d, 0xaa, 0x62, 0x62, 0xce, 0x88, 0xde, 0x70, 0x30, 0xdb, 0x20, 0x3a, 0x96, 0x6d, + 0x46, 0x39, 0x45, 0x27, 0x3c, 0x57, 0xb9, 0xeb, 0x2a, 0x6f, 0x54, 0x0b, 0x67, 0xfc, 0x00, 0x9a, + 0x4d, 0x14, 0xcd, 0xb2, 0x28, 0xd7, 0x38, 0xa1, 0x96, 0xe3, 0xad, 0x29, 0x9c, 0x0a, 0x59, 0xf5, + 0x16, 0xc1, 0x16, 0xf7, 0x0d, 0xe7, 0x42, 0x86, 0x75, 0x82, 0x5b, 0xcd, 0xc6, 0x1a, 0x7e, 0xa4, + 0x6d, 0x10, 0xca, 0x12, 0x56, 0x7a, 0x70, 0x7c, 0xc3, 0xff, 0xc3, 0x06, 0x0f, 0x0a, 0x6e, 0x36, + 0x18, 0x76, 0x68, 0x9b, 0x05, 0x58, 0x0b, 0xd3, 0x21, 0xa7, 0x98, 0xa9, 0x98, 0x58, 0xb1, 0xd6, + 0xc2, 0x2c, 0xc0, 0x76, 0x3e, 0xd1, 0x43, 0xa7, 0xa6, 0x49, 0xad, 0x54, 0x97, 0x08, 0xce, 0xb3, + 0xbe, 0x8b, 0x78, 0x5a, 0x6b, 0xaf, 0x2b, 0xcd, 0x36, 0x13, 0xdc, 0xf8, 0xf6, 0xd3, 0x71, 0x3b, + 0x36, 0x6d, 0xbe, 0x15, 0xab, 0x9e, 0xd9, 0xba, 
0xe2, 0x70, 0x8d, 0xb7, 0x7d, 0x42, 0xa5, 0xdf, + 0x00, 0x9c, 0xbd, 0x43, 0x1c, 0x7e, 0x37, 0xa8, 0xbc, 0xe6, 0x57, 0xb7, 0x8c, 0x1d, 0x9d, 0x11, + 0x9b, 0x53, 0xe6, 0xd4, 0xf0, 0xe7, 0x6d, 0xec, 0x70, 0xf4, 0x21, 0x1c, 0xb1, 0x34, 0x13, 0xe7, + 0x47, 0x8b, 0x60, 0xee, 0xf0, 0xe2, 0xc2, 0x4b, 0x35, 0xf3, 0x8f, 0x7a, 0x15, 0x5d, 0x09, 0xf5, + 0xcf, 0xcb, 0xa2, 0xd9, 0xc4, 0x91, 0x75, 0x6a, 0x2a, 0x29, 0x41, 0x6b, 0x22, 0x14, 0x9a, 0x82, + 0x63, 0xeb, 0xa4, 0xc5, 0x31, 0xcb, 0x67, 0xdc, 0xa0, 0x35, 0xff, 0x09, 0x9d, 0x86, 0x87, 0x6d, + 0xcd, 0xc0, 0x0d, 0x87, 0x6c, 0xe3, 0x7c, 0xb6, 0x08, 0xe6, 0x46, 0x6b, 0xe3, 0xee, 0x8b, 0x3a, + 0xd9, 0xc6, 0x68, 0x06, 0x42, 0x61, 0xe4, 0xf4, 0x33, 0x6c, 0xe5, 0x47, 0xc4, 0x42, 0xe1, 0x7e, + 0xcf, 0x7d, 0x21, 0x7d, 0x0f, 0xe0, 0xdc, 0xde, 0x25, 0x39, 0x36, 0xb5, 0x1c, 0x8c, 0x1e, 0xc0, + 0x13, 0x41, 0x3f, 0x1b, 0xcd, 0xae, 0x3d, 0x0f, 0x8a, 0xd9, 0xb9, 0x5c, 0x65, 0xd6, 0xaf, 0x48, + 0xd6, 0x6c, 0x22, 0xa7, 0x55, 0x73, 0x9c, 0xf5, 0xe6, 0x40, 0x6f, 0xc0, 0x63, 0x16, 0x7e, 0xcc, + 0x1b, 0x21, 0xb0, 0x5e, 0x95, 0x47, 0xdc, 0xd7, 0xab, 0x1d, 0xc0, 0xdb, 0xf0, 0xc2, 0x2d, 0x9c, + 0x06, 0x37, 0xde, 0x80, 0x6c, 0xb8, 0x01, 0xf0, 0x20, 0x0d, 0x90, 0x9e, 0x03, 0x78, 0x46, 0x90, + 0x25, 0xb6, 0x5a, 0x42, 0xd3, 0x6f, 0x46, 0x9a, 0x5e, 0x11, 0x39, 0xcb, 0xa8, 0x94, 0x92, 0x33, + 0x16, 0xe9, 0x3f, 0xec, 0xf4, 0xd7, 0x00, 0xce, 0xf4, 0x01, 0xef, 0xb7, 0xf7, 0x36, 0x44, 0xfe, + 0xd9, 0xd3, 0xdb, 0xdc, 0x33, 0x91, 0xe6, 0xc6, 0x51, 0x4f, 0x9a, 0xf1, 0xa0, 0x03, 0xf7, 0xb3, + 0x09, 0x0b, 0x6e, 0x3f, 0xe3, 0x11, 0x63, 0x84, 0x66, 0xc3, 0x84, 0xc2, 0xa1, 0x09, 0x95, 0x9e, + 0x01, 0x38, 0xb3, 0xc4, 0xb0, 0xc6, 0xf1, 0x50, 0x99, 0xf6, 0xd1, 0xba, 0xf7, 0xe1, 0x64, 0x0f, + 0x89, 0xa2, 0xf2, 0x3d, 0x38, 0x5c, 0xcc, 0xbe, 0x54, 0x33, 0xb5, 0x89, 0x38, 0x91, 0x92, 0x01, + 0x67, 0x96, 0x71, 0x0b, 0x0f, 0x09, 0x7c, 0x1f, 0x14, 0xfd, 0x95, 0x85, 0x27, 0xdd, 0xfd, 0x71, + 0x8f, 0x98, 0xb8, 0x8e, 0x19, 0xc1, 0x9d, 0x5d, 0x7d, 0xcb, 0xcf, 0x00, 0x45, 0x86, 
0xaa, 0xc8, + 0x70, 0x19, 0x5e, 0xd2, 0x5b, 0xb4, 0xdd, 0x0c, 0x46, 0xd9, 0xd4, 0x2c, 0xcd, 0xc0, 0x2c, 0x9e, + 0x6b, 0x95, 0xd1, 0x4f, 0xb1, 0xce, 0x7d, 0x6e, 0x4e, 0x47, 0xb7, 0xb5, 0x57, 0x72, 0xb0, 0xb7, + 0x97, 0xe0, 0x38, 0xb1, 0x38, 0x66, 0x1b, 0x5a, 0x4b, 0x6c, 0xde, 0x5c, 0x45, 0x92, 0x93, 0x2e, + 0x3d, 0xd9, 0x05, 0xf8, 0x9e, 0xef, 0xe9, 0x85, 0xe8, 0x2c, 0x44, 0x4b, 0x30, 0xa7, 0x19, 0x06, + 0xc3, 0x86, 0x38, 0xec, 0xc5, 0x1c, 0xe6, 0x2a, 0xe7, 0x93, 0xe3, 0xa8, 0x5d, 0xc7, 0x5a, 0x78, + 0x15, 0x9a, 0x86, 0xe3, 0x94, 0x35, 0x31, 0x6b, 0xac, 0x6d, 0xe5, 0xc7, 0xc4, 0x9e, 0x3d, 0x24, + 0x9e, 0x17, 0xb7, 0x50, 0x1d, 0x8e, 0x6c, 0x10, 0xbc, 0x99, 0x3f, 0x54, 0x04, 0x73, 0x47, 0x2b, + 0xf3, 0xc9, 0x81, 0x13, 0x59, 0x94, 0xbb, 0x6f, 0xee, 0x13, 0xbc, 0xe9, 0x01, 0x17, 0xc1, 0xa2, + 0x53, 0x3d, 0x9e, 0x3a, 0xd5, 0x87, 0xe3, 0x53, 0x3d, 0x0b, 0x8f, 0x46, 0x03, 0xa3, 0x71, 0x38, + 0x72, 0xf3, 0xa3, 0x3b, 0x77, 0x26, 0xfe, 0x87, 0x72, 0xf0, 0xd0, 0xbb, 0x2b, 0xea, 0xf2, 0x4a, + 0xad, 0x3e, 0x01, 0xa4, 0x9f, 0x01, 0x9c, 0x8a, 0x03, 0xf3, 0xe7, 0x5e, 0x85, 0x39, 0x4e, 0x4c, + 0xec, 0x2a, 0x0e, 0x82, 0x83, 0x81, 0x2f, 0xf6, 0x27, 0xdf, 0x5f, 0x0e, 0x79, 0xe7, 0xf7, 0xa0, + 0xd3, 0x8e, 0x16, 0xe0, 0x04, 0x7e, 0x8c, 0xf5, 0xb6, 0xcb, 0x73, 0x03, 0x33, 0xe6, 0x1e, 0x30, + 0x59, 0x91, 0x0f, 0x05, 0xf9, 0x98, 0xad, 0xcb, 0x75, 0x71, 0xeb, 0xd6, 0x8e, 0x75, 0x7c, 0x57, + 0x84, 0xab, 0xf4, 0x14, 0xc0, 0x53, 0xde, 0x18, 0xf7, 0xdf, 0xa5, 0xd9, 0x83, 0xee, 0xd2, 0x9b, + 0x51, 0x3a, 0x32, 0x83, 0xd1, 0xe1, 0x35, 0x34, 0xc4, 0x89, 0xf4, 0x25, 0x80, 0x27, 0xe3, 0x60, + 0x45, 0x1d, 0x68, 0x25, 0x4e, 0x38, 0x18, 0x28, 0x43, 0x26, 0x0f, 0x22, 0xa4, 0xcb, 0x70, 0xcc, + 0x93, 0x27, 0xfe, 0xf9, 0x92, 0x40, 0xa1, 0x58, 0xe3, 0x7b, 0x49, 0xdf, 0x64, 0x7a, 0xd9, 0xab, + 0xb7, 0x4d, 0x53, 0x63, 0x5b, 0xa8, 0x04, 0x27, 0x39, 0xe5, 0x5a, 0xab, 0x61, 0x53, 0x62, 0xf1, + 0x86, 0x4e, 0xdb, 0x16, 0x17, 0xc0, 0x46, 0x6b, 0xc7, 0x84, 0x61, 0xd5, 0x7d, 0xbf, 0xe4, 0xbe, + 0x46, 0x32, 0x3c, 0xee, 
0xb4, 0x75, 0x1d, 0x3b, 0x4e, 0xc4, 0x3b, 0x23, 0xbc, 0x27, 0x7d, 0x53, + 0xc8, 0xff, 0x36, 0x1c, 0x8b, 0xb4, 0xba, 0x9a, 0x5c, 0x69, 0x1f, 0x68, 0xb2, 0xe0, 0xac, 0xe6, + 0x87, 0x28, 0xdc, 0x83, 0xa3, 0x1e, 0x89, 0xa5, 0x4e, 0xf5, 0xa0, 0x5f, 0xf5, 0x41, 0xe5, 0xe8, + 0x1c, 0xcc, 0xf5, 0x22, 0x85, 0x76, 0x07, 0x62, 0xe5, 0xa7, 0x23, 0xf0, 0x88, 0x77, 0x2e, 0xd6, + 0x3d, 0xd9, 0x8d, 0x5e, 0x01, 0x58, 0xdc, 0x4b, 0x18, 0xa1, 0x85, 0xfe, 0x07, 0xc0, 0x00, 0x1a, + 0xb1, 0x70, 0x63, 0xbf, 0xcb, 0xbd, 0xc1, 0x95, 0xd4, 0x5d, 0x55, 0x6c, 0xd9, 0x2f, 0xfe, 0x78, + 0xf5, 0x55, 0xe6, 0x2d, 0x54, 0x71, 0x65, 0xf0, 0x8e, 0xfb, 0x62, 0xc1, 0xf6, 0x76, 0xb4, 0xa3, + 0x94, 0x9e, 0x74, 0x75, 0x7a, 0x52, 0x05, 0xbf, 0x02, 0x78, 0x36, 0x5d, 0x4f, 0xa1, 0xeb, 0xc9, + 0x28, 0x07, 0x52, 0x61, 0x85, 0x41, 0x45, 0xa1, 0xb4, 0x1c, 0xae, 0xe5, 0x2a, 0xba, 0x92, 0x54, + 0x4b, 0x6a, 0x29, 0x4a, 0xa9, 0xf4, 0x04, 0xfd, 0x08, 0xbc, 0x4b, 0xac, 0x47, 0xe4, 0xa0, 0x4a, + 0x0a, 0xd7, 0x7d, 0xe4, 0x5c, 0xa1, 0x3a, 0xd4, 0x1a, 0xbf, 0x29, 0x6f, 0x87, 0x0b, 0xb9, 0x88, + 0x66, 0xfb, 0x34, 0xa5, 0x07, 0xe0, 0x77, 0x00, 0x1e, 0x4f, 0x50, 0x42, 0xe8, 0xcd, 0xfe, 0xf4, + 0x27, 0x2b, 0x82, 0x42, 0xaa, 0xce, 0x90, 0xae, 0x85, 0xf1, 0x95, 0x51, 0x29, 0x99, 0xe8, 0x38, + 0x3c, 0xc1, 0xee, 0x2f, 0x00, 0x4e, 0x25, 0xab, 0x28, 0x94, 0x3a, 0xd3, 0xfb, 0x03, 0xfa, 0xc9, + 0xae, 0x3a, 0xe5, 0xe2, 0x2a, 0xf7, 0xe8, 0x29, 0x01, 0xfd, 0x1d, 0x69, 0x50, 0x6a, 0xe7, 0x7b, + 0x15, 0x99, 0xcb, 0xf6, 0x54, 0xb2, 0xae, 0xea, 0x57, 0x4a, 0xaa, 0x0a, 0x2b, 0x4c, 0x05, 0x8b, + 0x82, 0x7f, 0x94, 0xf2, 0x8a, 0xfb, 0x8f, 0x32, 0xc6, 0x76, 0x69, 0x18, 0xb6, 0x7f, 0x00, 0xf0, + 0x68, 0xf4, 0xc6, 0x46, 0x97, 0x86, 0x10, 0x1c, 0x85, 0xf2, 0x60, 0xce, 0xfe, 0xb6, 0x5d, 0xde, + 0x55, 0xa7, 0x05, 0xdb, 0x9e, 0x1c, 0x2b, 0x07, 0x92, 0xaa, 0xec, 0x4a, 0x14, 0x81, 0x5e, 0x42, + 0xc5, 0x64, 0xc2, 0x43, 0x57, 0xd2, 0xb7, 0x00, 0x4e, 0xc4, 0xcf, 0x71, 0x74, 0x79, 0xb0, 0xf3, + 0x7e, 0x2f, 0x2a, 0x6f, 0xec, 0xaa, 0x13, 0x02, 0x61, 0xe8, 
0xee, 0x14, 0xc0, 0x2e, 0x48, 0x7b, + 0x02, 0x9b, 0x07, 0xa5, 0xc2, 0x2e, 0x78, 0xa1, 0x4e, 0xf7, 0x95, 0xc5, 0xbf, 0xab, 0xcf, 0xc1, + 0x23, 0xce, 0x6d, 0x67, 0x5e, 0x51, 0x36, 0x37, 0x37, 0xe3, 0x12, 0x41, 0x6b, 0xf3, 0x47, 0x8a, + 0xd0, 0x12, 0x97, 0xed, 0x96, 0xc6, 0xd7, 0x29, 0x33, 0xcb, 0x7b, 0xb9, 0x77, 0x73, 0x0d, 0xe1, + 0x2a, 0x33, 0xac, 0x35, 0x87, 0xf1, 0xdf, 0x64, 0x84, 0xe3, 0xc5, 0x3f, 0x47, 0x5f, 0xab, 0x7f, + 0x83, 0x61, 0x64, 0x3f, 0xba, 0xde, 0xa1, 0x6a, 0xc7, 0xff, 0x95, 0x30, 0x3b, 0xca, 0x4e, 0xcf, + 0xec, 0x2c, 0xb8, 0x9b, 0x73, 0x85, 0x32, 0x43, 0xb3, 0xc8, 0xb6, 0xf7, 0xc1, 0x49, 0xd9, 0x09, + 0x3f, 0x0e, 0x11, 0xe6, 0xda, 0x3a, 0x6d, 0x35, 0xb1, 0x6b, 0xf7, 0x7e, 0x0c, 0xb1, 0x14, 0x94, + 0x8a, 0xe0, 0xb5, 0xfa, 0x2c, 0xb3, 0xcf, 0xbf, 0xf4, 0xe8, 0x83, 0xa4, 0xf2, 0xd3, 0xee, 0x97, + 0x9d, 0xde, 0x0f, 0x5e, 0x21, 0x4c, 0x4f, 0xd0, 0xc7, 0xe9, 0x94, 0x1c, 0x24, 0xf4, 0xdd, 0x5e, + 0x9a, 0x0e, 0x12, 0xce, 0xa5, 0x0e, 0xe6, 0x75, 0x6a, 0x26, 0x0e, 0xe6, 0x22, 0x8a, 0x28, 0xa1, + 0x55, 0x77, 0x0a, 0x57, 0xc1, 0x83, 0x1b, 0xbe, 0xaf, 0x41, 0x5b, 0x9a, 0x65, 0xc8, 0x94, 0x19, + 0x8a, 0x81, 0x2d, 0x31, 0xa3, 0x4a, 0x97, 0xf6, 0xe8, 0x47, 0xb9, 0xeb, 0xdd, 0xa7, 0xa7, 0x99, + 0xc2, 0x2d, 0x2f, 0xc0, 0x92, 0x3b, 0x45, 0x81, 0x16, 0x70, 0x53, 0xde, 0xaf, 0xbe, 0x08, 0x8c, + 0x0f, 0x85, 0xf1, 0x61, 0xd7, 0xf8, 0xf0, 0x7e, 0x75, 0x6d, 0x4c, 0x24, 0xa9, 0xfe, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0xf4, 0x33, 0x56, 0x15, 0x2f, 0x15, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MetricServiceClient is the client API for MetricService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. 
+ ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type metricServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. 
+type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics). + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be + // deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. 
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error) +} + +// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricServiceServer struct { +} + +func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(ctx context.Context, req *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(ctx context.Context, req *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListMetricDescriptors(ctx context.Context, req *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") +} +func (*UnimplementedMetricServiceServer) GetMetricDescriptor(ctx context.Context, req *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(ctx context.Context, req *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(ctx context.Context, req *DeleteMetricDescriptorRequest) (*empty.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented") +} +func (*UnimplementedMetricServiceServer) ListTimeSeries(ctx context.Context, req *ListTimeSeriesRequest) 
(*ListTimeSeriesResponse, error) { + return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented") +} +func (*UnimplementedMetricServiceServer) CreateTimeSeries(ctx context.Context, req *CreateTimeSeriesRequest) (*empty.Empty, error) { + return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented") +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, 
req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err 
+ } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go new file mode 100644 index 0000000000..95c3fd93b0 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/mutation_record.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Describes a change made to a configuration. +type MutationRecord struct { + // When the change occurred. + MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. + MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MutationRecord) Reset() { *m = MutationRecord{} } +func (m *MutationRecord) String() string { return proto.CompactTextString(m) } +func (*MutationRecord) ProtoMessage() {} +func (*MutationRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_83c24e690bdb9101, []int{0} +} + +func (m *MutationRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MutationRecord.Unmarshal(m, b) +} +func (m *MutationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MutationRecord.Marshal(b, m, deterministic) +} +func (m *MutationRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutationRecord.Merge(m, src) +} +func (m *MutationRecord) XXX_Size() int { + return xxx_messageInfo_MutationRecord.Size(m) +} +func (m *MutationRecord) 
XXX_DiscardUnknown() { + xxx_messageInfo_MutationRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MutationRecord proto.InternalMessageInfo + +func (m *MutationRecord) GetMutateTime() *timestamp.Timestamp { + if m != nil { + return m.MutateTime + } + return nil +} + +func (m *MutationRecord) GetMutatedBy() string { + if m != nil { + return m.MutatedBy + } + return "" +} + +func init() { + proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor_83c24e690bdb9101) +} + +var fileDescriptor_83c24e690bdb9101 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33, + 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c, + 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c, + 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, + 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83, + 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, + 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c, + 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4, + 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4, + 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87, + 0x18, 0xe0, 0x9c, 0x93, 
0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32, + 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go new file mode 100644 index 0000000000..3935ddb468 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go @@ -0,0 +1,395 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + api "google.golang.org/genproto/googleapis/api" + _ "google.golang.org/genproto/googleapis/api/annotations" + label "google.golang.org/genproto/googleapis/api/label" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. 
+type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. + NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +var NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", +} + +var NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, +} + +func (x NotificationChannel_VerificationStatus) String() string { + return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x)) +} + +func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + // The full REST resource name for this descriptor. 
The syntax is: + // + // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email", "sms", etc. + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` // Deprecated: Do not use. + // The product launch stage for channels of this type. 
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} } +func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) } +func (*NotificationChannelDescriptor) ProtoMessage() {} +func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{0} +} + +func (m *NotificationChannelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannelDescriptor.Unmarshal(m, b) +} +func (m *NotificationChannelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannelDescriptor.Marshal(b, m, deterministic) +} +func (m *NotificationChannelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannelDescriptor.Merge(m, src) +} +func (m *NotificationChannelDescriptor) XXX_Size() int { + return xxx_messageInfo_NotificationChannelDescriptor.Size(m) +} +func (m *NotificationChannelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannelDescriptor proto.InternalMessageInfo + +func (m *NotificationChannelDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannelDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannelDescriptor) GetLabels() 
[]*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// Deprecated: Do not use. +func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if m != nil { + return m.SupportedTiers + } + return nil +} + +func (m *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage { + if m != nil { + return m.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + // The type of the notification channel. This field matches the + // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. This + // description may provide additional details, beyond the display + // name, for the channel. This may not exceed 1024 Unicode characters. 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + // Configuration fields that define the channel and its behavior. The + // permissible and required labels are specified in the + // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the + // `NotificationChannelDescriptor` corresponding to the `type` field. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // User-supplied key/value data that does not need to conform to + // the corresponding `NotificationChannelDescriptor`'s schema, unlike + // the `labels` field. This field is intended to be used for organizing + // and identifying the `NotificationChannel` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. + UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indicates whether this channel has been verified or not. On a + // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // or + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation, this field is expected to be populated. + // + // If the value is `UNVERIFIED`, then it indicates that the channel is + // non-functioning (it both requires verification and lacks verification); + // otherwise, it is assumed that the channel works. 
+ // + // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that + // the channel is of a type that does not require verification or that + // this specific channel has been exempted from verification because it was + // created prior to verification being required for channels of this type. + // + // This field cannot be modified using a standard + // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] + // operation. To change the value of this field, you must call + // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel]. + VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"` + // Whether notifications are forwarded to the described channel. This makes + // it possible to disable delivery of notifications to a particular channel + // without removing the channel from all alerting policies that reference + // the channel. This is a more convenient approach when the change is + // temporary and you want to receive notifications from the same set + // of alerting policies on the channel at some point in the future. 
+ Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannel) Reset() { *m = NotificationChannel{} } +func (m *NotificationChannel) String() string { return proto.CompactTextString(m) } +func (*NotificationChannel) ProtoMessage() {} +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{1} +} + +func (m *NotificationChannel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannel.Unmarshal(m, b) +} +func (m *NotificationChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannel.Marshal(b, m, deterministic) +} +func (m *NotificationChannel) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannel.Merge(m, src) +} +func (m *NotificationChannel) XXX_Size() int { + return xxx_messageInfo_NotificationChannel.Size(m) +} +func (m *NotificationChannel) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannel.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannel proto.InternalMessageInfo + +func (m *NotificationChannel) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannel) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannel) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannel) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannel) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NotificationChannel) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *NotificationChannel) GetVerificationStatus() 
NotificationChannel_VerificationStatus { + if m != nil { + return m.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (m *NotificationChannel) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value) + proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor") + proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.UserLabelsEntry") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor_4399f1e4bc1a75ef) +} + +var fileDescriptor_4399f1e4bc1a75ef = []byte{ + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0xc5, 0x49, 0xfa, 0x1a, 0x57, 0x69, 0x99, 0x56, 0x60, 0x82, 0x0a, 0x69, 0x59, 0x10, 0x55, + 0xc2, 0x96, 0x12, 0x10, 0x34, 0x94, 0x4a, 0x79, 0x15, 0x22, 0xd1, 0x10, 0xe5, 0x85, 0x54, 0x55, + 0xb2, 0x1c, 0x67, 0xea, 0x0e, 0x38, 0x1e, 0x6b, 0xc6, 0x0e, 0x0a, 0x51, 0xff, 0x84, 0x05, 0x2b, + 0x24, 0xf8, 0x14, 0x3e, 0xa5, 0x2b, 0xd6, 0x2c, 0x10, 0xf2, 0x23, 0xb1, 0xd3, 0xb8, 0x90, 0x76, + 0x37, 0xf7, 0x9c, 0x7b, 0xcf, 0xdc, 0x99, 0x39, 0xd7, 0x06, 0x8f, 0x35, 0x42, 0x34, 0x1d, 0x49, + 0x7d, 0x62, 0x60, 0x8b, 0x50, 0x6c, 0x68, 0xd2, 0x20, 0x27, 0x19, 0xc4, 0xc2, 0xa7, 0x58, 0x55, + 0x2c, 0x4c, 0x0c, 0xd1, 0xa4, 0xc4, 0x22, 0x70, 0xd3, 0x4b, 0x14, 0x83, 0x44, 0x71, 0x90, 0x4b, + 0xdd, 0xf1, 0xcb, 0x15, 0x13, 0x4b, 0xba, 0xd2, 0x45, 0xba, 0x97, 0x9d, 0xda, 0x9a, 0xc2, 0x6d, + 
0x43, 0x3d, 0x93, 0x99, 0xa5, 0x68, 0xc8, 0xa7, 0xef, 0x85, 0x68, 0x8a, 0x18, 0xb1, 0xa9, 0x3a, + 0xa6, 0xb6, 0x23, 0x1b, 0x52, 0x49, 0xbf, 0x3f, 0x6e, 0x25, 0xf5, 0xc0, 0x4f, 0x71, 0xa3, 0xae, + 0x7d, 0x2a, 0x7d, 0xa2, 0x8a, 0x69, 0x22, 0xca, 0x3c, 0x7e, 0xe7, 0x57, 0x02, 0x6c, 0xd5, 0x42, + 0x27, 0x28, 0x9d, 0x29, 0x86, 0x81, 0xf4, 0x32, 0x62, 0x2a, 0xc5, 0xa6, 0x45, 0x28, 0x84, 0x20, + 0x61, 0x28, 0x7d, 0x24, 0x2c, 0xa6, 0xb9, 0xcc, 0x4a, 0xc3, 0x5d, 0x3b, 0x98, 0x35, 0x34, 0x91, + 0xc0, 0x79, 0x98, 0xb3, 0x86, 0xdb, 0x60, 0xb5, 0x87, 0x99, 0xa9, 0x2b, 0x43, 0xd9, 0xcd, 0x8f, + 0xb9, 0x1c, 0xef, 0x63, 0x35, 0xa7, 0x2c, 0x0d, 0xf8, 0x9e, 0x2f, 0x8c, 0x89, 0x21, 0xc4, 0xfd, + 0x8c, 0x00, 0x82, 0x39, 0xb0, 0xe8, 0x5e, 0x0d, 0x13, 0x12, 0xe9, 0x78, 0x86, 0xcf, 0xde, 0x17, + 0xfd, 0xab, 0x54, 0x4c, 0x2c, 0xbe, 0x75, 0x98, 0xa0, 0xb3, 0x86, 0x9f, 0x0a, 0x6b, 0x60, 0x8d, + 0xd9, 0xa6, 0x49, 0xa8, 0x85, 0x7a, 0xb2, 0x85, 0x11, 0x65, 0xc2, 0x42, 0x3a, 0x9e, 0x49, 0x66, + 0xb7, 0xc5, 0xa8, 0x87, 0x10, 0x9b, 0x88, 0x0e, 0xb0, 0x8a, 0x5a, 0x18, 0xd1, 0x62, 0x4c, 0xe0, + 0x1a, 0xc9, 0x49, 0xb5, 0x03, 0x31, 0x98, 0x07, 0xab, 0xe1, 0x77, 0x10, 0x96, 0xd2, 0x5c, 0x26, + 0x99, 0xbd, 0x3b, 0xdd, 0x8a, 0xc3, 0x37, 0x1d, 0xba, 0xc1, 0xeb, 0x41, 0x90, 0xff, 0x1a, 0xbb, + 0x28, 0x7c, 0x89, 0x81, 0xe7, 0xa1, 0x1d, 0xbd, 0x32, 0xc5, 0xc4, 0x4c, 0x54, 0x49, 0x5f, 0xfa, + 0xf7, 0x6d, 0x1f, 0x9a, 0x94, 0x7c, 0x40, 0xaa, 0xc5, 0xa4, 0x91, 0xbf, 0x3a, 0x9f, 0x72, 0xd8, + 0x4c, 0x05, 0x93, 0x46, 0xaa, 0x87, 0xc9, 0xbd, 0x09, 0x78, 0x0e, 0xeb, 0x84, 0x6a, 0x8a, 0x81, + 0x3f, 0xbb, 0x45, 0x4c, 0x1a, 0x85, 0xc3, 0x9b, 0x29, 0x96, 0x4f, 0x89, 0xde, 0x43, 0x0e, 0xeb, + 0x2d, 0x6e, 0xa6, 0xc2, 0xed, 0xee, 0x7c, 0x5b, 0x02, 0x1b, 0x11, 0x97, 0x10, 0x69, 0xaa, 0x28, + 0xf3, 0x5d, 0x36, 0x5a, 0xfc, 0xbf, 0x46, 0x4b, 0xcc, 0x1a, 0xed, 0x68, 0x62, 0xb4, 0x05, 0xd7, + 0x68, 0xcf, 0xa2, 0xad, 0x12, 0xd1, 0xa7, 0x67, 0x43, 0x56, 0x31, 0x2c, 0x3a, 0x9c, 0x58, 0xf0, + 0x18, 0xf0, 0x36, 0x43, 0x54, 0xf6, 
0x35, 0x97, 0x5d, 0xcd, 0xbd, 0xf9, 0x35, 0xdb, 0x0c, 0xd1, + 0xb0, 0x2e, 0xb0, 0x27, 0x00, 0xec, 0x83, 0x8d, 0x01, 0xa2, 0x93, 0x12, 0xc7, 0x94, 0x96, 0xcd, + 0x84, 0x15, 0xd7, 0x95, 0xfb, 0xf3, 0xef, 0xd1, 0x09, 0x89, 0x34, 0x5d, 0x8d, 0x06, 0x1c, 0xcc, + 0x60, 0xf0, 0x29, 0x58, 0x42, 0x86, 0xd2, 0xd5, 0x51, 0x4f, 0xe0, 0xd3, 0x5c, 0x86, 0xcf, 0xa6, + 0xc6, 0x5b, 0x8c, 0xbf, 0x21, 0x62, 0x91, 0x10, 0xbd, 0xa3, 0xe8, 0x36, 0x6a, 0x8c, 0x53, 0x53, + 0x7b, 0x80, 0x0f, 0xf5, 0x0f, 0xd7, 0x41, 0xfc, 0x23, 0x1a, 0xfa, 0x4f, 0xe9, 0x2c, 0xe1, 0x26, + 0x58, 0x18, 0x38, 0x25, 0xfe, 0x77, 0xc1, 0x0b, 0xf2, 0xb1, 0x17, 0x5c, 0xea, 0x15, 0x58, 0xbb, + 0x74, 0xfc, 0xeb, 0x94, 0xef, 0xbc, 0x07, 0x70, 0xf6, 0x64, 0xf0, 0x11, 0x78, 0xd8, 0xa9, 0x34, + 0xaa, 0x87, 0xd5, 0x52, 0xa1, 0x55, 0x7d, 0x57, 0x93, 0x9b, 0xad, 0x42, 0xab, 0xdd, 0x94, 0xdb, + 0xb5, 0x66, 0xbd, 0x52, 0xaa, 0x1e, 0x56, 0x2b, 0xe5, 0xf5, 0x5b, 0x30, 0x09, 0x40, 0xbb, 0xe6, + 0xa5, 0x55, 0xca, 0xeb, 0x1c, 0x5c, 0x05, 0xcb, 0x93, 0x28, 0x96, 0xff, 0xc3, 0x5d, 0x14, 0x7e, + 0x73, 0xe0, 0xc9, 0xb5, 0x46, 0x19, 0x1e, 0xcc, 0x37, 0xc0, 0x4c, 0x1a, 0x85, 0x51, 0xd9, 0x9f, + 0x95, 0x73, 0xf8, 0xe6, 0xba, 0x83, 0x7b, 0xa5, 0xd2, 0xfe, 0x3c, 0x03, 0x7b, 0x65, 0x35, 0xb7, + 0x5b, 0xfc, 0xce, 0x01, 0x41, 0x25, 0xfd, 0x48, 0x87, 0x15, 0x6f, 0x87, 0x0f, 0x5f, 0x77, 0x9c, + 0x51, 0xe7, 0x8e, 0x0f, 0xfc, 0x54, 0x8d, 0xe8, 0x8a, 0xa1, 0x89, 0x84, 0x6a, 0x92, 0x86, 0x0c, + 0xd7, 0x37, 0x52, 0x70, 0x77, 0xd3, 0xff, 0xab, 0x97, 0x41, 0xf4, 0x23, 0x96, 0x7a, 0xed, 0x09, + 0x94, 0x74, 0x62, 0xf7, 0xc4, 0xa3, 0x60, 0xc7, 0x4e, 0xee, 0xe7, 0x98, 0x3c, 0x71, 0xc9, 0x93, + 0x80, 0x3c, 0xe9, 0xe4, 0xba, 0x8b, 0xee, 0x26, 0xb9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, + 0xba, 0x27, 0x74, 0xa4, 0x07, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go new file mode 100644 index 
0000000000..9cff827943 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go @@ -0,0 +1,1377 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/golang/protobuf/ptypes/struct" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + // Required. The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // Note that this names the parent container in which to look for the + // descriptors; to retrieve a single descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. 
If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelDescriptorsRequest) Reset() { + *m = ListNotificationChannelDescriptorsRequest{} +} +func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{0} +} + +func (m *ListNotificationChannelDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Merge(m, src) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Size(m) +} +func (m *ListNotificationChannelDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelDescriptorsRequest proto.InternalMessageInfo + +func (m 
*ListNotificationChannelDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannelDescriptors` response. +type ListNotificationChannelDescriptorsResponse struct { + // The monitored resource descriptors supported for the specified + // project, optionally filtered. + ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelDescriptorsResponse) Reset() { + *m = ListNotificationChannelDescriptorsResponse{} +} +func (m *ListNotificationChannelDescriptorsResponse) String() string { + return proto.CompactTextString(m) +} +func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {} +func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{1} +} + +func (m *ListNotificationChannelDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Merge(m, src) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Size(m) +} +func (m *ListNotificationChannelDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelDescriptorsResponse proto.InternalMessageInfo + +func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor { + if m != nil { + return m.ChannelDescriptors + } + return nil +} + +func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The 
`GetNotificationChannelDescriptor` response. +type GetNotificationChannelDescriptorRequest struct { + // Required. The channel type for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelDescriptorRequest) Reset() { + *m = GetNotificationChannelDescriptorRequest{} +} +func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{2} +} + +func (m *GetNotificationChannelDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.Merge(m, src) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Size(m) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelDescriptorRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateNotificationChannel` request. 
+type CreateNotificationChannelRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container into which the channel will be + // written, this does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Required. The definition of the `NotificationChannel` to create. + NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} } +func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNotificationChannelRequest) ProtoMessage() {} +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{3} +} + +func (m *CreateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *CreateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *CreateNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNotificationChannelRequest.Merge(m, src) +} +func (m *CreateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_CreateNotificationChannelRequest.Size(m) +} +func (m *CreateNotificationChannelRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_CreateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNotificationChannelRequest proto.InternalMessageInfo + +func (m *CreateNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + // Required. The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER] + // + // This names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] + // operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} } +func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsRequest) ProtoMessage() {} +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{4} +} + +func (m *ListNotificationChannelsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsRequest.Unmarshal(m, b) +} +func (m *ListNotificationChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsRequest.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsRequest.Merge(m, src) +} +func (m *ListNotificationChannelsRequest) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsRequest.Size(m) +} +func (m *ListNotificationChannelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsRequest proto.InternalMessageInfo + +func (m *ListNotificationChannelsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetOrderBy() string { 
+ if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} } +func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsResponse) ProtoMessage() {} +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{5} +} + +func (m *ListNotificationChannelsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsResponse.Unmarshal(m, b) +} +func (m *ListNotificationChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsResponse.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsResponse.Merge(m, src) +} +func (m 
*ListNotificationChannelsResponse) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsResponse.Size(m) +} +func (m *ListNotificationChannelsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsResponse proto.InternalMessageInfo + +func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *ListNotificationChannelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} } +func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelRequest) ProtoMessage() {} +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{6} +} + +func (m *GetNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelRequest.Merge(m, src) +} +func (m *GetNotificationChannelRequest) XXX_Size() int { + return 
xxx_messageInfo_GetNotificationChannelRequest.Size(m) +} +func (m *GetNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. +type UpdateNotificationChannelRequest struct { + // The fields to update. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} } +func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNotificationChannelRequest) ProtoMessage() {} +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{7} +} + +func (m *UpdateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *UpdateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *UpdateNotificationChannelRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_UpdateNotificationChannelRequest.Merge(m, src) +} +func (m *UpdateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_UpdateNotificationChannelRequest.Size(m) +} +func (m *UpdateNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNotificationChannelRequest proto.InternalMessageInfo + +func (m *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + // Required. The channel for which to execute the request. The format is: + // + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} } +func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNotificationChannelRequest) ProtoMessage() {} +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{8} +} + +func (m *DeleteNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNotificationChannelRequest.Unmarshal(m, b) +} +func (m *DeleteNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *DeleteNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNotificationChannelRequest.Merge(m, src) +} +func (m *DeleteNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNotificationChannelRequest.Size(m) +} +func (m *DeleteNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNotificationChannelRequest proto.InternalMessageInfo + +func (m *DeleteNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteNotificationChannelRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + // Required. The notification channel to which to send a verification code. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendNotificationChannelVerificationCodeRequest) Reset() { + *m = SendNotificationChannelVerificationCodeRequest{} +} +func (m *SendNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{9} +} + +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Unmarshal(m, b) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Merge(m, src) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Size() int { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Size(m) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendNotificationChannelVerificationCodeRequest proto.InternalMessageInfo + +func (m *SendNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + // Required. 
The notification channel for which a verification code is to be generated + // and retrieved. This must name a channel that is already verified; if + // the specified channel is not verified, the request will fail. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). + ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelVerificationCodeRequest) Reset() { + *m = GetNotificationChannelVerificationCodeRequest{} +} +func (m *GetNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{10} +} + +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Merge(m, src) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Size(m) +} +func (m *GetNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelVerificationCodeRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeResponse struct { + // The verification code, which may be used to verify other channels + // that have an equivalent identity (i.e. other channels of the same + // type with the same fingerprint such as other email channels with + // the same email address or other sms channels with the same number). + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // The expiration time associated with the code that was returned. If + // an expiration was provided in the request, this is the minimum of the + // requested expiration in the request and the max permitted expiration. 
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelVerificationCodeResponse) Reset() { + *m = GetNotificationChannelVerificationCodeResponse{} +} +func (m *GetNotificationChannelVerificationCodeResponse) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{11} +} + +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Unmarshal(m, b) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Merge(m, src) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Size(m) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelVerificationCodeResponse proto.InternalMessageInfo + +func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The 
`VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + // Required. The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). + Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} } +func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*VerifyNotificationChannelRequest) ProtoMessage() {} +func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{12} +} + +func (m *VerifyNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyNotificationChannelRequest.Unmarshal(m, b) +} +func (m *VerifyNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *VerifyNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyNotificationChannelRequest.Merge(m, src) +} +func (m *VerifyNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_VerifyNotificationChannelRequest.Size(m) +} +func (m *VerifyNotificationChannelRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_VerifyNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyNotificationChannelRequest proto.InternalMessageInfo + +func (m *VerifyNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VerifyNotificationChannelRequest) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func init() { + proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest") + proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse") + proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest") + proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest") + proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest") + proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse") + proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest") + proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest") + proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest") + proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse") + 
proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor_7e2bcd7194b305fe) +} + +var fileDescriptor_7e2bcd7194b305fe = []byte{ + // 1210 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x41, 0x6f, 0xdc, 0x44, + 0x14, 0xd6, 0xec, 0x26, 0x69, 0xf2, 0x22, 0x84, 0x34, 0x0d, 0xe9, 0xee, 0xb6, 0x55, 0x2d, 0x1f, + 0x9a, 0x74, 0x95, 0xd8, 0x52, 0x96, 0x06, 0x9a, 0xa8, 0xb4, 0xde, 0x24, 0xad, 0x40, 0x14, 0x45, + 0x4e, 0x89, 0x04, 0x8a, 0x58, 0x39, 0xf6, 0xec, 0xc6, 0xcd, 0xae, 0xc7, 0x78, 0x66, 0x93, 0xa6, + 0x55, 0x91, 0x68, 0x25, 0x10, 0x67, 0x7e, 0x05, 0x3d, 0x20, 0x2e, 0x08, 0x09, 0x71, 0x42, 0x9c, + 0x72, 0x42, 0x70, 0x0b, 0x20, 0xf5, 0xd0, 0x13, 0xe2, 0x17, 0x70, 0x42, 0x1e, 0x7b, 0xb3, 0xce, + 0xc6, 0xde, 0xb5, 0xb3, 0xe5, 0xb6, 0x9e, 0xf7, 0x66, 0xde, 0xfb, 0xbe, 0xf7, 0xcd, 0xbc, 0x97, + 0x80, 0xda, 0xa0, 0xb4, 0xd1, 0x24, 0x6a, 0x8b, 0x3a, 0x36, 0xa7, 0x9e, 0xed, 0x34, 0xd4, 0xbd, + 0x8a, 0xea, 0x50, 0x6e, 0xd7, 0x6d, 0xd3, 0xe0, 0x36, 0x75, 0x6a, 0x8c, 0x78, 0x7b, 0xb6, 0x49, + 0x14, 0xd7, 0xa3, 0x9c, 0xe2, 0xa9, 0x60, 0x83, 0xd2, 0xdd, 0xa0, 0xec, 0x55, 0x4a, 0x97, 0xc2, + 0x63, 0x0c, 0xd7, 0x56, 0x0d, 0xc7, 0xa1, 0x5c, 0x6c, 0x65, 0xc1, 0x9e, 0xd2, 0x85, 0x88, 0xd5, + 0x6c, 0xda, 0xc4, 0xe1, 0xa1, 0xe1, 0x4a, 0xc4, 0x50, 0xb7, 0x49, 0xd3, 0xaa, 0x6d, 0x93, 0x1d, + 0x63, 0xcf, 0xa6, 0x5e, 0xe8, 0x50, 0x8c, 0x38, 0x78, 0x84, 0xd1, 0xb6, 0xd7, 0x49, 0xa4, 0x34, + 0x33, 0x30, 0xf3, 0xd0, 0xf1, 0x62, 0xe8, 0x28, 0xbe, 0xb6, 0xdb, 0x75, 0x95, 0xb4, 0x5c, 0x7e, + 0x10, 0x1a, 0xa5, 0x5e, 0x63, 0x90, 0x46, 0xcb, 0x60, 0xbb, 0xa1, 0xc7, 0xa5, 0x5e, 0x0f, 0xc6, + 0xbd, 0xb6, 0xd9, 0x8b, 0xe0, 0xd8, 0xca, 0xed, 0x16, 0x61, 0xdc, 0x68, 0xb9, 0x81, 0x83, 0xfc, + 0x13, 0x82, 0x6b, 0xef, 0xdb, 0x8c, 0x7f, 0x10, 0x49, 0x6c, 0x65, 0xc7, 0x70, 0x1c, 
0xd2, 0x5c, + 0x25, 0xcc, 0xf4, 0x6c, 0x97, 0x53, 0x8f, 0xe9, 0xe4, 0xd3, 0x36, 0x61, 0x1c, 0x6f, 0xc0, 0x88, + 0x63, 0xb4, 0x48, 0x61, 0x44, 0x42, 0xb3, 0x13, 0xd5, 0x5b, 0x2f, 0xb4, 0xdc, 0xbf, 0xda, 0x0d, + 0xfc, 0x56, 0x84, 0xec, 0x20, 0x9e, 0xe1, 0xda, 0x4c, 0x31, 0x69, 0x4b, 0xed, 0x7b, 0xac, 0x2e, + 0x0e, 0xc3, 0x17, 0x61, 0xc2, 0x35, 0x1a, 0xa4, 0xc6, 0xec, 0x47, 0xa4, 0x90, 0x93, 0xd0, 0xec, + 0xa8, 0x3e, 0xee, 0x2f, 0x6c, 0xd8, 0x8f, 0x08, 0xbe, 0x0c, 0x20, 0x8c, 0x9c, 0xee, 0x12, 0xa7, + 0x90, 0xf7, 0xe3, 0xea, 0xc2, 0xfd, 0xbe, 0xbf, 0x20, 0xff, 0x88, 0xa0, 0x9c, 0x26, 0x7d, 0xe6, + 0x52, 0x87, 0x11, 0x6c, 0xc1, 0x79, 0x33, 0xb0, 0xd6, 0xac, 0xae, 0xb9, 0x80, 0xa4, 0xfc, 0xec, + 0xe4, 0x42, 0x45, 0x89, 0xd3, 0x8e, 0xd2, 0x1f, 0x02, 0x36, 0x4f, 0x45, 0xc3, 0x57, 0xe1, 0x75, + 0x87, 0x3c, 0xe4, 0xb5, 0x48, 0xe2, 0x39, 0x91, 0xf8, 0x6b, 0xfe, 0xf2, 0xfa, 0x71, 0xf2, 0x9f, + 0xc1, 0xcc, 0x5d, 0xd2, 0x3f, 0xf5, 0x5e, 0xe2, 0xf3, 0x51, 0xe2, 0x61, 0x38, 0xe2, 0xe5, 0x5f, + 0x11, 0x48, 0x2b, 0x1e, 0x31, 0x38, 0x89, 0xf1, 0xee, 0x44, 0x7e, 0xf7, 0x44, 0xe4, 0xeb, 0x22, + 0xb2, 0x8a, 0xe7, 0x33, 0x45, 0x0e, 0x0b, 0x6d, 0xc0, 0xd4, 0x89, 0x9b, 0x1b, 0x52, 0x27, 0xc8, + 0x99, 0x5c, 0xb8, 0x96, 0x9a, 0xfe, 0x6a, 0xfe, 0x85, 0x96, 0xd3, 0xcf, 0x3b, 0xa7, 0x2d, 0xf2, + 0x1f, 0x08, 0xae, 0x24, 0xe8, 0x81, 0xf5, 0x22, 0x1a, 0x1d, 0x1e, 0xd1, 0x34, 0x8c, 0xd5, 0xed, + 0x26, 0x27, 0x5e, 0x61, 0x4c, 0x14, 0x38, 0xfc, 0xc2, 0x45, 0x18, 0xa7, 0x9e, 0x45, 0xbc, 0xda, + 0xf6, 0x41, 0xe1, 0x9c, 0xb0, 0x9c, 0x13, 0xdf, 0xd5, 0x83, 0x93, 0x6a, 0xcf, 0xf7, 0x55, 0xfb, + 0x48, 0xaf, 0xda, 0x9f, 0x23, 0x90, 0x92, 0xd1, 0x85, 0x1a, 0xff, 0x04, 0xde, 0x88, 0x63, 0x99, + 0x15, 0xf2, 0x42, 0xe5, 0xe9, 0x69, 0xd6, 0xa7, 0x62, 0x18, 0x4e, 0xaf, 0xee, 0x07, 0x70, 0x39, + 0x5e, 0xdd, 0x7d, 0x95, 0x05, 0x67, 0xa9, 0x83, 0xfc, 0x33, 0x02, 0xe9, 0x43, 0xd7, 0xea, 0xaf, + 0xe4, 0x65, 0x98, 0x6c, 0x0b, 0x1f, 0xf1, 0x7c, 0x86, 0xaa, 0x2b, 0x75, 0xe8, 0xe8, 0xbc, 0x90, + 0xca, 0x1d, 0xff, 0x85, 
0xbd, 0x67, 0xb0, 0x5d, 0x1d, 0x02, 0x77, 0xff, 0x77, 0xa2, 0x76, 0xf3, + 0xaf, 0x4e, 0xbb, 0xcf, 0x10, 0x48, 0xab, 0xa4, 0x49, 0xb2, 0x5f, 0xc7, 0x33, 0x91, 0x86, 0xa7, + 0x60, 0xb4, 0x4e, 0x3d, 0x33, 0xb8, 0x08, 0xe3, 0x7a, 0xf0, 0x21, 0x3f, 0x06, 0x65, 0x83, 0x38, + 0x56, 0xcc, 0xb6, 0x4d, 0xe2, 0x75, 0x97, 0xa8, 0x45, 0x7a, 0x53, 0x42, 0xc3, 0xd7, 0xf1, 0x07, + 0x04, 0xf3, 0xf1, 0xa2, 0xf9, 0xff, 0x83, 0xfb, 0xfa, 0x20, 0x0f, 0x5d, 0xdb, 0x23, 0x35, 0xbf, + 0x49, 0x26, 0xea, 0xe3, 0x7e, 0xa7, 0x83, 0xea, 0x10, 0xb8, 0xfb, 0x0b, 0xf2, 0xe7, 0x08, 0x94, + 0xb4, 0x99, 0x87, 0x17, 0x15, 0xc3, 0x88, 0x49, 0xad, 0x30, 0x75, 0x5d, 0xfc, 0x1e, 0x2e, 0x87, + 0x2f, 0x10, 0x48, 0x22, 0xda, 0x41, 0x0a, 0x01, 0xbd, 0x02, 0xc2, 0x2e, 0x84, 0x00, 0xc4, 0xf5, + 0x0f, 0x84, 0x2d, 0x16, 0x16, 0xfe, 0xc6, 0x50, 0x8a, 0xd9, 0xb6, 0x11, 0x4c, 0x6a, 0xf8, 0x1f, + 0x04, 0xf2, 0xe0, 0xa6, 0x8d, 0x6f, 0xc5, 0x5f, 0xaa, 0xd4, 0xd3, 0x4a, 0xe9, 0xf6, 0xd9, 0x0f, + 0x08, 0x4a, 0x24, 0xaf, 0x1c, 0x69, 0x02, 0xea, 0xd3, 0xdf, 0x5f, 0x7e, 0x9d, 0x5b, 0xc4, 0x6f, + 0xfa, 0x23, 0xdc, 0x63, 0x7f, 0xe1, 0xa6, 0xeb, 0xd1, 0x07, 0xc4, 0xe4, 0x4c, 0x2d, 0x3f, 0x51, + 0x9d, 0xfe, 0x38, 0xfe, 0x42, 0x20, 0x0d, 0xea, 0xf3, 0xf8, 0x66, 0x7c, 0xae, 0x29, 0xe7, 0x83, + 0xd2, 0x59, 0x66, 0x17, 0x79, 0x2d, 0x8a, 0xee, 0x6d, 0xbc, 0x18, 0x87, 0x6e, 0x00, 0x38, 0xb5, + 0xfc, 0x04, 0xff, 0x82, 0xa0, 0x90, 0xd4, 0x95, 0xf0, 0xf5, 0x4c, 0x35, 0x38, 0x2e, 0xdd, 0x62, + 0xd6, 0x6d, 0x61, 0xc1, 0x6e, 0x44, 0x21, 0xcd, 0xe1, 0x72, 0xea, 0x82, 0x31, 0xfc, 0x3d, 0x82, + 0xe9, 0x78, 0xba, 0x71, 0x25, 0x4b, 0x71, 0x3a, 0x10, 0xd2, 0xf7, 0x04, 0x79, 0x39, 0x9a, 0xb5, + 0x82, 0xe7, 0xd2, 0x16, 0x42, 0xd0, 0xff, 0x12, 0x41, 0x31, 0x71, 0x8a, 0xc3, 0x09, 0x44, 0x0e, + 0x1a, 0xfb, 0xb2, 0x64, 0x6f, 0x1d, 0x69, 0x45, 0x3f, 0xd9, 0xb9, 0xb8, 0x06, 0x29, 0x20, 0x55, + 0xe5, 0x0c, 0x85, 0x58, 0x8a, 0x6d, 0xb4, 0xf8, 0x59, 0x0e, 0x8a, 0x89, 0x2d, 0x3e, 0x09, 0xe6, + 0xa0, 0x99, 0x20, 0x0b, 0xcc, 0xa7, 0xe8, 0x48, 0x93, 0x22, 
0x03, 0x44, 0x32, 0xdc, 0x8f, 0x16, + 0xb4, 0x00, 0x6e, 0x8c, 0x83, 0x92, 0xb2, 0xac, 0x09, 0x2c, 0x7c, 0x8b, 0xa0, 0x98, 0x38, 0x23, + 0x24, 0xb1, 0x30, 0x68, 0xa8, 0x28, 0x4d, 0x9f, 0x6a, 0x30, 0x6b, 0xfe, 0xdf, 0xa0, 0xb2, 0x76, + 0xa4, 0x81, 0xa8, 0xac, 0x18, 0x0d, 0x02, 0x75, 0x96, 0xb3, 0xa9, 0xf3, 0x4f, 0x04, 0x33, 0x29, + 0xe7, 0x09, 0xbc, 0x1a, 0x9f, 0x7e, 0xb6, 0x71, 0x24, 0x11, 0xcc, 0x46, 0xf4, 0x92, 0xdd, 0x91, + 0xb5, 0x2c, 0x30, 0x96, 0x18, 0x71, 0xac, 0xde, 0x80, 0x4b, 0xa8, 0x8c, 0xbf, 0xcc, 0xc1, 0xd5, + 0x74, 0x6d, 0x1f, 0xaf, 0x64, 0x79, 0x44, 0x92, 0xc0, 0xad, 0x0e, 0x77, 0x48, 0xf8, 0x4a, 0xea, + 0x51, 0x2a, 0xd6, 0xe4, 0xdb, 0x99, 0xa8, 0x68, 0x10, 0x1e, 0xc7, 0xc4, 0x21, 0x82, 0x62, 0xe2, + 0xf0, 0x91, 0xa4, 0xcc, 0x41, 0xd3, 0x4a, 0x96, 0xfb, 0xf9, 0xde, 0x91, 0x36, 0x21, 0xc4, 0xea, + 0x8f, 0x20, 0x41, 0x4b, 0x93, 0x2b, 0x99, 0x90, 0xed, 0x89, 0x6c, 0x96, 0x50, 0xb9, 0xf4, 0x0d, + 0x3a, 0xd4, 0x8a, 0x89, 0x23, 0xd1, 0x6f, 0xda, 0x57, 0x68, 0x87, 0x73, 0x97, 0x2d, 0xa9, 0xea, + 0xfe, 0xfe, 0x7e, 0xef, 0xc0, 0x64, 0xb4, 0xf9, 0x8e, 0x6a, 0x36, 0x69, 0xdb, 0x9a, 0x77, 0x9b, + 0x06, 0xaf, 0x53, 0xaf, 0x35, 0x37, 0xc8, 0xbd, 0x1b, 0x2b, 0x83, 0xab, 0xe2, 0x11, 0xc3, 0xaa, + 0x7e, 0x87, 0xa0, 0x60, 0xd2, 0x56, 0x2c, 0x51, 0xd5, 0x42, 0x94, 0xa9, 0x70, 0xfe, 0x5a, 0xf7, + 0xef, 0xc5, 0x3a, 0xfa, 0xf8, 0x9d, 0x70, 0x47, 0x83, 0x36, 0x0d, 0xa7, 0xa1, 0x50, 0xaf, 0xa1, + 0x36, 0x88, 0x23, 0x6e, 0x8d, 0xda, 0x0d, 0x7c, 0xf2, 0x1f, 0x58, 0xcb, 0xdd, 0xaf, 0xe7, 0xb9, + 0xd2, 0xdd, 0xe0, 0x80, 0x15, 0x1f, 0xab, 0x72, 0xaf, 0x1b, 0x78, 0xb3, 0x72, 0xd8, 0x31, 0x6e, + 0x09, 0xe3, 0x56, 0xd7, 0xb8, 0xb5, 0x59, 0xd9, 0x1e, 0x13, 0x41, 0x2a, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x4f, 0x09, 0xed, 0xc9, 0xde, 0x13, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. 
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). 
+ // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient { + return ¬ificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. 
+ GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or PagerDuty service. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). 
+ // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedNotificationChannelServiceServer struct { +} + +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(ctx context.Context, req *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(ctx context.Context, req *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(ctx context.Context, req *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(ctx context.Context, req *GetNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(ctx context.Context, req *CreateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(ctx context.Context, req *UpdateNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(ctx context.Context, req *DeleteNotificationChannelRequest) (*empty.Empty, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(ctx context.Context, req *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(ctx context.Context, req *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented") +} +func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(ctx context.Context, req *VerifyNotificationChannelRequest) (*NotificationChannel, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented") +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, 
req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: 
_NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go new file mode 100644 index 0000000000..498a65f13e --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service.pb.go @@ -0,0 +1,1553 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/service.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + _ "github.com/golang/protobuf/ptypes/timestamp" + _ "google.golang.org/genproto/googleapis/api/annotations" + _ "google.golang.org/genproto/googleapis/api/monitoredres" + calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `ServiceLevelObjective.View` determines what form of +// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`, +// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs. +type ServiceLevelObjective_View int32 + +const ( + // Same as FULL. + ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0 + // Return the embedded `ServiceLevelIndicator` in the form in which it was + // defined. If it was defined using a `BasicSli`, return that `BasicSli`. + ServiceLevelObjective_FULL ServiceLevelObjective_View = 2 + // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead + // return the `ServiceLevelIndicator` with its mode of computation fully + // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using + // `RequestBasedSli` or `WindowsBasedSli`, return the + // `ServiceLevelIndicator` as it was provided. 
+ ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1 +) + +var ServiceLevelObjective_View_name = map[int32]string{ + 0: "VIEW_UNSPECIFIED", + 2: "FULL", + 1: "EXPLICIT", +} + +var ServiceLevelObjective_View_value = map[string]int32{ + "VIEW_UNSPECIFIED": 0, + "FULL": 2, + "EXPLICIT": 1, +} + +func (x ServiceLevelObjective_View) String() string { + return proto.EnumName(ServiceLevelObjective_View_name, int32(x)) +} + +func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{1, 0} +} + +// A `Service` is a discrete, autonomous, and network-accessible unit, designed +// to solve an individual concern +// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In +// Stackdriver Monitoring, a `Service` acts as the root resource under which +// operational aspects of the service are accessible. +type Service struct { + // Resource name for this Service. Of the form + // `projects/{project_id}/services/{service_id}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this Service. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // REQUIRED. Service-identifying atoms specifying the underlying service. + // + // Types that are valid to be assigned to Identifier: + // *Service_Custom_ + // *Service_AppEngine_ + // *Service_CloudEndpoints_ + // *Service_ClusterIstio_ + Identifier isService_Identifier `protobuf_oneof:"identifier"` + // Configuration for how to query telemetry on a Service. 
+ Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0} +} + +func (m *Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service.Unmarshal(m, b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return xxx_messageInfo_Service.Size(m) +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Service) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isService_Identifier interface { + isService_Identifier() +} + +type Service_Custom_ struct { + Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"` +} + +type Service_AppEngine_ struct { + AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"` +} + +type Service_CloudEndpoints_ struct { + CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"` +} + +type Service_ClusterIstio_ struct { + ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"` +} + +func (*Service_Custom_) isService_Identifier() {} + +func (*Service_AppEngine_) isService_Identifier() {} + +func 
(*Service_CloudEndpoints_) isService_Identifier() {} + +func (*Service_ClusterIstio_) isService_Identifier() {} + +func (m *Service) GetIdentifier() isService_Identifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Service) GetCustom() *Service_Custom { + if x, ok := m.GetIdentifier().(*Service_Custom_); ok { + return x.Custom + } + return nil +} + +func (m *Service) GetAppEngine() *Service_AppEngine { + if x, ok := m.GetIdentifier().(*Service_AppEngine_); ok { + return x.AppEngine + } + return nil +} + +func (m *Service) GetCloudEndpoints() *Service_CloudEndpoints { + if x, ok := m.GetIdentifier().(*Service_CloudEndpoints_); ok { + return x.CloudEndpoints + } + return nil +} + +func (m *Service) GetClusterIstio() *Service_ClusterIstio { + if x, ok := m.GetIdentifier().(*Service_ClusterIstio_); ok { + return x.ClusterIstio + } + return nil +} + +func (m *Service) GetTelemetry() *Service_Telemetry { + if m != nil { + return m.Telemetry + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Service) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Service_Custom_)(nil), + (*Service_AppEngine_)(nil), + (*Service_CloudEndpoints_)(nil), + (*Service_ClusterIstio_)(nil), + } +} + +// Custom view of service telemetry. Currently a place-holder pending final +// design. 
+type Service_Custom struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service_Custom) Reset() { *m = Service_Custom{} } +func (m *Service_Custom) String() string { return proto.CompactTextString(m) } +func (*Service_Custom) ProtoMessage() {} +func (*Service_Custom) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0, 0} +} + +func (m *Service_Custom) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service_Custom.Unmarshal(m, b) +} +func (m *Service_Custom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service_Custom.Marshal(b, m, deterministic) +} +func (m *Service_Custom) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service_Custom.Merge(m, src) +} +func (m *Service_Custom) XXX_Size() int { + return xxx_messageInfo_Service_Custom.Size(m) +} +func (m *Service_Custom) XXX_DiscardUnknown() { + xxx_messageInfo_Service_Custom.DiscardUnknown(m) +} + +var xxx_messageInfo_Service_Custom proto.InternalMessageInfo + +// App Engine service. Learn more at https://cloud.google.com/appengine. +type Service_AppEngine struct { + // The ID of the App Engine module underlying this service. 
Corresponds to + // the `module_id` resource label in the `gae_app` monitored resource: + // https://cloud.google.com/monitoring/api/resources#tag_gae_app + ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service_AppEngine) Reset() { *m = Service_AppEngine{} } +func (m *Service_AppEngine) String() string { return proto.CompactTextString(m) } +func (*Service_AppEngine) ProtoMessage() {} +func (*Service_AppEngine) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0, 1} +} + +func (m *Service_AppEngine) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service_AppEngine.Unmarshal(m, b) +} +func (m *Service_AppEngine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service_AppEngine.Marshal(b, m, deterministic) +} +func (m *Service_AppEngine) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service_AppEngine.Merge(m, src) +} +func (m *Service_AppEngine) XXX_Size() int { + return xxx_messageInfo_Service_AppEngine.Size(m) +} +func (m *Service_AppEngine) XXX_DiscardUnknown() { + xxx_messageInfo_Service_AppEngine.DiscardUnknown(m) +} + +var xxx_messageInfo_Service_AppEngine proto.InternalMessageInfo + +func (m *Service_AppEngine) GetModuleId() string { + if m != nil { + return m.ModuleId + } + return "" +} + +// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. +type Service_CloudEndpoints struct { + // The name of the Cloud Endpoints service underlying this service. 
+ // Corresponds to the `service` resource label in the `api` monitored + // resource: https://cloud.google.com/monitoring/api/resources#tag_api + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service_CloudEndpoints) Reset() { *m = Service_CloudEndpoints{} } +func (m *Service_CloudEndpoints) String() string { return proto.CompactTextString(m) } +func (*Service_CloudEndpoints) ProtoMessage() {} +func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0, 2} +} + +func (m *Service_CloudEndpoints) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service_CloudEndpoints.Unmarshal(m, b) +} +func (m *Service_CloudEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service_CloudEndpoints.Marshal(b, m, deterministic) +} +func (m *Service_CloudEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service_CloudEndpoints.Merge(m, src) +} +func (m *Service_CloudEndpoints) XXX_Size() int { + return xxx_messageInfo_Service_CloudEndpoints.Size(m) +} +func (m *Service_CloudEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_Service_CloudEndpoints.DiscardUnknown(m) +} + +var xxx_messageInfo_Service_CloudEndpoints proto.InternalMessageInfo + +func (m *Service_CloudEndpoints) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +// Istio service. Learn more at http://istio.io. +type Service_ClusterIstio struct { + // The location of the Kubernetes cluster in which this Istio service is + // defined. Corresponds to the `location` resource label in `k8s_cluster` + // resources. + Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` + // The name of the Kubernetes cluster in which this Istio service is + // defined. 
Corresponds to the `cluster_name` resource label in + // `k8s_cluster` resources. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + // The namespace of the Istio service underlying this service. Corresponds + // to the `destination_service_namespace` metric label in Istio metrics. + ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"` + // The name of the Istio service underlying this service. Corresponds to the + // `destination_service_name` metric label in Istio metrics. + ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service_ClusterIstio) Reset() { *m = Service_ClusterIstio{} } +func (m *Service_ClusterIstio) String() string { return proto.CompactTextString(m) } +func (*Service_ClusterIstio) ProtoMessage() {} +func (*Service_ClusterIstio) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0, 3} +} + +func (m *Service_ClusterIstio) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service_ClusterIstio.Unmarshal(m, b) +} +func (m *Service_ClusterIstio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service_ClusterIstio.Marshal(b, m, deterministic) +} +func (m *Service_ClusterIstio) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service_ClusterIstio.Merge(m, src) +} +func (m *Service_ClusterIstio) XXX_Size() int { + return xxx_messageInfo_Service_ClusterIstio.Size(m) +} +func (m *Service_ClusterIstio) XXX_DiscardUnknown() { + xxx_messageInfo_Service_ClusterIstio.DiscardUnknown(m) +} + +var xxx_messageInfo_Service_ClusterIstio proto.InternalMessageInfo + +func (m *Service_ClusterIstio) GetLocation() string { + if m != nil { + return m.Location + } 
+ return "" +} + +func (m *Service_ClusterIstio) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *Service_ClusterIstio) GetServiceNamespace() string { + if m != nil { + return m.ServiceNamespace + } + return "" +} + +func (m *Service_ClusterIstio) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +// Configuration for how to query telemetry on a Service. +type Service_Telemetry struct { + // The full name of the resource that defines this service. Formatted as + // described in https://cloud.google.com/apis/design/resource_names. + ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Service_Telemetry) Reset() { *m = Service_Telemetry{} } +func (m *Service_Telemetry) String() string { return proto.CompactTextString(m) } +func (*Service_Telemetry) ProtoMessage() {} +func (*Service_Telemetry) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{0, 4} +} + +func (m *Service_Telemetry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Service_Telemetry.Unmarshal(m, b) +} +func (m *Service_Telemetry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Service_Telemetry.Marshal(b, m, deterministic) +} +func (m *Service_Telemetry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service_Telemetry.Merge(m, src) +} +func (m *Service_Telemetry) XXX_Size() int { + return xxx_messageInfo_Service_Telemetry.Size(m) +} +func (m *Service_Telemetry) XXX_DiscardUnknown() { + xxx_messageInfo_Service_Telemetry.DiscardUnknown(m) +} + +var xxx_messageInfo_Service_Telemetry proto.InternalMessageInfo + +func (m *Service_Telemetry) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +// A Service-Level Objective 
(SLO) describes a level of desired good service. It +// consists of a service-level indicator (SLI), a performance goal, and a period +// over which the objective is to be evaluated against that goal. The SLO can +// use SLIs defined in a number of different manners. Typical SLOs might include +// "99% of requests in each rolling week have latency below 200 milliseconds" or +// "99.5% of requests in each calendar month return successfully." +type ServiceLevelObjective struct { + // Resource name for this `ServiceLevelObjective`. + // Of the form + // `projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Name used for UI elements listing this SLO. + DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The definition of good service, used to measure and calculate the quality + // of the `Service`'s performance with respect to a single aspect of service + // quality. + ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"` + // The fraction of service that must be good in order for this objective to be + // met. `0 < goal <= 0.999`. + Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"` + // The time period over which the objective will be evaluated. 
+ // + // Types that are valid to be assigned to Period: + // *ServiceLevelObjective_RollingPeriod + // *ServiceLevelObjective_CalendarPeriod + Period isServiceLevelObjective_Period `protobuf_oneof:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceLevelObjective) Reset() { *m = ServiceLevelObjective{} } +func (m *ServiceLevelObjective) String() string { return proto.CompactTextString(m) } +func (*ServiceLevelObjective) ProtoMessage() {} +func (*ServiceLevelObjective) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{1} +} + +func (m *ServiceLevelObjective) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceLevelObjective.Unmarshal(m, b) +} +func (m *ServiceLevelObjective) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceLevelObjective.Marshal(b, m, deterministic) +} +func (m *ServiceLevelObjective) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceLevelObjective.Merge(m, src) +} +func (m *ServiceLevelObjective) XXX_Size() int { + return xxx_messageInfo_ServiceLevelObjective.Size(m) +} +func (m *ServiceLevelObjective) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceLevelObjective.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceLevelObjective proto.InternalMessageInfo + +func (m *ServiceLevelObjective) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ServiceLevelObjective) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator { + if m != nil { + return m.ServiceLevelIndicator + } + return nil +} + +func (m *ServiceLevelObjective) GetGoal() float64 { + if m != nil { + return m.Goal + } + return 0 +} + +type isServiceLevelObjective_Period interface { + isServiceLevelObjective_Period() +} + +type ServiceLevelObjective_RollingPeriod struct 
{ + RollingPeriod *duration.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"` +} + +type ServiceLevelObjective_CalendarPeriod struct { + CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"` +} + +func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {} + +func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {} + +func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period { + if m != nil { + return m.Period + } + return nil +} + +func (m *ServiceLevelObjective) GetRollingPeriod() *duration.Duration { + if x, ok := m.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok { + return x.RollingPeriod + } + return nil +} + +func (m *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod { + if x, ok := m.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok { + return x.CalendarPeriod + } + return calendarperiod.CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServiceLevelObjective) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServiceLevelObjective_RollingPeriod)(nil), + (*ServiceLevelObjective_CalendarPeriod)(nil), + } +} + +// A Service-Level Indicator (SLI) describes the "performance" of a service. For +// some services, the SLI is well-defined. In such cases, the SLI can be +// described easily by referencing the well-known SLI and providing the needed +// parameters. Alternatively, a "custom" SLI can be defined with a query to the +// underlying metric store. An SLI is defined to be `good_service / +// total_service` over any queried time interval. The value of performance +// always falls into the range `0 <= performance <= 1`. 
A custom SLI describes +// how to compute this ratio, whether this is by dividing values from a pair of +// time series, cutting a `Distribution` into good and bad counts, or counting +// time windows in which the service complies with a criterion. For separation +// of concerns, a single Service-Level Indicator measures performance for only +// one aspect of service quality, such as fraction of successful queries or +// fast-enough queries. +type ServiceLevelIndicator struct { + // Service level indicators can be grouped by whether the "unit" of service + // being measured is based on counts of good requests or on counts of good + // time windows + // + // Types that are valid to be assigned to Type: + // *ServiceLevelIndicator_BasicSli + // *ServiceLevelIndicator_RequestBased + // *ServiceLevelIndicator_WindowsBased + Type isServiceLevelIndicator_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceLevelIndicator) Reset() { *m = ServiceLevelIndicator{} } +func (m *ServiceLevelIndicator) String() string { return proto.CompactTextString(m) } +func (*ServiceLevelIndicator) ProtoMessage() {} +func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{2} +} + +func (m *ServiceLevelIndicator) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceLevelIndicator.Unmarshal(m, b) +} +func (m *ServiceLevelIndicator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceLevelIndicator.Marshal(b, m, deterministic) +} +func (m *ServiceLevelIndicator) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceLevelIndicator.Merge(m, src) +} +func (m *ServiceLevelIndicator) XXX_Size() int { + return xxx_messageInfo_ServiceLevelIndicator.Size(m) +} +func (m *ServiceLevelIndicator) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceLevelIndicator.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ServiceLevelIndicator proto.InternalMessageInfo + +type isServiceLevelIndicator_Type interface { + isServiceLevelIndicator_Type() +} + +type ServiceLevelIndicator_BasicSli struct { + BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"` +} + +type ServiceLevelIndicator_RequestBased struct { + RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"` +} + +type ServiceLevelIndicator_WindowsBased struct { + WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"` +} + +func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {} + +func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {} + +func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ServiceLevelIndicator) GetBasicSli() *BasicSli { + if x, ok := m.GetType().(*ServiceLevelIndicator_BasicSli); ok { + return x.BasicSli + } + return nil +} + +func (m *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli { + if x, ok := m.GetType().(*ServiceLevelIndicator_RequestBased); ok { + return x.RequestBased + } + return nil +} + +func (m *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli { + if x, ok := m.GetType().(*ServiceLevelIndicator_WindowsBased); ok { + return x.WindowsBased + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServiceLevelIndicator) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServiceLevelIndicator_BasicSli)(nil), + (*ServiceLevelIndicator_RequestBased)(nil), + (*ServiceLevelIndicator_WindowsBased)(nil), + } +} + +// An SLI measuring performance on a well-known service type. Performance will +// be computed on the basis of pre-defined metrics. 
The type of the +// `service_resource` determines the metrics to use and the +// `service_resource.labels` and `metric_labels` are used to construct a +// monitoring filter to filter that metric down to just the data relevant to +// this service. +type BasicSli struct { + // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from + // other methods will not be used to calculate performance for this SLI. If + // omitted, this SLI applies to all the Service's methods. For service types + // that don't support breaking down by method, setting this field will result + // in an error. + Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"` + // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry + // from other locations will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all locations in which the Service has + // activity. For service types that don't support breaking down by location, + // setting this field will result in an error. + Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"` + // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry + // from other API versions will not be used to calculate performance for this + // SLI. If omitted, this SLI applies to all API versions. For service types + // that don't support breaking down by version, setting this field will result + // in an error. + Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"` + // This SLI can be evaluated on the basis of availability or latency. 
+ // + // Types that are valid to be assigned to SliCriteria: + // *BasicSli_Availability + // *BasicSli_Latency + SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicSli) Reset() { *m = BasicSli{} } +func (m *BasicSli) String() string { return proto.CompactTextString(m) } +func (*BasicSli) ProtoMessage() {} +func (*BasicSli) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{3} +} + +func (m *BasicSli) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicSli.Unmarshal(m, b) +} +func (m *BasicSli) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicSli.Marshal(b, m, deterministic) +} +func (m *BasicSli) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicSli.Merge(m, src) +} +func (m *BasicSli) XXX_Size() int { + return xxx_messageInfo_BasicSli.Size(m) +} +func (m *BasicSli) XXX_DiscardUnknown() { + xxx_messageInfo_BasicSli.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicSli proto.InternalMessageInfo + +func (m *BasicSli) GetMethod() []string { + if m != nil { + return m.Method + } + return nil +} + +func (m *BasicSli) GetLocation() []string { + if m != nil { + return m.Location + } + return nil +} + +func (m *BasicSli) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type isBasicSli_SliCriteria interface { + isBasicSli_SliCriteria() +} + +type BasicSli_Availability struct { + Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"` +} + +type BasicSli_Latency struct { + Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"` +} + +func (*BasicSli_Availability) isBasicSli_SliCriteria() {} + +func (*BasicSli_Latency) isBasicSli_SliCriteria() {} + +func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria { + if m != nil { + return 
m.SliCriteria + } + return nil +} + +func (m *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria { + if x, ok := m.GetSliCriteria().(*BasicSli_Availability); ok { + return x.Availability + } + return nil +} + +func (m *BasicSli) GetLatency() *BasicSli_LatencyCriteria { + if x, ok := m.GetSliCriteria().(*BasicSli_Latency); ok { + return x.Latency + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*BasicSli) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*BasicSli_Availability)(nil), + (*BasicSli_Latency)(nil), + } +} + +// Future parameters for the availability SLI. +type BasicSli_AvailabilityCriteria struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicSli_AvailabilityCriteria) Reset() { *m = BasicSli_AvailabilityCriteria{} } +func (m *BasicSli_AvailabilityCriteria) String() string { return proto.CompactTextString(m) } +func (*BasicSli_AvailabilityCriteria) ProtoMessage() {} +func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{3, 0} +} + +func (m *BasicSli_AvailabilityCriteria) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicSli_AvailabilityCriteria.Unmarshal(m, b) +} +func (m *BasicSli_AvailabilityCriteria) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicSli_AvailabilityCriteria.Marshal(b, m, deterministic) +} +func (m *BasicSli_AvailabilityCriteria) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicSli_AvailabilityCriteria.Merge(m, src) +} +func (m *BasicSli_AvailabilityCriteria) XXX_Size() int { + return xxx_messageInfo_BasicSli_AvailabilityCriteria.Size(m) +} +func (m *BasicSli_AvailabilityCriteria) XXX_DiscardUnknown() { + xxx_messageInfo_BasicSli_AvailabilityCriteria.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicSli_AvailabilityCriteria proto.InternalMessageInfo + +// 
Parameters for a latency threshold SLI. +type BasicSli_LatencyCriteria struct { + // Good service is defined to be the count of requests made to this service + // that return in no more than `threshold`. + Threshold *duration.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicSli_LatencyCriteria) Reset() { *m = BasicSli_LatencyCriteria{} } +func (m *BasicSli_LatencyCriteria) String() string { return proto.CompactTextString(m) } +func (*BasicSli_LatencyCriteria) ProtoMessage() {} +func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{3, 1} +} + +func (m *BasicSli_LatencyCriteria) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicSli_LatencyCriteria.Unmarshal(m, b) +} +func (m *BasicSli_LatencyCriteria) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicSli_LatencyCriteria.Marshal(b, m, deterministic) +} +func (m *BasicSli_LatencyCriteria) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicSli_LatencyCriteria.Merge(m, src) +} +func (m *BasicSli_LatencyCriteria) XXX_Size() int { + return xxx_messageInfo_BasicSli_LatencyCriteria.Size(m) +} +func (m *BasicSli_LatencyCriteria) XXX_DiscardUnknown() { + xxx_messageInfo_BasicSli_LatencyCriteria.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicSli_LatencyCriteria proto.InternalMessageInfo + +func (m *BasicSli_LatencyCriteria) GetThreshold() *duration.Duration { + if m != nil { + return m.Threshold + } + return nil +} + +// Range of numerical values, inclusive of `min` and exclusive of `max`. If the +// open range "< range.max" is desired, set `range.min = -infinity`. If the open +// range ">= range.min" is desired, set `range.max = infinity`. +type Range struct { + // Range minimum. 
+ Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // Range maximum. + Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Range) Reset() { *m = Range{} } +func (m *Range) String() string { return proto.CompactTextString(m) } +func (*Range) ProtoMessage() {} +func (*Range) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{4} +} + +func (m *Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Range.Unmarshal(m, b) +} +func (m *Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Range.Marshal(b, m, deterministic) +} +func (m *Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_Range.Merge(m, src) +} +func (m *Range) XXX_Size() int { + return xxx_messageInfo_Range.Size(m) +} +func (m *Range) XXX_DiscardUnknown() { + xxx_messageInfo_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_Range proto.InternalMessageInfo + +func (m *Range) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *Range) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +// Service Level Indicators for which atomic units of service are counted +// directly. +type RequestBasedSli struct { + // The means to compute a ratio of `good_service` to `total_service`. 
+ // + // Types that are valid to be assigned to Method: + // *RequestBasedSli_GoodTotalRatio + // *RequestBasedSli_DistributionCut + Method isRequestBasedSli_Method `protobuf_oneof:"method"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestBasedSli) Reset() { *m = RequestBasedSli{} } +func (m *RequestBasedSli) String() string { return proto.CompactTextString(m) } +func (*RequestBasedSli) ProtoMessage() {} +func (*RequestBasedSli) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{5} +} + +func (m *RequestBasedSli) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestBasedSli.Unmarshal(m, b) +} +func (m *RequestBasedSli) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestBasedSli.Marshal(b, m, deterministic) +} +func (m *RequestBasedSli) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBasedSli.Merge(m, src) +} +func (m *RequestBasedSli) XXX_Size() int { + return xxx_messageInfo_RequestBasedSli.Size(m) +} +func (m *RequestBasedSli) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBasedSli.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBasedSli proto.InternalMessageInfo + +type isRequestBasedSli_Method interface { + isRequestBasedSli_Method() +} + +type RequestBasedSli_GoodTotalRatio struct { + GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"` +} + +type RequestBasedSli_DistributionCut struct { + DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"` +} + +func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {} + +func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {} + +func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method { + if m != nil { + return m.Method + } + return nil +} + +func (m *RequestBasedSli) GetGoodTotalRatio() 
*TimeSeriesRatio { + if x, ok := m.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok { + return x.GoodTotalRatio + } + return nil +} + +func (m *RequestBasedSli) GetDistributionCut() *DistributionCut { + if x, ok := m.GetMethod().(*RequestBasedSli_DistributionCut); ok { + return x.DistributionCut + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*RequestBasedSli) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RequestBasedSli_GoodTotalRatio)(nil), + (*RequestBasedSli_DistributionCut)(nil), + } +} + +// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the +// `good_service / total_service` ratio. The specified `TimeSeries` must have +// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = +// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify +// exactly two of good, bad, and total, and the relationship `good_service + +// bad_service = total_service` will be assumed. +type TimeSeriesRatio struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying good service provided. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying bad service, either demanded service + // that was not provided or demanded service that was of inadequate quality. + // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have + // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. 
+ BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"` + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` quantifying total demanded service. Must have + // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind = + // DELTA` or `MetricKind = CUMULATIVE`. + TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeriesRatio) Reset() { *m = TimeSeriesRatio{} } +func (m *TimeSeriesRatio) String() string { return proto.CompactTextString(m) } +func (*TimeSeriesRatio) ProtoMessage() {} +func (*TimeSeriesRatio) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{6} +} + +func (m *TimeSeriesRatio) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeriesRatio.Unmarshal(m, b) +} +func (m *TimeSeriesRatio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeriesRatio.Marshal(b, m, deterministic) +} +func (m *TimeSeriesRatio) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesRatio.Merge(m, src) +} +func (m *TimeSeriesRatio) XXX_Size() int { + return xxx_messageInfo_TimeSeriesRatio.Size(m) +} +func (m *TimeSeriesRatio) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesRatio.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesRatio proto.InternalMessageInfo + +func (m *TimeSeriesRatio) GetGoodServiceFilter() string { + if m != nil { + return m.GoodServiceFilter + } + return "" +} + +func (m *TimeSeriesRatio) GetBadServiceFilter() string { + if m != nil { + return m.BadServiceFilter + } + return "" +} + +func (m *TimeSeriesRatio) GetTotalServiceFilter() string { + if m != nil { + return m.TotalServiceFilter + } + 
return "" +} + +// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring +// good service and total service. The `TimeSeries` must have `ValueType = +// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The +// computed `good_service` will be the count of values x in the `Distribution` +// such that `range.min <= x < range.max`. +type DistributionCut struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying a `TimeSeries` aggregating values. Must have `ValueType = + // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. + DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound to + // an infinite value. + Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionCut) Reset() { *m = DistributionCut{} } +func (m *DistributionCut) String() string { return proto.CompactTextString(m) } +func (*DistributionCut) ProtoMessage() {} +func (*DistributionCut) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{7} +} + +func (m *DistributionCut) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionCut.Unmarshal(m, b) +} +func (m *DistributionCut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionCut.Marshal(b, m, deterministic) +} +func (m *DistributionCut) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionCut.Merge(m, src) +} +func (m *DistributionCut) XXX_Size() int { + return xxx_messageInfo_DistributionCut.Size(m) +} +func (m *DistributionCut) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionCut.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DistributionCut proto.InternalMessageInfo + +func (m *DistributionCut) GetDistributionFilter() string { + if m != nil { + return m.DistributionFilter + } + return "" +} + +func (m *DistributionCut) GetRange() *Range { + if m != nil { + return m.Range + } + return nil +} + +// A `WindowsBasedSli` defines `good_service` as the count of time windows for +// which the provided service was of good quality. Criteria for determining +// if service was good are embedded in the `window_criterion`. +type WindowsBasedSli struct { + // The criterion to use for evaluating window goodness. + // + // Types that are valid to be assigned to WindowCriterion: + // *WindowsBasedSli_GoodBadMetricFilter + // *WindowsBasedSli_GoodTotalRatioThreshold + // *WindowsBasedSli_MetricMeanInRange + // *WindowsBasedSli_MetricSumInRange + WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"` + // Duration over which window quality is evaluated. Must be an integer + // fraction of a day and at least `60s`. 
+ WindowPeriod *duration.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsBasedSli) Reset() { *m = WindowsBasedSli{} } +func (m *WindowsBasedSli) String() string { return proto.CompactTextString(m) } +func (*WindowsBasedSli) ProtoMessage() {} +func (*WindowsBasedSli) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{8} +} + +func (m *WindowsBasedSli) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WindowsBasedSli.Unmarshal(m, b) +} +func (m *WindowsBasedSli) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WindowsBasedSli.Marshal(b, m, deterministic) +} +func (m *WindowsBasedSli) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsBasedSli.Merge(m, src) +} +func (m *WindowsBasedSli) XXX_Size() int { + return xxx_messageInfo_WindowsBasedSli.Size(m) +} +func (m *WindowsBasedSli) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsBasedSli.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsBasedSli proto.InternalMessageInfo + +type isWindowsBasedSli_WindowCriterion interface { + isWindowsBasedSli_WindowCriterion() +} + +type WindowsBasedSli_GoodBadMetricFilter struct { + GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"` +} + +type WindowsBasedSli_GoodTotalRatioThreshold struct { + GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"` +} + +type WindowsBasedSli_MetricMeanInRange struct { + MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"` +} + +type WindowsBasedSli_MetricSumInRange struct { + MetricSumInRange *WindowsBasedSli_MetricRange 
`protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"` +} + +func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {} + +func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {} + +func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion { + if m != nil { + return m.WindowCriterion + } + return nil +} + +func (m *WindowsBasedSli) GetGoodBadMetricFilter() string { + if x, ok := m.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok { + return x.GoodBadMetricFilter + } + return "" +} + +func (m *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold { + if x, ok := m.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok { + return x.GoodTotalRatioThreshold + } + return nil +} + +func (m *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange { + if x, ok := m.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok { + return x.MetricMeanInRange + } + return nil +} + +func (m *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange { + if x, ok := m.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok { + return x.MetricSumInRange + } + return nil +} + +func (m *WindowsBasedSli) GetWindowPeriod() *duration.Duration { + if m != nil { + return m.WindowPeriod + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*WindowsBasedSli) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WindowsBasedSli_GoodBadMetricFilter)(nil), + (*WindowsBasedSli_GoodTotalRatioThreshold)(nil), + (*WindowsBasedSli_MetricMeanInRange)(nil), + (*WindowsBasedSli_MetricSumInRange)(nil), + } +} + +// A `PerformanceThreshold` is used when each window is good when that window +// has a sufficiently high `performance`. +type WindowsBasedSli_PerformanceThreshold struct { + // The means, either a request-based SLI or a basic SLI, by which to compute + // performance over a window. + // + // Types that are valid to be assigned to Type: + // *WindowsBasedSli_PerformanceThreshold_Performance + // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance + Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"` + // If window `performance >= threshold`, the window is counted as good. + Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsBasedSli_PerformanceThreshold) Reset() { *m = WindowsBasedSli_PerformanceThreshold{} } +func (m *WindowsBasedSli_PerformanceThreshold) String() string { return proto.CompactTextString(m) } +func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {} +func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{8, 0} +} + +func (m *WindowsBasedSli_PerformanceThreshold) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WindowsBasedSli_PerformanceThreshold.Unmarshal(m, b) +} +func (m *WindowsBasedSli_PerformanceThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WindowsBasedSli_PerformanceThreshold.Marshal(b, m, deterministic) +} +func (m *WindowsBasedSli_PerformanceThreshold) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_WindowsBasedSli_PerformanceThreshold.Merge(m, src) +} +func (m *WindowsBasedSli_PerformanceThreshold) XXX_Size() int { + return xxx_messageInfo_WindowsBasedSli_PerformanceThreshold.Size(m) +} +func (m *WindowsBasedSli_PerformanceThreshold) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsBasedSli_PerformanceThreshold.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsBasedSli_PerformanceThreshold proto.InternalMessageInfo + +type isWindowsBasedSli_PerformanceThreshold_Type interface { + isWindowsBasedSli_PerformanceThreshold_Type() +} + +type WindowsBasedSli_PerformanceThreshold_Performance struct { + Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"` +} + +type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct { + BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"` +} + +func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() { +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli { + if x, ok := m.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok { + return x.Performance + } + return nil +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli { + if x, ok := m.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok { + return x.BasicSliPerformance + } + return nil +} + +func (m *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 { + if m != nil { + return m.Threshold + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*WindowsBasedSli_PerformanceThreshold) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WindowsBasedSli_PerformanceThreshold_Performance)(nil), + (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil), + } +} + +// A `MetricRange` is used when each window is good when the value x of a +// single `TimeSeries` satisfies `range.min <= x < range.max`. The provided +// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and +// `MetricKind = GAUGE`. +type WindowsBasedSli_MetricRange struct { + // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) + // specifying the `TimeSeries` to use for evaluating window quality. + TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // Range of values considered "good." For a one-sided range, set one bound + // to an infinite value. + Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsBasedSli_MetricRange) Reset() { *m = WindowsBasedSli_MetricRange{} } +func (m *WindowsBasedSli_MetricRange) String() string { return proto.CompactTextString(m) } +func (*WindowsBasedSli_MetricRange) ProtoMessage() {} +func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) { + return fileDescriptor_72cc01277ee8f421, []int{8, 1} +} + +func (m *WindowsBasedSli_MetricRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WindowsBasedSli_MetricRange.Unmarshal(m, b) +} +func (m *WindowsBasedSli_MetricRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WindowsBasedSli_MetricRange.Marshal(b, m, deterministic) +} +func (m *WindowsBasedSli_MetricRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsBasedSli_MetricRange.Merge(m, src) +} +func (m *WindowsBasedSli_MetricRange) XXX_Size() int { + return 
xxx_messageInfo_WindowsBasedSli_MetricRange.Size(m) +} +func (m *WindowsBasedSli_MetricRange) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsBasedSli_MetricRange.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsBasedSli_MetricRange proto.InternalMessageInfo + +func (m *WindowsBasedSli_MetricRange) GetTimeSeries() string { + if m != nil { + return m.TimeSeries + } + return "" +} + +func (m *WindowsBasedSli_MetricRange) GetRange() *Range { + if m != nil { + return m.Range + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.ServiceLevelObjective_View", ServiceLevelObjective_View_name, ServiceLevelObjective_View_value) + proto.RegisterType((*Service)(nil), "google.monitoring.v3.Service") + proto.RegisterType((*Service_Custom)(nil), "google.monitoring.v3.Service.Custom") + proto.RegisterType((*Service_AppEngine)(nil), "google.monitoring.v3.Service.AppEngine") + proto.RegisterType((*Service_CloudEndpoints)(nil), "google.monitoring.v3.Service.CloudEndpoints") + proto.RegisterType((*Service_ClusterIstio)(nil), "google.monitoring.v3.Service.ClusterIstio") + proto.RegisterType((*Service_Telemetry)(nil), "google.monitoring.v3.Service.Telemetry") + proto.RegisterType((*ServiceLevelObjective)(nil), "google.monitoring.v3.ServiceLevelObjective") + proto.RegisterType((*ServiceLevelIndicator)(nil), "google.monitoring.v3.ServiceLevelIndicator") + proto.RegisterType((*BasicSli)(nil), "google.monitoring.v3.BasicSli") + proto.RegisterType((*BasicSli_AvailabilityCriteria)(nil), "google.monitoring.v3.BasicSli.AvailabilityCriteria") + proto.RegisterType((*BasicSli_LatencyCriteria)(nil), "google.monitoring.v3.BasicSli.LatencyCriteria") + proto.RegisterType((*Range)(nil), "google.monitoring.v3.Range") + proto.RegisterType((*RequestBasedSli)(nil), "google.monitoring.v3.RequestBasedSli") + proto.RegisterType((*TimeSeriesRatio)(nil), "google.monitoring.v3.TimeSeriesRatio") + proto.RegisterType((*DistributionCut)(nil), "google.monitoring.v3.DistributionCut") + 
proto.RegisterType((*WindowsBasedSli)(nil), "google.monitoring.v3.WindowsBasedSli") + proto.RegisterType((*WindowsBasedSli_PerformanceThreshold)(nil), "google.monitoring.v3.WindowsBasedSli.PerformanceThreshold") + proto.RegisterType((*WindowsBasedSli_MetricRange)(nil), "google.monitoring.v3.WindowsBasedSli.MetricRange") +} + +func init() { proto.RegisterFile("google/monitoring/v3/service.proto", fileDescriptor_72cc01277ee8f421) } + +var fileDescriptor_72cc01277ee8f421 = []byte{ + // 1498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x5f, 0x6f, 0xdb, 0x46, + 0x12, 0x17, 0xfd, 0x47, 0x7f, 0x46, 0xb2, 0xa5, 0xac, 0x1d, 0x47, 0x51, 0x0e, 0x89, 0xa3, 0x5c, + 0x70, 0x86, 0x13, 0x90, 0x89, 0x7d, 0x87, 0x03, 0x72, 0xb8, 0x00, 0x96, 0x63, 0x9f, 0x14, 0x38, + 0x39, 0x87, 0x76, 0x9c, 0xbb, 0x43, 0x00, 0xde, 0x8a, 0x5c, 0x2b, 0x5b, 0x90, 0x5c, 0x96, 0xa4, + 0xec, 0xb8, 0x46, 0x3e, 0x4a, 0x1f, 0x8a, 0x16, 0x08, 0xd0, 0x87, 0x7e, 0x88, 0x3e, 0xf4, 0x21, + 0xdf, 0xa1, 0x40, 0x9f, 0xf3, 0xda, 0xb7, 0x3e, 0x15, 0xfb, 0xcf, 0xa2, 0x14, 0xc5, 0x96, 0xfb, + 0xb6, 0x3b, 0xf3, 0x9b, 0xdf, 0xcc, 0xce, 0xce, 0xcc, 0x92, 0xd0, 0xec, 0x31, 0xd6, 0xf3, 0x89, + 0x15, 0xb0, 0x90, 0xa6, 0x2c, 0xa6, 0x61, 0xcf, 0x3a, 0x5a, 0xb7, 0x12, 0x12, 0x1f, 0x51, 0x97, + 0x98, 0x51, 0xcc, 0x52, 0x86, 0x16, 0x25, 0xc6, 0x1c, 0x60, 0xcc, 0xa3, 0xf5, 0xc6, 0x1d, 0x65, + 0x89, 0x23, 0xaa, 0xad, 0x89, 0xe7, 0xc4, 0x24, 0x61, 0xfd, 0x58, 0x9b, 0x36, 0xae, 0x67, 0x40, + 0x23, 0xaa, 0x9b, 0x4a, 0x25, 0x76, 0xdd, 0xfe, 0xa1, 0xe5, 0xf5, 0x63, 0x9c, 0x52, 0x16, 0x2a, + 0xfd, 0xad, 0x51, 0x7d, 0x4a, 0x03, 0x92, 0xa4, 0x38, 0x88, 0x14, 0xe0, 0xb6, 0x02, 0xa4, 0x27, + 0x11, 0xb1, 0x5c, 0xec, 0x93, 0xd0, 0xc3, 0xb1, 0x13, 0x91, 0x98, 0x32, 0x4f, 0x42, 0x9a, 0xef, + 0x0b, 0x50, 0xd8, 0x93, 0x67, 0x41, 0x08, 0x66, 0x42, 0x1c, 0x90, 0xba, 0xb1, 0x6c, 0xac, 0x94, + 0x6c, 0xb1, 0x46, 0xb7, 0xa1, 0xe2, 0xd1, 0x24, 0xf2, 0xf1, 0x89, 0x23, 0x74, 0x53, 0x42, 
0x57, + 0x56, 0xb2, 0xe7, 0x1c, 0xf2, 0x18, 0xf2, 0x6e, 0x3f, 0x49, 0x59, 0x50, 0xcf, 0x2f, 0x1b, 0x2b, + 0xe5, 0xb5, 0x3f, 0x9b, 0xe3, 0xb2, 0x61, 0x2a, 0x2f, 0xe6, 0xa6, 0xc0, 0xb6, 0x73, 0xb6, 0xb2, + 0x42, 0x6d, 0x00, 0x1c, 0x45, 0x0e, 0x09, 0x7b, 0x34, 0x24, 0xf5, 0x82, 0xe0, 0xf8, 0xcb, 0xf9, + 0x1c, 0x1b, 0x51, 0xb4, 0x25, 0xe0, 0xed, 0x9c, 0x5d, 0xc2, 0x7a, 0x83, 0x5e, 0x41, 0xd5, 0xf5, + 0x59, 0xdf, 0x73, 0x48, 0xe8, 0x45, 0x8c, 0x86, 0x69, 0x52, 0x2f, 0x0a, 0xba, 0xfb, 0x17, 0x84, + 0xc4, 0x8d, 0xb6, 0xb4, 0x4d, 0x3b, 0x67, 0xcf, 0xbb, 0x43, 0x12, 0xf4, 0x02, 0xe6, 0x5c, 0xbf, + 0x9f, 0xa4, 0x24, 0x76, 0x68, 0x92, 0x52, 0x56, 0x2f, 0x09, 0xda, 0xd5, 0x8b, 0x68, 0x85, 0x49, + 0x87, 0x5b, 0xb4, 0x73, 0x76, 0xc5, 0xcd, 0xec, 0xd1, 0x16, 0x94, 0x52, 0xe2, 0x93, 0x80, 0xa4, + 0xf1, 0x49, 0x7d, 0x6e, 0x92, 0x43, 0xef, 0x6b, 0xb8, 0x3d, 0xb0, 0x6c, 0x14, 0x21, 0x2f, 0x13, + 0xda, 0x58, 0x81, 0xd2, 0x59, 0x5a, 0xd0, 0x0d, 0x28, 0x05, 0xcc, 0xeb, 0xfb, 0xc4, 0xa1, 0x9e, + 0xba, 0xcf, 0xa2, 0x14, 0x74, 0xbc, 0xc6, 0x2a, 0xcc, 0x0f, 0x9f, 0x18, 0xd5, 0xa1, 0xa0, 0x0a, + 0x5a, 0x81, 0xf5, 0xb6, 0xf1, 0xb5, 0x01, 0x95, 0xec, 0x39, 0x50, 0x03, 0x8a, 0x3e, 0x73, 0x45, + 0x19, 0x6a, 0x62, 0xbd, 0xe7, 0xc5, 0xa2, 0xd3, 0x94, 0x2d, 0x16, 0x25, 0x13, 0xc5, 0x72, 0x0f, + 0xae, 0x28, 0x6a, 0x01, 0x49, 0x22, 0xec, 0x92, 0xfa, 0xb4, 0xc0, 0xd5, 0x94, 0xe2, 0xb9, 0x96, + 0x73, 0xbe, 0x2c, 0xb8, 0x3e, 0x23, 0xf9, 0x32, 0xb8, 0xc6, 0x03, 0x28, 0x9d, 0xe5, 0x05, 0xdd, + 0x81, 0x39, 0xdd, 0x42, 0x4e, 0xa6, 0x92, 0x2b, 0x5a, 0xc8, 0x2d, 0x1e, 0xbd, 0x37, 0x3e, 0x6e, + 0x7c, 0x6b, 0xc0, 0xed, 0x4c, 0x92, 0x65, 0xda, 0x71, 0x44, 0x13, 0xd3, 0x65, 0x81, 0xa5, 0xfb, + 0xe1, 0x6e, 0x14, 0xb3, 0x2f, 0x88, 0x9b, 0x26, 0xd6, 0xa9, 0x5a, 0xbd, 0xd3, 0x7d, 0x9f, 0x58, + 0xa7, 0x6a, 0xf5, 0x0e, 0x59, 0x2c, 0xee, 0xe1, 0x90, 0x7e, 0x25, 0x72, 0x90, 0x58, 0xa7, 0xd9, + 0xed, 0x58, 0x83, 0x3b, 0x87, 0xcc, 0xf7, 0x48, 0x9c, 0x58, 0xa7, 0x72, 0x31, 0x16, 0x64, 0xac, + 0xb6, 0x2a, 0x00, 0xd4, 0x23, 
0x61, 0x4a, 0x0f, 0x29, 0x89, 0x9b, 0xbf, 0xcc, 0xc2, 0x55, 0x15, + 0xd9, 0x0e, 0x39, 0x22, 0xfe, 0xbf, 0xbb, 0x3c, 0x22, 0x7a, 0x34, 0x59, 0xdf, 0x96, 0x3f, 0xed, + 0x5b, 0x17, 0xae, 0xe9, 0xec, 0xfa, 0x9c, 0xd0, 0xa1, 0xa1, 0x47, 0x5d, 0x9c, 0xb2, 0x58, 0x5c, + 0x48, 0x79, 0xed, 0xde, 0xb9, 0xf5, 0x28, 0x82, 0xe8, 0x68, 0x13, 0xfb, 0x6a, 0x32, 0x4e, 0xcc, + 0x63, 0xeb, 0x31, 0xec, 0x8b, 0xab, 0x33, 0x6c, 0xb1, 0x46, 0x2d, 0x98, 0x8f, 0x99, 0xef, 0xd3, + 0xb0, 0xa7, 0x66, 0x51, 0x7d, 0x56, 0xf8, 0xbb, 0xae, 0xfd, 0xe9, 0x81, 0x66, 0x3e, 0x51, 0x03, + 0xaf, 0x9d, 0xb3, 0xe7, 0x94, 0xc9, 0xae, 0xb0, 0x40, 0xdb, 0x50, 0x1d, 0x19, 0x68, 0x62, 0xfa, + 0xcc, 0xaf, 0xdd, 0xd0, 0x24, 0x7c, 0xe8, 0x99, 0x9b, 0x0a, 0x23, 0xad, 0x44, 0x67, 0x0f, 0x49, + 0x9a, 0x7f, 0x85, 0x99, 0x03, 0x4a, 0x8e, 0xd1, 0x22, 0xd4, 0x0e, 0x3a, 0x5b, 0xaf, 0x9c, 0x97, + 0xcf, 0xf7, 0x76, 0xb7, 0x36, 0x3b, 0xdb, 0x9d, 0xad, 0x27, 0xb5, 0x1c, 0x2a, 0xc2, 0xcc, 0xf6, + 0xcb, 0x9d, 0x9d, 0xda, 0x14, 0xaa, 0x40, 0x71, 0xeb, 0x3f, 0xbb, 0x3b, 0x9d, 0xcd, 0xce, 0x7e, + 0xcd, 0x78, 0xf4, 0x61, 0xea, 0xe3, 0xc6, 0x4f, 0x53, 0x60, 0x5d, 0x58, 0x43, 0x23, 0x37, 0x75, + 0x30, 0x51, 0x45, 0x59, 0xc9, 0x38, 0xe3, 0x01, 0x40, 0x5d, 0x17, 0xd3, 0x9a, 0x77, 0xe8, 0xff, + 0x97, 0x2c, 0xc1, 0xcb, 0x7b, 0xd8, 0x9f, 0xa0, 0x66, 0x2f, 0xcf, 0x6a, 0xac, 0x2e, 0x1b, 0xad, + 0x22, 0xe4, 0xe5, 0x0d, 0x36, 0x7f, 0x33, 0x86, 0x4b, 0x7c, 0x50, 0x46, 0xff, 0x84, 0x52, 0x17, + 0x27, 0xd4, 0x75, 0x12, 0x9f, 0x8a, 0x5a, 0x2a, 0xaf, 0xdd, 0x1c, 0x5f, 0x9d, 0x2d, 0x0e, 0xdb, + 0xf3, 0x69, 0x3b, 0x67, 0x17, 0xbb, 0x6a, 0x8d, 0x76, 0xf8, 0x60, 0xf8, 0xb2, 0x4f, 0x92, 0xd4, + 0xe9, 0xe2, 0x84, 0xc8, 0x91, 0x58, 0x5e, 0xbb, 0x3b, 0x9e, 0xc2, 0x96, 0xd0, 0x16, 0x47, 0x4a, + 0xa6, 0x4a, 0x9c, 0x11, 0x71, 0xb6, 0x63, 0x1a, 0x7a, 0xec, 0x38, 0x51, 0x6c, 0x53, 0xe7, 0xb1, + 0xbd, 0x92, 0xd0, 0x2c, 0xdb, 0x71, 0x46, 0xd4, 0xca, 0xc3, 0x0c, 0x2f, 0xd5, 0xe6, 0xaf, 0x53, + 0x50, 0xd4, 0xc1, 0xa3, 0x25, 0xc8, 0x07, 0x24, 0x7d, 0xc3, 0xbc, 
0x7a, 0x61, 0x79, 0x7a, 0xa5, + 0x64, 0xab, 0xdd, 0xd0, 0xf4, 0x2d, 0x0a, 0xcd, 0x60, 0xfa, 0xd6, 0xa1, 0x70, 0x44, 0xe2, 0x84, + 0xab, 0x4a, 0x42, 0xa5, 0xb7, 0xe8, 0xbf, 0x50, 0xc1, 0x47, 0x98, 0xfa, 0xb8, 0x4b, 0x7d, 0x9a, + 0x9e, 0xa8, 0x78, 0xd7, 0xcf, 0x4f, 0xa0, 0xb9, 0x91, 0x31, 0xd9, 0x8c, 0x69, 0x4a, 0x62, 0x8a, + 0x79, 0xf4, 0x59, 0x2a, 0xf4, 0x14, 0x0a, 0x3e, 0x4e, 0x49, 0xe8, 0x9e, 0xa8, 0xa1, 0x61, 0x5e, + 0xc0, 0xba, 0x23, 0xd1, 0x19, 0x42, 0x4d, 0xd0, 0x58, 0x82, 0xc5, 0x71, 0x3e, 0x1b, 0x4f, 0xa1, + 0x3a, 0x62, 0x85, 0xfe, 0x0e, 0xa5, 0xf4, 0x4d, 0x4c, 0x92, 0x37, 0xcc, 0xf7, 0x94, 0xe3, 0xcf, + 0x4f, 0x0f, 0x7b, 0x80, 0x6d, 0xcd, 0x43, 0x25, 0xf1, 0xa9, 0xe3, 0x2a, 0xa2, 0xe6, 0x3d, 0x98, + 0xb5, 0x71, 0xd8, 0x23, 0xa8, 0x06, 0xd3, 0x01, 0x95, 0x4f, 0x9a, 0x61, 0xf3, 0xa5, 0x90, 0xe0, + 0xb7, 0x22, 0x59, 0x5c, 0x82, 0xdf, 0x36, 0x7f, 0x34, 0xa0, 0x3a, 0x52, 0x1c, 0xe8, 0x05, 0xd4, + 0x7a, 0x8c, 0x79, 0x4e, 0xca, 0x52, 0xec, 0x3b, 0xc2, 0xe1, 0xf9, 0xd5, 0xb5, 0x4f, 0x03, 0xb2, + 0x47, 0x62, 0x4a, 0x12, 0x1b, 0xcb, 0x0f, 0x83, 0x79, 0x4e, 0xb0, 0xcf, 0xed, 0x85, 0x04, 0xd9, + 0x50, 0xf3, 0x68, 0x92, 0xc6, 0xb4, 0xdb, 0xe7, 0xe1, 0x3b, 0x6e, 0x3f, 0x55, 0x67, 0xfc, 0x0c, + 0xe5, 0x93, 0x0c, 0x7a, 0xb3, 0x9f, 0xb6, 0x73, 0x76, 0xd5, 0x1b, 0x16, 0xf1, 0x26, 0x93, 0x25, + 0xd4, 0xfc, 0xc6, 0x80, 0xea, 0x48, 0x0c, 0xc8, 0x84, 0x05, 0x71, 0x08, 0xdd, 0xa8, 0x87, 0xd4, + 0x4f, 0x49, 0xac, 0xde, 0xdb, 0x2b, 0x5c, 0xa5, 0xda, 0x72, 0x5b, 0x28, 0xd0, 0x7d, 0x40, 0x5d, + 0xfc, 0x09, 0x7c, 0x56, 0x3e, 0xe3, 0x5d, 0x3c, 0x82, 0x7e, 0x00, 0x8b, 0x32, 0x3b, 0x23, 0xf8, + 0xbc, 0xc0, 0x23, 0xa1, 0x1b, 0xb2, 0x68, 0xf6, 0xa1, 0x3a, 0x72, 0x26, 0x64, 0xc1, 0xc2, 0x50, + 0x52, 0x86, 0x42, 0x44, 0x59, 0x95, 0xf2, 0xfa, 0x10, 0x66, 0x63, 0x7e, 0xb3, 0xea, 0x71, 0xb9, + 0xf1, 0x99, 0x5e, 0xe7, 0x10, 0x5b, 0x22, 0x9b, 0xdf, 0xe5, 0xa1, 0x3a, 0xd2, 0xae, 0xe8, 0x6f, + 0xb0, 0x24, 0x52, 0xc3, 0xcf, 0xcb, 0xbf, 0x32, 0xa8, 0x3b, 0x74, 0xdc, 0x76, 0xce, 0x16, 0xa9, + 0x6b, 
0x61, 0xef, 0x99, 0xd0, 0x2a, 0xef, 0x27, 0xd0, 0x18, 0x2d, 0x0b, 0x67, 0x50, 0xb1, 0xb2, + 0x01, 0x1f, 0x4d, 0x34, 0x30, 0xcc, 0x5d, 0x12, 0x1f, 0xb2, 0x38, 0xc0, 0xa1, 0x4b, 0xf6, 0x35, + 0x43, 0x3b, 0x67, 0x5f, 0x1b, 0xae, 0x9a, 0x33, 0x15, 0xf2, 0x60, 0x51, 0x05, 0x1a, 0x10, 0x1c, + 0x3a, 0x34, 0x74, 0x64, 0x1e, 0xe4, 0xd7, 0xf9, 0xc3, 0xc9, 0x9c, 0xca, 0xc3, 0x88, 0xec, 0xb4, + 0x73, 0xf6, 0x15, 0x49, 0xf8, 0x8c, 0xe0, 0xb0, 0x13, 0xca, 0x7e, 0xe9, 0xc2, 0x82, 0xf2, 0x92, + 0xf4, 0x83, 0x81, 0x93, 0xc2, 0x1f, 0x77, 0x52, 0x93, 0x7c, 0x7b, 0xfd, 0x40, 0xfb, 0x78, 0xac, + 0x07, 0xad, 0x7e, 0xe2, 0x67, 0x2e, 0xea, 0x74, 0x35, 0x5a, 0xe5, 0xe3, 0xde, 0xf8, 0xd9, 0x80, + 0xc5, 0x71, 0xd9, 0x43, 0x1d, 0x28, 0x47, 0x03, 0xf9, 0x65, 0x5f, 0x83, 0xac, 0x2d, 0xda, 0x87, + 0xab, 0x67, 0x2f, 0x93, 0x93, 0x25, 0x9d, 0x9e, 0xf0, 0x95, 0x5a, 0xd0, 0xaf, 0x54, 0x26, 0x52, + 0xf4, 0xa7, 0xec, 0x7c, 0x93, 0x13, 0x28, 0x33, 0xc4, 0xd4, 0x93, 0xd1, 0xc0, 0x50, 0xce, 0xa4, + 0x10, 0xdd, 0x82, 0x32, 0xff, 0x03, 0xe4, 0x6d, 0x46, 0x49, 0xa2, 0x3e, 0x07, 0x21, 0x3d, 0xeb, + 0xf5, 0x41, 0x4b, 0xcc, 0x4c, 0xda, 0x12, 0x2d, 0x04, 0x35, 0x75, 0x05, 0x6a, 0x64, 0xb2, 0xb0, + 0xf5, 0x83, 0x01, 0x75, 0x97, 0x05, 0x63, 0xad, 0x5b, 0x4b, 0xaa, 0x93, 0x9f, 0x9d, 0x49, 0x77, + 0xf9, 0x2d, 0xed, 0x1a, 0xff, 0x7b, 0xac, 0xf0, 0x3d, 0xe6, 0xe3, 0xb0, 0x67, 0xb2, 0xb8, 0x67, + 0xf5, 0x48, 0x28, 0xee, 0xd0, 0x1a, 0x7c, 0x3d, 0x0d, 0xff, 0x67, 0xff, 0x63, 0xb0, 0xfb, 0x7e, + 0xaa, 0xf1, 0x2f, 0x49, 0x20, 0x7e, 0x5e, 0xcc, 0x81, 0x03, 0xf3, 0x60, 0xfd, 0x83, 0x56, 0xbe, + 0x16, 0xca, 0xd7, 0x03, 0xe5, 0xeb, 0x83, 0xf5, 0x6e, 0x5e, 0x38, 0x59, 0xff, 0x3d, 0x00, 0x00, + 0xff, 0xff, 0x5d, 0xb0, 0x09, 0xb8, 0xcb, 0x0f, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go new file mode 100644 index 0000000000..a0875baf11 --- /dev/null +++ 
b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/service_service.pb.go @@ -0,0 +1,1232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/service_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + _ "google.golang.org/genproto/googleapis/api/monitoredres" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The `CreateService` request. +type CreateServiceRequest struct { + // Required. Resource name of the parent workspace. + // Of the form `projects/{project_id}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The Service id to use for this Service. If omitted, an id will be + // generated instead. Must match the pattern [a-z0-9\-]+ + ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Required. The `Service` to create. 
+ Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (m *CreateServiceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{0} +} + +func (m *CreateServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateServiceRequest.Unmarshal(m, b) +} +func (m *CreateServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateServiceRequest.Marshal(b, m, deterministic) +} +func (m *CreateServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateServiceRequest.Merge(m, src) +} +func (m *CreateServiceRequest) XXX_Size() int { + return xxx_messageInfo_CreateServiceRequest.Size(m) +} +func (m *CreateServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateServiceRequest proto.InternalMessageInfo + +func (m *CreateServiceRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateServiceRequest) GetServiceId() string { + if m != nil { + return m.ServiceId + } + return "" +} + +func (m *CreateServiceRequest) GetService() *Service { + if m != nil { + return m.Service + } + return nil +} + +// The `GetService` request. +type GetServiceRequest struct { + // Required. Resource name of the `Service`. + // Of the form `projects/{project_id}/services/{service_id}`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (m *GetServiceRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{1} +} + +func (m *GetServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceRequest.Unmarshal(m, b) +} +func (m *GetServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceRequest.Marshal(b, m, deterministic) +} +func (m *GetServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceRequest.Merge(m, src) +} +func (m *GetServiceRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceRequest.Size(m) +} +func (m *GetServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceRequest proto.InternalMessageInfo + +func (m *GetServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListServices` request. +type ListServicesRequest struct { + // Required. Resource name of the parent `Workspace`. + // Of the form `projects/{project_id}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `Service`s to return. The filter currently + // supports the following fields: + // + // - `identifier_case` + // - `app_engine.module_id` + // - `cloud_endpoints.service` + // - `cluster_istio.location` + // - `cluster_istio.cluster_name` + // - `cluster_istio.service_namespace` + // - `cluster_istio.service_name` + // + // `identifier_case` refers to which option in the identifier oneof is + // populated. 
For example, the filter `identifier_case = "CUSTOM"` would match + // all services with a value for the `custom` field. Valid options are + // "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", and "CLUSTER_ISTIO". + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (m *ListServicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{2} +} + +func (m *ListServicesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServicesRequest.Unmarshal(m, b) +} +func (m *ListServicesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServicesRequest.Marshal(b, m, deterministic) +} +func (m *ListServicesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServicesRequest.Merge(m, src) +} +func (m *ListServicesRequest) XXX_Size() int { + return xxx_messageInfo_ListServicesRequest.Size(m) +} +func (m *ListServicesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListServicesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServicesRequest proto.InternalMessageInfo + +func (m 
*ListServicesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListServicesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListServicesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServicesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListServices` response. +type ListServicesResponse struct { + // The `Service`s matching the specified filter. + Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (m *ListServicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{3} +} + +func (m *ListServicesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServicesResponse.Unmarshal(m, b) +} +func (m *ListServicesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServicesResponse.Marshal(b, m, deterministic) +} +func (m *ListServicesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServicesResponse.Merge(m, src) +} +func (m *ListServicesResponse) XXX_Size() int { + return xxx_messageInfo_ListServicesResponse.Size(m) +} +func (m *ListServicesResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ListServicesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServicesResponse proto.InternalMessageInfo + +func (m *ListServicesResponse) GetServices() []*Service { + if m != nil { + return m.Services + } + return nil +} + +func (m *ListServicesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `UpdateService` request. +type UpdateServiceRequest struct { + // Required. The `Service` to draw updates from. + // The given `name` specifies the resource to update. + Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // A set of field paths defining which fields to use for the update. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (m *UpdateServiceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{4} +} + +func (m *UpdateServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateServiceRequest.Unmarshal(m, b) +} +func (m *UpdateServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateServiceRequest.Marshal(b, m, deterministic) +} +func (m *UpdateServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateServiceRequest.Merge(m, src) +} +func (m *UpdateServiceRequest) XXX_Size() int { + return xxx_messageInfo_UpdateServiceRequest.Size(m) +} +func (m *UpdateServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateServiceRequest proto.InternalMessageInfo + +func (m 
*UpdateServiceRequest) GetService() *Service { + if m != nil { + return m.Service + } + return nil +} + +func (m *UpdateServiceRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// The `DeleteService` request. +type DeleteServiceRequest struct { + // Required. Resource name of the `Service` to delete. + // Of the form `projects/{project_id}/services/{service_id}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteServiceRequest) Reset() { *m = DeleteServiceRequest{} } +func (m *DeleteServiceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceRequest) ProtoMessage() {} +func (*DeleteServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{5} +} + +func (m *DeleteServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteServiceRequest.Unmarshal(m, b) +} +func (m *DeleteServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteServiceRequest.Marshal(b, m, deterministic) +} +func (m *DeleteServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteServiceRequest.Merge(m, src) +} +func (m *DeleteServiceRequest) XXX_Size() int { + return xxx_messageInfo_DeleteServiceRequest.Size(m) +} +func (m *DeleteServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteServiceRequest proto.InternalMessageInfo + +func (m *DeleteServiceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateServiceLevelObjective` request. +type CreateServiceLevelObjectiveRequest struct { + // Required. Resource name of the parent `Service`. + // Of the form `projects/{project_id}/services/{service_id}`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Optional. The ServiceLevelObjective id to use for this + // ServiceLevelObjective. If omitted, an id will be generated instead. Must + // match the pattern [a-z0-9\-]+ + ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"` + // Required. The `ServiceLevelObjective` to create. + // The provided `name` will be respected if no `ServiceLevelObjective` exists + // with this name. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateServiceLevelObjectiveRequest) Reset() { *m = CreateServiceLevelObjectiveRequest{} } +func (m *CreateServiceLevelObjectiveRequest) String() string { return proto.CompactTextString(m) } +func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {} +func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{6} +} + +func (m *CreateServiceLevelObjectiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateServiceLevelObjectiveRequest.Unmarshal(m, b) +} +func (m *CreateServiceLevelObjectiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateServiceLevelObjectiveRequest.Marshal(b, m, deterministic) +} +func (m *CreateServiceLevelObjectiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateServiceLevelObjectiveRequest.Merge(m, src) +} +func (m *CreateServiceLevelObjectiveRequest) XXX_Size() int { + return xxx_messageInfo_CreateServiceLevelObjectiveRequest.Size(m) +} +func (m *CreateServiceLevelObjectiveRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_CreateServiceLevelObjectiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateServiceLevelObjectiveRequest proto.InternalMessageInfo + +func (m *CreateServiceLevelObjectiveRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string { + if m != nil { + return m.ServiceLevelObjectiveId + } + return "" +} + +func (m *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if m != nil { + return m.ServiceLevelObjective + } + return nil +} + +// The `GetServiceLevelObjective` request. +type GetServiceLevelObjectiveRequest struct { + // Required. Resource name of the `ServiceLevelObjective` to get. + // Of the form + // `projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceLevelObjectiveRequest) Reset() { *m = GetServiceLevelObjectiveRequest{} } +func (m *GetServiceLevelObjectiveRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceLevelObjectiveRequest) ProtoMessage() {} +func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{7} +} + +func (m *GetServiceLevelObjectiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceLevelObjectiveRequest.Unmarshal(m, b) +} +func (m *GetServiceLevelObjectiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceLevelObjectiveRequest.Marshal(b, m, deterministic) +} +func (m *GetServiceLevelObjectiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceLevelObjectiveRequest.Merge(m, src) +} +func (m *GetServiceLevelObjectiveRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceLevelObjectiveRequest.Size(m) +} +func (m *GetServiceLevelObjectiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceLevelObjectiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceLevelObjectiveRequest proto.InternalMessageInfo + +func (m *GetServiceLevelObjectiveRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View { + if m != nil { + return m.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` request. +type ListServiceLevelObjectivesRequest struct { + // Required. Resource name of the parent `Service`. + // Of the form `projects/{project_id}/services/{service_id}`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // A filter specifying what `ServiceLevelObjective`s to return. + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A non-negative number that is the maximum number of results to return. + // When 0, use default page size. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each + // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the + // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the + // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed. 
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceLevelObjectivesRequest) Reset() { *m = ListServiceLevelObjectivesRequest{} } +func (m *ListServiceLevelObjectivesRequest) String() string { return proto.CompactTextString(m) } +func (*ListServiceLevelObjectivesRequest) ProtoMessage() {} +func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{8} +} + +func (m *ListServiceLevelObjectivesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceLevelObjectivesRequest.Unmarshal(m, b) +} +func (m *ListServiceLevelObjectivesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceLevelObjectivesRequest.Marshal(b, m, deterministic) +} +func (m *ListServiceLevelObjectivesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceLevelObjectivesRequest.Merge(m, src) +} +func (m *ListServiceLevelObjectivesRequest) XXX_Size() int { + return xxx_messageInfo_ListServiceLevelObjectivesRequest.Size(m) +} +func (m *ListServiceLevelObjectivesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceLevelObjectivesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceLevelObjectivesRequest proto.InternalMessageInfo + +func (m *ListServiceLevelObjectivesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListServiceLevelObjectivesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListServiceLevelObjectivesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListServiceLevelObjectivesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" 
+} + +func (m *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View { + if m != nil { + return m.View + } + return ServiceLevelObjective_VIEW_UNSPECIFIED +} + +// The `ListServiceLevelObjectives` response. +type ListServiceLevelObjectivesResponse struct { + // The `ServiceLevelObjective`s matching the specified filter. + ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceLevelObjectivesResponse) Reset() { *m = ListServiceLevelObjectivesResponse{} } +func (m *ListServiceLevelObjectivesResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceLevelObjectivesResponse) ProtoMessage() {} +func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{9} +} + +func (m *ListServiceLevelObjectivesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceLevelObjectivesResponse.Unmarshal(m, b) +} +func (m *ListServiceLevelObjectivesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceLevelObjectivesResponse.Marshal(b, m, deterministic) +} +func (m *ListServiceLevelObjectivesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceLevelObjectivesResponse.Merge(m, src) +} +func (m *ListServiceLevelObjectivesResponse) XXX_Size() int { + return xxx_messageInfo_ListServiceLevelObjectivesResponse.Size(m) 
+} +func (m *ListServiceLevelObjectivesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceLevelObjectivesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceLevelObjectivesResponse proto.InternalMessageInfo + +func (m *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective { + if m != nil { + return m.ServiceLevelObjectives + } + return nil +} + +func (m *ListServiceLevelObjectivesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `UpdateServiceLevelObjective` request. +type UpdateServiceLevelObjectiveRequest struct { + // Required. The `ServiceLevelObjective` to draw updates from. + // The given `name` specifies the resource to update. + ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"` + // A set of field paths defining which fields to use for the update. 
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateServiceLevelObjectiveRequest) Reset() { *m = UpdateServiceLevelObjectiveRequest{} } +func (m *UpdateServiceLevelObjectiveRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {} +func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{10} +} + +func (m *UpdateServiceLevelObjectiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateServiceLevelObjectiveRequest.Unmarshal(m, b) +} +func (m *UpdateServiceLevelObjectiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateServiceLevelObjectiveRequest.Marshal(b, m, deterministic) +} +func (m *UpdateServiceLevelObjectiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateServiceLevelObjectiveRequest.Merge(m, src) +} +func (m *UpdateServiceLevelObjectiveRequest) XXX_Size() int { + return xxx_messageInfo_UpdateServiceLevelObjectiveRequest.Size(m) +} +func (m *UpdateServiceLevelObjectiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateServiceLevelObjectiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateServiceLevelObjectiveRequest proto.InternalMessageInfo + +func (m *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective { + if m != nil { + return m.ServiceLevelObjective + } + return nil +} + +func (m *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// The `DeleteServiceLevelObjective` request. +type DeleteServiceLevelObjectiveRequest struct { + // Required. Resource name of the `ServiceLevelObjective` to delete. 
+ // Of the form + // `projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteServiceLevelObjectiveRequest) Reset() { *m = DeleteServiceLevelObjectiveRequest{} } +func (m *DeleteServiceLevelObjectiveRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {} +func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e551c54e1485b8, []int{11} +} + +func (m *DeleteServiceLevelObjectiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteServiceLevelObjectiveRequest.Unmarshal(m, b) +} +func (m *DeleteServiceLevelObjectiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteServiceLevelObjectiveRequest.Marshal(b, m, deterministic) +} +func (m *DeleteServiceLevelObjectiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteServiceLevelObjectiveRequest.Merge(m, src) +} +func (m *DeleteServiceLevelObjectiveRequest) XXX_Size() int { + return xxx_messageInfo_DeleteServiceLevelObjectiveRequest.Size(m) +} +func (m *DeleteServiceLevelObjectiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteServiceLevelObjectiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteServiceLevelObjectiveRequest proto.InternalMessageInfo + +func (m *DeleteServiceLevelObjectiveRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CreateServiceRequest)(nil), "google.monitoring.v3.CreateServiceRequest") + proto.RegisterType((*GetServiceRequest)(nil), "google.monitoring.v3.GetServiceRequest") + proto.RegisterType((*ListServicesRequest)(nil), "google.monitoring.v3.ListServicesRequest") + 
proto.RegisterType((*ListServicesResponse)(nil), "google.monitoring.v3.ListServicesResponse") + proto.RegisterType((*UpdateServiceRequest)(nil), "google.monitoring.v3.UpdateServiceRequest") + proto.RegisterType((*DeleteServiceRequest)(nil), "google.monitoring.v3.DeleteServiceRequest") + proto.RegisterType((*CreateServiceLevelObjectiveRequest)(nil), "google.monitoring.v3.CreateServiceLevelObjectiveRequest") + proto.RegisterType((*GetServiceLevelObjectiveRequest)(nil), "google.monitoring.v3.GetServiceLevelObjectiveRequest") + proto.RegisterType((*ListServiceLevelObjectivesRequest)(nil), "google.monitoring.v3.ListServiceLevelObjectivesRequest") + proto.RegisterType((*ListServiceLevelObjectivesResponse)(nil), "google.monitoring.v3.ListServiceLevelObjectivesResponse") + proto.RegisterType((*UpdateServiceLevelObjectiveRequest)(nil), "google.monitoring.v3.UpdateServiceLevelObjectiveRequest") + proto.RegisterType((*DeleteServiceLevelObjectiveRequest)(nil), "google.monitoring.v3.DeleteServiceLevelObjectiveRequest") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/service_service.proto", fileDescriptor_a3e551c54e1485b8) +} + +var fileDescriptor_a3e551c54e1485b8 = []byte{ + // 1145 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0xd7, 0x6c, 0xd2, 0xb4, 0x79, 0x25, 0xad, 0x18, 0x42, 0xb2, 0xf1, 0x12, 0x9a, 0xb8, 0x08, + 0xd2, 0x25, 0xd8, 0xb0, 0x0b, 0x6a, 0x49, 0x5a, 0x24, 0x27, 0x85, 0xaa, 0xd0, 0x88, 0xc8, 0xa5, + 0x39, 0xa0, 0x48, 0x2b, 0x67, 0x77, 0xb2, 0x31, 0xf1, 0xda, 0xae, 0x3d, 0xbb, 0x4b, 0x8b, 0x82, + 0x10, 0x12, 0x07, 0x24, 0x84, 0x54, 0x71, 0x80, 0xaf, 0x00, 0xe2, 0xc4, 0xbf, 0x3b, 0xd7, 0x72, + 0x03, 0x4e, 0x7b, 0xea, 0x81, 0x13, 0x1f, 0xa1, 0x27, 0x64, 0x7b, 0xbc, 0xb6, 0x37, 0x63, 0x3b, + 0x26, 0xe9, 0x29, 0xf1, 0xbc, 0x37, 0xef, 0xcf, 0xef, 0xbd, 0xdf, 0x7b, 0xa3, 0x85, 0x6a, 0xdb, + 0xb2, 0xda, 0x06, 0x91, 0x3b, 0x96, 0xa9, 0x53, 0xcb, 0xd1, 0xcd, 
0xb6, 0xdc, 0xab, 0xcb, 0x2e, + 0x71, 0x7a, 0x7a, 0x93, 0x34, 0xd8, 0x5f, 0xc9, 0x76, 0x2c, 0x6a, 0xe1, 0xe9, 0x40, 0x57, 0x8a, + 0x74, 0xa5, 0x5e, 0x5d, 0x78, 0x8e, 0x59, 0xd0, 0x6c, 0x5d, 0xd6, 0x4c, 0xd3, 0xa2, 0x1a, 0xd5, + 0x2d, 0xd3, 0x0d, 0xee, 0x08, 0xb3, 0x31, 0x69, 0xd3, 0xd0, 0x89, 0x49, 0x99, 0xe0, 0x42, 0x4c, + 0xb0, 0xab, 0x13, 0xa3, 0xd5, 0xd8, 0x21, 0x7b, 0x5a, 0x4f, 0xb7, 0x1c, 0xa6, 0x70, 0x31, 0xa6, + 0xc0, 0x3c, 0x92, 0x56, 0xc3, 0x21, 0xae, 0xd5, 0x75, 0xc2, 0x90, 0x84, 0xb9, 0x98, 0xd2, 0x88, + 0x48, 0xcc, 0xca, 0x8c, 0xe9, 0x54, 0x98, 0x8e, 0xff, 0xb5, 0xd3, 0xdd, 0x95, 0x49, 0xc7, 0xa6, + 0xf7, 0x98, 0x70, 0x61, 0x54, 0x18, 0x84, 0xd9, 0xd1, 0xdc, 0xfd, 0x40, 0x43, 0xfc, 0x0d, 0xc1, + 0xf4, 0xba, 0x43, 0x34, 0x4a, 0x6e, 0x07, 0x66, 0x55, 0x72, 0xb7, 0x4b, 0x5c, 0x8a, 0x15, 0x98, + 0xb0, 0x35, 0x87, 0x98, 0xb4, 0x8c, 0x16, 0xd0, 0xd2, 0xe4, 0xda, 0xa5, 0x47, 0x4a, 0xe9, 0xb1, + 0x72, 0x11, 0x2f, 0xc6, 0xa0, 0x0b, 0xac, 0x6b, 0xb6, 0xee, 0x4a, 0x4d, 0xab, 0x23, 0x87, 0x16, + 0xd8, 0x45, 0x3c, 0x0f, 0x10, 0x56, 0x41, 0x6f, 0x95, 0xc7, 0x3c, 0x33, 0xea, 0x24, 0x3b, 0xb9, + 0xd9, 0xc2, 0x57, 0xe1, 0x34, 0xfb, 0x28, 0x97, 0x16, 0xd0, 0xd2, 0xd9, 0xda, 0xbc, 0xc4, 0xab, + 0x8e, 0xc4, 0xcc, 0xae, 0x8d, 0x3d, 0x52, 0x4a, 0x6a, 0x78, 0x45, 0x54, 0xe1, 0xe9, 0x1b, 0x84, + 0x8e, 0x04, 0x7d, 0x0d, 0xc6, 0x4d, 0xad, 0x43, 0x92, 0x21, 0xc3, 0x11, 0x42, 0xf6, 0xaf, 0x89, + 0x3f, 0x22, 0x78, 0xe6, 0x96, 0xee, 0x86, 0x56, 0xdd, 0x13, 0xc4, 0x62, 0x06, 0x26, 0x76, 0x75, + 0x83, 0x12, 0xc7, 0xcf, 0x75, 0x52, 0x65, 0x5f, 0xb8, 0x02, 0x93, 0xb6, 0xd6, 0x26, 0x0d, 0x57, + 0xbf, 0x4f, 0x7c, 0x88, 0x4e, 0xa9, 0x67, 0xbc, 0x83, 0xdb, 0xfa, 0x7d, 0xe2, 0x01, 0xe8, 0x0b, + 0xa9, 0xb5, 0x4f, 0xcc, 0xf2, 0x78, 0x00, 0xa0, 0x77, 0xf2, 0x81, 0x77, 0x20, 0xde, 0x83, 0xe9, + 0x64, 0xb4, 0xae, 0x6d, 0x99, 0x2e, 0xc1, 0x6f, 0xc2, 0x19, 0x86, 0x92, 0x5b, 0x46, 0x0b, 0x63, + 0xb9, 0xc8, 0xaa, 0x43, 0x75, 0xfc, 0x22, 0x9c, 0x37, 0xc9, 0xc7, 0xb4, 0x11, 0x73, 0x1b, 0xc4, + 0x3b, 
0xe5, 0x1d, 0x6f, 0x0e, 0x5d, 0x3f, 0x40, 0x30, 0x7d, 0xc7, 0x6e, 0x1d, 0x6e, 0x9b, 0x58, + 0x51, 0x51, 0xe1, 0xa2, 0xe2, 0x55, 0x38, 0xdb, 0xf5, 0xad, 0xfa, 0x2d, 0xca, 0xda, 0x42, 0x08, + 0x2d, 0x84, 0x5d, 0x2c, 0xbd, 0xe3, 0x75, 0xf1, 0x86, 0xe6, 0xee, 0xab, 0x10, 0xa8, 0x7b, 0xff, + 0x8b, 0x77, 0x60, 0xfa, 0x3a, 0x31, 0xc8, 0xa1, 0x90, 0x8e, 0xd9, 0x14, 0x5f, 0x97, 0x40, 0x4c, + 0x30, 0xe4, 0x16, 0xe9, 0x11, 0xe3, 0xfd, 0x9d, 0x8f, 0x48, 0x93, 0xea, 0xbd, 0x3c, 0xbe, 0x40, + 0x81, 0x1e, 0x59, 0x05, 0x21, 0xe4, 0x8b, 0xe1, 0xf9, 0x68, 0x58, 0xa1, 0x93, 0x88, 0x3f, 0xb3, + 0x2e, 0x2f, 0x88, 0x9b, 0x2d, 0xbc, 0x07, 0xb3, 0x29, 0x97, 0x19, 0x8c, 0x2f, 0x67, 0x16, 0x22, + 0x69, 0x2f, 0x28, 0xcb, 0xb3, 0x5c, 0x5f, 0xe2, 0x2f, 0x08, 0x2e, 0x44, 0xd4, 0xe3, 0xa3, 0xf1, + 0x5e, 0x02, 0xf3, 0xcb, 0x3e, 0x16, 0xaf, 0x81, 0x9c, 0x8b, 0xc5, 0x88, 0x35, 0xdf, 0x08, 0xbe, + 0x0e, 0xe3, 0x3d, 0x9d, 0xf4, 0xfd, 0x3c, 0xce, 0xd5, 0x5e, 0x2d, 0x90, 0x87, 0xb4, 0xa5, 0x93, + 0xbe, 0xea, 0xdf, 0x16, 0x3f, 0x2b, 0xc1, 0x62, 0x8c, 0x2e, 0x49, 0x45, 0xf7, 0x04, 0xcb, 0xf8, + 0x04, 0xa8, 0x3e, 0x84, 0xe0, 0xd4, 0xb1, 0x20, 0xf8, 0x19, 0x81, 0x98, 0x05, 0x01, 0x9b, 0x1f, + 0x04, 0xca, 0x29, 0xad, 0x14, 0xce, 0x93, 0x22, 0xbd, 0xa4, 0xce, 0x70, 0xdb, 0xe8, 0xe8, 0xb3, + 0xe6, 0x0f, 0x04, 0x62, 0x62, 0xd6, 0xf0, 0x5b, 0x2e, 0x83, 0x00, 0xe8, 0x44, 0x09, 0x70, 0xbc, + 0x29, 0x75, 0x17, 0xc4, 0xc4, 0x94, 0x7a, 0xf2, 0xfc, 0xa9, 0xfd, 0x7b, 0x1e, 0xca, 0x4c, 0xbe, + 0x31, 0x34, 0xc0, 0x0e, 0xf0, 0xb7, 0x08, 0xa6, 0x12, 0xe3, 0x0d, 0x57, 0xf9, 0x38, 0xf1, 0x5e, + 0x09, 0x42, 0xf6, 0x74, 0x17, 0x57, 0x07, 0xca, 0xb9, 0x80, 0x15, 0xcb, 0x0c, 0xca, 0xcf, 0xff, + 0xfa, 0xe7, 0x9b, 0xd2, 0x0b, 0xe2, 0x9c, 0xf7, 0x90, 0xf9, 0x24, 0x10, 0x5d, 0xab, 0xca, 0xd5, + 0x83, 0xf0, 0x55, 0xe3, 0xae, 0x0c, 0x97, 0xc1, 0xa7, 0x00, 0xd1, 0x98, 0xc1, 0x2f, 0xf1, 0x3d, + 0x1d, 0x7a, 0x03, 0xe4, 0x85, 0xb4, 0x34, 0x50, 0x7c, 0x54, 0xfc, 0x40, 0x2a, 0x38, 0x08, 0xc4, + 0x3b, 0xf0, 0xc2, 0x18, 0x46, 0x21, 0x57, 
0x0f, 0xf0, 0x03, 0x04, 0x4f, 0xc5, 0xf7, 0x2b, 0xbe, + 0xc4, 0xb7, 0xcc, 0x79, 0x31, 0x08, 0xd5, 0xa3, 0xa8, 0x06, 0x74, 0x13, 0xab, 0x03, 0x85, 0x8d, + 0x8e, 0x44, 0x4c, 0x3c, 0x70, 0xf0, 0x77, 0x08, 0xa6, 0x12, 0x5c, 0x48, 0xab, 0x16, 0x6f, 0x39, + 0xe7, 0x41, 0x73, 0x75, 0xa0, 0x9c, 0x8e, 0x97, 0x69, 0xb9, 0xb6, 0xe8, 0x47, 0x12, 0x3e, 0x38, + 0x79, 0x28, 0x45, 0xe5, 0x3a, 0x80, 0xa9, 0x44, 0x63, 0xa7, 0x45, 0xc6, 0xdb, 0xd1, 0xc2, 0xcc, + 0x21, 0xf6, 0xbc, 0xed, 0x3d, 0x63, 0x47, 0xaa, 0x55, 0xcd, 0xa8, 0xd6, 0x63, 0x04, 0x95, 0x8c, + 0x35, 0x8d, 0xaf, 0x1c, 0xa1, 0xab, 0xb9, 0x5c, 0x14, 0x8a, 0xcc, 0x0d, 0x91, 0x0e, 0x94, 0xe7, + 0x93, 0x1d, 0x3f, 0x3a, 0x8d, 0xfc, 0x54, 0x36, 0xc4, 0xda, 0x48, 0x91, 0xe3, 0xc9, 0xc8, 0xfc, + 0x89, 0xb9, 0x92, 0x36, 0xe0, 0xf0, 0xef, 0x08, 0xca, 0x69, 0x2b, 0x19, 0xbf, 0x91, 0xc7, 0x9c, + 0x13, 0x48, 0x5b, 0x89, 0xd7, 0xe9, 0x75, 0x5c, 0x4b, 0xab, 0x53, 0x4a, 0x66, 0x5e, 0x01, 0xff, + 0x46, 0x20, 0xa4, 0x2f, 0x27, 0x7c, 0x39, 0x97, 0x51, 0xfc, 0x8d, 0x2e, 0x5c, 0x29, 0x7e, 0x91, + 0x11, 0x73, 0x3d, 0x49, 0xcc, 0x30, 0xad, 0x42, 0x35, 0xc3, 0x5f, 0x95, 0xa0, 0x92, 0xb1, 0xbd, + 0xd2, 0xfa, 0x32, 0x7f, 0xe1, 0x15, 0x2b, 0xd0, 0x17, 0x68, 0xa0, 0xcc, 0x66, 0x75, 0x24, 0xa9, + 0xbd, 0x1b, 0x27, 0xfb, 0xa8, 0x8e, 0x54, 0xa8, 0x98, 0xe9, 0x9d, 0xfa, 0x13, 0x82, 0x4a, 0xc6, + 0xfe, 0x4b, 0x83, 0x23, 0x7f, 0x65, 0xa6, 0x8e, 0x90, 0x64, 0x6b, 0x56, 0xff, 0x47, 0x6b, 0x0a, + 0xdf, 0xa3, 0x87, 0xca, 0x5c, 0xea, 0xee, 0xfd, 0x53, 0xf9, 0x12, 0xed, 0x51, 0x6a, 0xbb, 0x2b, + 0xb2, 0xdc, 0xef, 0xf7, 0x47, 0x37, 0xb3, 0xd6, 0xa5, 0x7b, 0x72, 0xd3, 0xb0, 0xba, 0xad, 0x57, + 0x6c, 0x43, 0xa3, 0xbb, 0x96, 0xd3, 0x59, 0xce, 0x53, 0x8f, 0x7c, 0x15, 0x50, 0x95, 0x1c, 0xa2, + 0xb5, 0xd6, 0x7e, 0x45, 0x50, 0x6e, 0x5a, 0x1d, 0x2e, 0x8c, 0x6b, 0xf3, 0x69, 0xcf, 0x80, 0x4d, + 0x0f, 0xb3, 0x4d, 0xf4, 0xe1, 0x5b, 0xec, 0x5a, 0xdb, 0x32, 0x34, 0xb3, 0x2d, 0x59, 0x4e, 0x5b, + 0x6e, 0x13, 0xd3, 0x47, 0x54, 0x8e, 0xbc, 0x27, 0x7f, 0x90, 0x58, 0x8d, 0xbe, 
0x7e, 0x28, 0x09, + 0x37, 0x02, 0x03, 0xeb, 0x5e, 0xc2, 0x52, 0xe4, 0x47, 0xda, 0xaa, 0x3f, 0x0c, 0x85, 0xdb, 0xbe, + 0x70, 0x3b, 0x12, 0x6e, 0x6f, 0xd5, 0x77, 0x26, 0x7c, 0x27, 0xf5, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x05, 0x93, 0x22, 0x5e, 0xce, 0x11, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServiceMonitoringServiceClient interface { + // Create a `Service`. + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Get the named `Service`. + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) + // List `Service`s for this workspace. + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) + // Soft delete this `Service`. + DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. 
+ GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. + ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type serviceMonitoringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient { + return &serviceMonitoringServiceClient{cc} +} + +func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) { + out := new(Service) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) { + out := new(ListServiceLevelObjectivesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) { + out := new(ServiceLevelObjective) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service. +type ServiceMonitoringServiceServer interface { + // Create a `Service`. + CreateService(context.Context, *CreateServiceRequest) (*Service, error) + // Get the named `Service`. + GetService(context.Context, *GetServiceRequest) (*Service, error) + // List `Service`s for this workspace. + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Update this `Service`. + UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) + // Soft delete this `Service`. + DeleteService(context.Context, *DeleteServiceRequest) (*empty.Empty, error) + // Create a `ServiceLevelObjective` for the given `Service`. + CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Get a `ServiceLevelObjective` by name. + GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // List the `ServiceLevelObjective`s for the given `Service`. 
+ ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) + // Update the given `ServiceLevelObjective`. + UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) + // Delete the given `ServiceLevelObjective`. + DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*empty.Empty, error) +} + +// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations. +type UnimplementedServiceMonitoringServiceServer struct { +} + +func (*UnimplementedServiceMonitoringServiceServer) CreateService(ctx context.Context, req *CreateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetService(ctx context.Context, req *GetServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServices(ctx context.Context, req *ListServicesRequest) (*ListServicesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateService(ctx context.Context, req *UpdateServiceRequest) (*Service, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteService(ctx context.Context, req *DeleteServiceRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(ctx context.Context, req *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
CreateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(ctx context.Context, req *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(ctx context.Context, req *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(ctx context.Context, req *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented") +} +func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(ctx context.Context, req *DeleteServiceLevelObjectiveRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented") +} + +func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) { + s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv) +} + +func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateService(ctx, 
req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.ServiceMonitoringService/UpdateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceLevelObjectivesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteServiceLevelObjectiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.ServiceMonitoringService", + HandlerType: (*ServiceMonitoringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateService", + Handler: _ServiceMonitoringService_CreateService_Handler, + }, + { + MethodName: "GetService", + Handler: _ServiceMonitoringService_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _ServiceMonitoringService_ListServices_Handler, + }, + { + MethodName: "UpdateService", + Handler: _ServiceMonitoringService_UpdateService_Handler, + }, + { + MethodName: "DeleteService", + Handler: _ServiceMonitoringService_DeleteService_Handler, + }, + { + MethodName: "CreateServiceLevelObjective", + Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler, + }, + { + 
MethodName: "GetServiceLevelObjective", + Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler, + }, + { + MethodName: "ListServiceLevelObjectives", + Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler, + }, + { + MethodName: "UpdateServiceLevelObjective", + Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler, + }, + { + MethodName: "DeleteServiceLevelObjective", + Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/service_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go new file mode 100644 index 0000000000..8ba8246b2e --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/span_context.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The context of a span, attached to google.api.Distribution.Exemplars +// in google.api.Distribution values during aggregation. 
+// +// It contains the name of a span with format: +// projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SpanContext) Reset() { *m = SpanContext{} } +func (m *SpanContext) String() string { return proto.CompactTextString(m) } +func (*SpanContext) ProtoMessage() {} +func (*SpanContext) Descriptor() ([]byte, []int) { + return fileDescriptor_933032e252f1c5e4, []int{0} +} + +func (m *SpanContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SpanContext.Unmarshal(m, b) +} +func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic) +} +func (m *SpanContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SpanContext.Merge(m, src) +} +func (m *SpanContext) XXX_Size() int { + return xxx_messageInfo_SpanContext.Size(m) +} +func (m *SpanContext) XXX_DiscardUnknown() { + xxx_messageInfo_SpanContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SpanContext proto.InternalMessageInfo + +func (m *SpanContext) GetSpanName() string { + if m != nil { + return m.SpanName + } + return "" +} + +func init() { + proto.RegisterType((*SpanContext)(nil), "google.monitoring.v3.SpanContext") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/span_context.proto", fileDescriptor_933032e252f1c5e4) +} + +var 
fileDescriptor_933032e252f1c5e4 = []byte{ + // 197 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x2f, 0x2e, 0x48, 0xcc, 0x8b, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd4, 0x43, 0x28, 0xd4, 0x2b, 0x33, 0x56, 0xd2, 0xe2, + 0xe2, 0x0e, 0x2e, 0x48, 0xcc, 0x73, 0x86, 0x28, 0x15, 0x92, 0xe6, 0xe2, 0x04, 0x6b, 0xcd, 0x4b, + 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, + 0x3a, 0xad, 0x60, 0xe4, 0x92, 0x48, 0xce, 0xcf, 0xd5, 0xc3, 0x66, 0x90, 0x93, 0x00, 0x92, 0x31, + 0x01, 0x20, 0x0b, 0x03, 0x18, 0xa3, 0xec, 0xa0, 0x2a, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, + 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0xce, 0xd1, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, + 0xa3, 0x3a, 0xdd, 0x1a, 0xc1, 0x5b, 0xc5, 0x24, 0xe5, 0x0e, 0x31, 0xc0, 0x39, 0x27, 0xbf, 0x34, + 0x45, 0xcf, 0x17, 0x61, 0x61, 0x98, 0xf1, 0x29, 0x98, 0x64, 0x0c, 0x58, 0x32, 0x06, 0x21, 0x19, + 0x13, 0x66, 0x9c, 0xc4, 0x06, 0xb6, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x19, 0x01, + 0xcb, 0x1e, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go new file mode 100644 index 0000000000..3a024c6c4a --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -0,0 +1,1019 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/uptime.proto + +package monitoring + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + _ "google.golang.org/genproto/googleapis/api/annotations" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The regions from which an Uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in Uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). 
+ UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 +) + +var UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", +} + +var UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, +} + +func (x UptimeCheckRegion) String() string { + return proto.EnumName(UptimeCheckRegion_name, int32(x)) +} + +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. +// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. + GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +var GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", +} + +var GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, +} + +func (x GroupResourceType) String() string { + return proto.EnumName(GroupResourceType_name, int32(x)) +} + +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1} +} + +// Operational states for an internal checker. +type InternalChecker_State int32 + +const ( + // An internal checker should never be in the unspecified state. 
+ InternalChecker_UNSPECIFIED InternalChecker_State = 0 + // The checker is being created, provisioned, and configured. A checker in + // this state can be returned by `ListInternalCheckers` or + // `GetInternalChecker`, as well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + InternalChecker_CREATING InternalChecker_State = 1 + // The checker is running and available for use. A checker in this state + // can be returned by `ListInternalCheckers` or `GetInternalChecker` as + // well as by examining the [long running + // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. + // If a checker is being torn down, it is neither visible nor usable, so + // there is no "deleting" or "down" state. + InternalChecker_RUNNING InternalChecker_State = 2 +) + +var InternalChecker_State_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "CREATING", + 2: "RUNNING", +} + +var InternalChecker_State_value = map[string]int32{ + "UNSPECIFIED": 0, + "CREATING": 1, + "RUNNING": 2, +} + +func (x InternalChecker_State) String() string { + return proto.EnumName(InternalChecker_State_name, int32(x)) +} + +func (InternalChecker_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{0, 0} +} + +// Options to perform content matching. +type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32 + +const ( + // No content matcher type specified (maintained for backward + // compatibility, but deprecated for future use). + // Treated as `CONTAINS_STRING`. + UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0 + // Selects substring matching. The match succeeds if the output contains + // the `content` string. 
This is the default value for checks without + // a `matcher` option, or where the value of `matcher` is + // `CONTENT_MATCHER_OPTION_UNSPECIFIED`. + UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1 + // Selects negation of substring matching. The match succeeds if the + // output does _NOT_ contain the `content` string. + UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2 + // Selects regular-expression matching. The match succeeds of the output + // matches the regular expression specified in the `content` string. + UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3 + // Selects negation of regular-expression matching. The match succeeds if + // the output does _NOT_ match the regular expression specified in the + // `content` string. + UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4 +) + +var UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{ + 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED", + 1: "CONTAINS_STRING", + 2: "NOT_CONTAINS_STRING", + 3: "MATCHES_REGEX", + 4: "NOT_MATCHES_REGEX", +} + +var UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{ + "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0, + "CONTAINS_STRING": 1, + "NOT_CONTAINS_STRING": 2, + "MATCHES_REGEX": 3, + "NOT_MATCHES_REGEX": 4, +} + +func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string { + return proto.EnumName(UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name, int32(x)) +} + +func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 3, 0} +} + +// An internal checker allows Uptime checks to run on private/internal GCP +// resources. +// +// Deprecated: Do not use. 
+type InternalChecker struct { + // A unique resource name for this InternalChecker. The format is: + // + // `projects/[PROJECT_ID]/internalCheckers/[INTERNAL_CHECKER_ID]`. + // + // `[PROJECT_ID]` is the Stackdriver Workspace project for the + // Uptime check config associated with the internal checker. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The checker's human-readable name. The display name + // should be unique within a Stackdriver Workspace in order to make it easier + // to identify; however, uniqueness is not enforced. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the + // internal resource lives (ex: "default"). + Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // The GCP zone the Uptime check should egress from. Only respected for + // internal Uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` + // The GCP project ID where the internal checker lives. Not necessary + // the same as the Workspace project. + PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"` + // The current operational state of the internal checker. 
+ State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalChecker) Reset() { *m = InternalChecker{} } +func (m *InternalChecker) String() string { return proto.CompactTextString(m) } +func (*InternalChecker) ProtoMessage() {} +func (*InternalChecker) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{0} +} + +func (m *InternalChecker) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InternalChecker.Unmarshal(m, b) +} +func (m *InternalChecker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InternalChecker.Marshal(b, m, deterministic) +} +func (m *InternalChecker) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalChecker.Merge(m, src) +} +func (m *InternalChecker) XXX_Size() int { + return xxx_messageInfo_InternalChecker.Size(m) +} +func (m *InternalChecker) XXX_DiscardUnknown() { + xxx_messageInfo_InternalChecker.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalChecker proto.InternalMessageInfo + +func (m *InternalChecker) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InternalChecker) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *InternalChecker) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *InternalChecker) GetGcpZone() string { + if m != nil { + return m.GcpZone + } + return "" +} + +func (m *InternalChecker) GetPeerProjectId() string { + if m != nil { + return m.PeerProjectId + } + return "" +} + +func (m *InternalChecker) GetState() InternalChecker_State { + if m != nil { + return m.State + } + return InternalChecker_UNSPECIFIED +} + +// This message configures which resources and services to monitor for +// availability. 
+type UptimeCheckConfig struct { + // A unique resource name for this Uptime check configuration. The format is: + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + // + // This field should be omitted when creating the Uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the Uptime check configuration. The display name + // should be unique within a Stackdriver Workspace in order to make it easier + // to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are valid to be assigned to Resource: + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of Uptime check request. + // + // Types that are valid to be assigned to CheckRequestType: + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the Uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `60s`. + Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The content that is expected to appear in the data returned by the target + // server against which the check is run. 
Currently, only the first entry + // in the `content_matchers` list is supported, and additional entries will + // be ignored. This field is optional and should only be specified if a + // content match is required as part of the/ Uptime check. + ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The list of regions from which the check will be run. + // Some regions contain one location, and others contain more than one. + // If this field is specified, enough regions must be provided to include a + // minimum of 3 locations. Not specifying this field will result in Uptime + // checks running from all available regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // If this is `true`, then checks are made only from the 'internal_checkers'. + // If it is `false`, then checks are made only from the 'selected_regions'. + // It is an error to provide 'selected_regions' when is_internal is `true`, + // or to provide 'internal_checkers' when is_internal is `false`. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` // Deprecated: Do not use. + // The internal checkers that this check will egress from. If `is_internal` is + // `true` and this list is empty, the check will egress from all the + // InternalCheckers configured for the project that owns this + // `UptimeCheckConfig`. + InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` // Deprecated: Do not use. 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} } +func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig) ProtoMessage() {} +func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1} +} + +func (m *UptimeCheckConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig.Unmarshal(m, b) +} +func (m *UptimeCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig.Merge(m, src) +} +func (m *UptimeCheckConfig) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig.Size(m) +} +func (m *UptimeCheckConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig proto.InternalMessageInfo + +func (m *UptimeCheckConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UptimeCheckConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` +} + +type UptimeCheckConfig_ResourceGroup_ struct { + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} + +func (m *UptimeCheckConfig) GetResource() 
isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_HttpCheck_ struct { + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` +} + +type UptimeCheckConfig_TcpCheck_ struct { + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetPeriod() *duration.Duration { + if m != nil { + return m.Period + } + return nil +} + +func (m *UptimeCheckConfig) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if m != nil { + return m.ContentMatchers + } + 
return nil +} + +func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if m != nil { + return m.SelectedRegions + } + return nil +} + +// Deprecated: Do not use. +func (m *UptimeCheckConfig) GetIsInternal() bool { + if m != nil { + return m.IsInternal + } + return false +} + +// Deprecated: Do not use. +func (m *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if m != nil { + return m.InternalCheckers + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*UptimeCheckConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } +} + +// The resource submessage for group checks. It can be used instead of a +// monitored resource, when multiple resources are being monitored. +type UptimeCheckConfig_ResourceGroup struct { + // The group of resources being monitored. Should be only the `[GROUP_ID]`, + // and not the full-path `projects/[PROJECT_ID]/groups/[GROUP_ID]`. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + // The resource type of the group members. 
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} } +func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} +func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 0} +} + +func (m *UptimeCheckConfig_ResourceGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Merge(m, src) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Size(m) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_ResourceGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_ResourceGroup proto.InternalMessageInfo + +func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string { + if m != nil { + return m.GroupId + } + return "" +} + +func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { + if m != nil { + return m.ResourceType + } + return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED +} + +// Information involved in an HTTP/HTTPS Uptime check request. +type UptimeCheckConfig_HttpCheck struct { + // If `true`, use HTTPS instead of HTTP to run the check. 
+ UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + // Optional (defaults to "/"). The path to the page against which to run + // the check. Will be combined with the `host` (specified within the + // `monitored_resource`) and `port` to construct the full URL. If the + // provided path does not begin with "/", a "/" will be prepended + // automatically. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when + // `use_ssl` is `true`). The TCP port on the HTTP server against which to + // run the check. Will be combined with host (specified within the + // `monitored_resource`) and `path` to construct the full URL. + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` + // Boolean specifiying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. + // On Get/List calls, if `mask_headers` is set to `true` then the headers + // will be obscured with `******.` + MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"` + // The list of headers to send as part of the Uptime check request. + // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). 
+ // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + // The maximum number of headers allowed is 100. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Boolean specifying whether to include SSL certificate validation as a + // part of the Uptime check. Only applies to checks where + // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`, + // setting `validate_ssl` to `true` has no effect. + ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} } +func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1} +} + +func (m *UptimeCheckConfig_HttpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.Merge(m, src) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck 
proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if m != nil { + return m.UseSsl + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if m != nil { + return m.AuthInfo + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if m != nil { + return m.MaskHeaders + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool { + if m != nil { + return m.ValidateSsl + } + return false +} + +// The authentication parameters to provide to the specified resource or +// URL that requires a username and password. Currently, only +// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is +// supported in Uptime checks. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + // The username to use when authenticating with the HTTP server. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to use when authenticating with the HTTP server. 
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *m = UptimeCheckConfig_HttpCheck_BasicAuthentication{} +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return proto.CompactTextString(m) +} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1, 0} +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Merge(m, src) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Information required for a TCP 
Uptime check request. +type UptimeCheckConfig_TcpCheck struct { + // The TCP port on the server against which to run the check. Will be + // combined with host (specified within the `monitored_resource`) to + // construct the full URL. Required. + Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} } +func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 2} +} + +func (m *UptimeCheckConfig_TcpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.Merge(m, src) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Size(m) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_TcpCheck proto.InternalMessageInfo + +func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +// Optional. Used to perform content matching. This allows matching based on +// substrings and regular expressions, together with their negations. Only the +// first 4 MB of an HTTP or HTTPS check's response (and the first +// 1 MB of a TCP check's response) are examined for purposes of content +// matching. 
+type UptimeCheckConfig_ContentMatcher struct { + // String or regex content to match. Maximum 1024 bytes. An empty `content` + // string indicates no content matching is to be performed. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The type of content matcher that will be applied to the server output, + // compared to the `content` string when the check is run. + Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} } +func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} +func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 3} +} + +func (m *UptimeCheckConfig_ContentMatcher) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Merge(m, src) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Size(m) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_ContentMatcher proto.InternalMessageInfo + +func (m 
*UptimeCheckConfig_ContentMatcher) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption { + if m != nil { + return m.Matcher + } + return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED +} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + // A broad region category in which the IP address is located. + Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The IP address from which the Uptime check originates. This is a fully + // specified IP address (not an IP address range). Most IP addresses, as of + // this publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely, and should support + // interpreting this field in either IPv4 or IPv6 format. 
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} } +func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckIp) ProtoMessage() {} +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{2} +} + +func (m *UptimeCheckIp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckIp.Unmarshal(m, b) +} +func (m *UptimeCheckIp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckIp.Marshal(b, m, deterministic) +} +func (m *UptimeCheckIp) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckIp.Merge(m, src) +} +func (m *UptimeCheckIp) XXX_Size() int { + return xxx_messageInfo_UptimeCheckIp.Size(m) +} +func (m *UptimeCheckIp) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckIp.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckIp proto.InternalMessageInfo + +func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if m != nil { + return m.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (m *UptimeCheckIp) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *UptimeCheckIp) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value) + proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value) + proto.RegisterEnum("google.monitoring.v3.InternalChecker_State", InternalChecker_State_name, InternalChecker_State_value) + proto.RegisterEnum("google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption", 
UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name, UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value) + proto.RegisterType((*InternalChecker)(nil), "google.monitoring.v3.InternalChecker") + proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig") + proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup") + proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry") + proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication") + proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck") + proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher") + proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp") +} + +func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor_7ca0e36dfc8221d8) } + +var fileDescriptor_7ca0e36dfc8221d8 = []byte{ + // 1290 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6e, 0x1b, 0x37, + 0x13, 0xcf, 0x4a, 0xb6, 0x24, 0x8f, 0x6c, 0x4b, 0x66, 0x9c, 0x2f, 0x6b, 0x01, 0x09, 0x1c, 0x07, + 0x5f, 0x3e, 0x23, 0x01, 0xa4, 0x2f, 0x16, 0x5a, 0xb4, 0x0e, 0x90, 0x62, 0xa5, 0x6c, 0x2d, 0xa1, + 0xb6, 0x24, 0x50, 0x52, 0x9a, 0xa6, 0x41, 0x89, 0xcd, 0x8a, 0x96, 0xb6, 0x96, 0x96, 0xdb, 0x25, + 0xd7, 0xa9, 0x63, 0xe4, 0x09, 0x7a, 0xef, 0xbd, 0xe8, 0x2d, 0x8f, 0xd2, 0x57, 0xe8, 0x1b, 0xf4, + 0xd2, 0x43, 0x2f, 0x3d, 0x16, 0x24, 0x77, 0x65, 0xc9, 0x76, 0x11, 0x3b, 0x37, 0xfe, 0xe6, 0xcf, + 0x8f, 0x9c, 0xe1, 0xcc, 0x90, 0x70, 0x6f, 0xc8, 0xd8, 0x70, 0x4c, 0x2b, 0x13, 0xe6, 0x7b, 0x82, + 
0x85, 0x9e, 0x3f, 0xac, 0x1c, 0x57, 0x2b, 0x51, 0x20, 0xbc, 0x09, 0x2d, 0x07, 0x21, 0x13, 0x0c, + 0xad, 0x6b, 0x93, 0xf2, 0x99, 0x49, 0xf9, 0xb8, 0x5a, 0xba, 0x1f, 0x3b, 0x3a, 0x81, 0x97, 0x38, + 0xd3, 0x01, 0x09, 0x29, 0x67, 0x51, 0xe8, 0xc6, 0xae, 0xa5, 0x8d, 0x19, 0xa3, 0x73, 0xaa, 0xbb, + 0xb1, 0x4a, 0xa1, 0xd7, 0xd1, 0x61, 0x65, 0x10, 0x85, 0x8e, 0xf0, 0x98, 0xaf, 0xf5, 0x5b, 0xbf, + 0xa4, 0xa0, 0xd0, 0xf4, 0x05, 0x0d, 0x7d, 0x67, 0x5c, 0x1f, 0x51, 0xf7, 0x88, 0x86, 0x08, 0xc1, + 0x82, 0xef, 0x4c, 0xa8, 0x69, 0x6c, 0x1a, 0xdb, 0x4b, 0x58, 0xad, 0xd1, 0x3d, 0x58, 0x1e, 0x78, + 0x3c, 0x18, 0x3b, 0x27, 0x44, 0xe9, 0x52, 0x4a, 0x97, 0x8f, 0x65, 0x2d, 0x69, 0x62, 0x42, 0xd6, + 0xa7, 0xe2, 0x0d, 0x0b, 0x8f, 0xcc, 0xb4, 0xd2, 0x26, 0x10, 0x6d, 0x40, 0x6e, 0xe8, 0x06, 0xe4, + 0x2d, 0xf3, 0xa9, 0xb9, 0xa0, 0x55, 0x43, 0x37, 0x78, 0xc9, 0x7c, 0x8a, 0x1e, 0x40, 0x21, 0xa0, + 0x34, 0x24, 0x41, 0xc8, 0xbe, 0xa7, 0xae, 0x20, 0xde, 0xc0, 0xcc, 0x28, 0x8b, 0x15, 0x29, 0xee, + 0x68, 0x69, 0x73, 0x80, 0x2c, 0x58, 0xe4, 0xc2, 0x11, 0xd4, 0xcc, 0x6e, 0x1a, 0xdb, 0xab, 0x3b, + 0x8f, 0xca, 0x97, 0x65, 0xab, 0x7c, 0x2e, 0x92, 0x72, 0x57, 0xba, 0x60, 0xed, 0xb9, 0x55, 0x85, + 0x45, 0x85, 0x51, 0x01, 0xf2, 0xfd, 0x56, 0xb7, 0x63, 0xd7, 0x9b, 0x5f, 0x36, 0xed, 0x67, 0xc5, + 0x1b, 0x68, 0x19, 0x72, 0x75, 0x6c, 0x5b, 0xbd, 0x66, 0x6b, 0xaf, 0x68, 0xa0, 0x3c, 0x64, 0x71, + 0xbf, 0xd5, 0x92, 0x20, 0xb5, 0x9b, 0x32, 0x8d, 0xad, 0xbf, 0x0b, 0xb0, 0xd6, 0x57, 0x57, 0xa5, + 0x78, 0xeb, 0xcc, 0x3f, 0xf4, 0x86, 0x1f, 0x9b, 0xa5, 0x16, 0xa0, 0x8b, 0xf7, 0xa8, 0x12, 0x96, + 0xdf, 0xb9, 0x93, 0x44, 0xe5, 0x04, 0x5e, 0xf9, 0x20, 0xb1, 0xc2, 0xb1, 0x51, 0xe3, 0x06, 0x5e, + 0x9b, 0x9c, 0x17, 0xa2, 0xef, 0x60, 0x35, 0x61, 0x21, 0xc3, 0x90, 0x45, 0x81, 0xca, 0x70, 0x7e, + 0xe7, 0x93, 0xcb, 0x33, 0x74, 0x21, 0x8e, 0x72, 0xc2, 0xb4, 0x27, 0x9d, 0x1b, 0x37, 0xf0, 0x4a, + 0x38, 0x2b, 0x40, 0x18, 0x60, 0x24, 0x44, 0x40, 0x5c, 0xe9, 0x62, 0x2e, 0x2a, 0xee, 0xc7, 0x57, + 0xe5, 0x6e, 0x08, 0x11, 0x28, 0xdc, 
0x30, 0xf0, 0xd2, 0x28, 0x01, 0xa8, 0x0d, 0x4b, 0xc2, 0x4d, + 0x28, 0x33, 0x8a, 0xf2, 0xff, 0x57, 0xa5, 0xec, 0xb9, 0x53, 0xc6, 0x9c, 0x88, 0xd7, 0xe8, 0x31, + 0x64, 0x02, 0x1a, 0x7a, 0x6c, 0xa0, 0xca, 0x23, 0xbf, 0xb3, 0x91, 0xb0, 0x25, 0x65, 0x5f, 0x7e, + 0x16, 0x97, 0x3d, 0x8e, 0x0d, 0x51, 0x15, 0xb2, 0x92, 0x9a, 0x45, 0xc2, 0xcc, 0x7d, 0xc8, 0x27, + 0xb1, 0x44, 0x0e, 0x14, 0x5d, 0xe6, 0x0b, 0xea, 0x0b, 0x32, 0x71, 0x84, 0x3b, 0xa2, 0x21, 0x37, + 0x97, 0x36, 0xd3, 0xdb, 0xf9, 0x9d, 0x4f, 0xaf, 0x7a, 0xfe, 0xba, 0xf6, 0x3f, 0xd0, 0xee, 0xb8, + 0xe0, 0xce, 0x61, 0x8e, 0x30, 0x14, 0x39, 0x1d, 0x53, 0x57, 0xa8, 0xf2, 0x18, 0x7a, 0xcc, 0xe7, + 0x26, 0x6c, 0xa6, 0xb7, 0x57, 0x77, 0xfe, 0xf7, 0xc1, 0x2d, 0xb0, 0xb2, 0xc7, 0x85, 0x84, 0x40, + 0x63, 0x8e, 0xee, 0x43, 0xde, 0xe3, 0xc4, 0x8b, 0x9b, 0xc3, 0x2c, 0x6c, 0x1a, 0xdb, 0xb9, 0x5a, + 0xca, 0x34, 0x30, 0x78, 0x3c, 0x69, 0x19, 0xf4, 0x1c, 0xd6, 0x12, 0x0b, 0x7d, 0x33, 0x32, 0xb8, + 0x55, 0x15, 0xdc, 0x7f, 0xaf, 0xd4, 0x6d, 0x8a, 0xb1, 0xe8, 0xcd, 0x0b, 0x79, 0xe9, 0x47, 0x58, + 0x99, 0x2b, 0x31, 0x35, 0x0d, 0xe4, 0x42, 0xf6, 0xba, 0x11, 0x4f, 0x03, 0x89, 0x9b, 0x03, 0xb4, + 0x0f, 0xd3, 0xea, 0x23, 0xe2, 0x24, 0xd0, 0x0d, 0xf4, 0xaf, 0x91, 0x2b, 0xba, 0x84, 0xbb, 0x77, + 0x12, 0x50, 0xbc, 0x1c, 0xce, 0xa0, 0xd2, 0xef, 0x69, 0x58, 0x9a, 0x56, 0x20, 0xba, 0x0d, 0xd9, + 0x88, 0x53, 0xc2, 0xf9, 0x58, 0xed, 0x9a, 0xc3, 0x99, 0x88, 0xd3, 0x2e, 0x1f, 0xcb, 0x46, 0x0e, + 0x1c, 0x31, 0x8a, 0x9b, 0x55, 0xad, 0x95, 0x8c, 0x85, 0x42, 0xf5, 0xe5, 0x22, 0x56, 0x6b, 0xf4, + 0x1a, 0x96, 0x9c, 0x48, 0x8c, 0x88, 0xe7, 0x1f, 0xb2, 0xb8, 0xc9, 0xec, 0x6b, 0x37, 0x42, 0xb9, + 0xe6, 0x70, 0xcf, 0xb5, 0x22, 0x31, 0xa2, 0xbe, 0xf0, 0x5c, 0x5d, 0x5f, 0x39, 0xc9, 0xdb, 0xf4, + 0x0f, 0x99, 0x1c, 0x20, 0x13, 0x87, 0x1f, 0x91, 0x11, 0x75, 0x06, 0x32, 0xff, 0x8b, 0xea, 0xa4, + 0x79, 0x29, 0x6b, 0x68, 0x11, 0x7a, 0x01, 0xd9, 0x44, 0x9b, 0x51, 0xb7, 0xf3, 0xf4, 0xfa, 0x87, + 0x88, 0xb9, 0x6c, 0x5f, 0x84, 0x27, 0x38, 0xa1, 0x93, 0x9b, 0x1f, 0x3b, 
0x63, 0x6f, 0xe0, 0x08, + 0x9d, 0xa6, 0xac, 0xde, 0x3c, 0x91, 0x75, 0xf9, 0xb8, 0x74, 0x00, 0x37, 0x2f, 0x09, 0x00, 0x95, + 0x20, 0x17, 0x71, 0x79, 0xed, 0xd3, 0x79, 0x38, 0xc5, 0x52, 0x17, 0x38, 0x9c, 0xbf, 0x61, 0xe1, + 0x20, 0x4e, 0xf1, 0x14, 0x97, 0x76, 0x61, 0x79, 0xf6, 0x28, 0xa8, 0x08, 0xe9, 0x23, 0x7a, 0x12, + 0x53, 0xc8, 0x25, 0x5a, 0x87, 0xc5, 0x63, 0x67, 0x1c, 0x25, 0xa3, 0x54, 0x83, 0xdd, 0xd4, 0x67, + 0x46, 0xe9, 0x2e, 0xe4, 0x92, 0x59, 0x30, 0xbd, 0x2e, 0xe3, 0xec, 0xba, 0x4a, 0xef, 0x53, 0xb0, + 0x3a, 0xdf, 0x6c, 0xf2, 0x85, 0x8a, 0xdb, 0x2d, 0x29, 0xbc, 0x18, 0x22, 0x0a, 0xd9, 0xb8, 0xa1, + 0xe3, 0x92, 0xfb, 0xea, 0xe3, 0xfa, 0xf9, 0x1c, 0x6c, 0x07, 0x7a, 0x7e, 0xc4, 0xdc, 0x5b, 0x3f, + 0x1b, 0xb0, 0x7e, 0x99, 0x05, 0x7a, 0x00, 0x5b, 0xf5, 0x76, 0xab, 0x67, 0xb7, 0x7a, 0xe4, 0xc0, + 0xea, 0xd5, 0x1b, 0x36, 0x26, 0xed, 0x4e, 0xaf, 0xd9, 0x6e, 0x91, 0xf9, 0x97, 0xea, 0x26, 0x14, + 0xa4, 0x9d, 0xd5, 0x6c, 0x75, 0x49, 0xb7, 0x87, 0xf5, 0x83, 0x75, 0x1b, 0x6e, 0xb6, 0xda, 0x3d, + 0x72, 0x5e, 0x91, 0x42, 0x6b, 0xb0, 0xa2, 0xd9, 0xba, 0x04, 0xdb, 0x7b, 0xf6, 0x8b, 0x62, 0x1a, + 0xdd, 0x82, 0x35, 0x69, 0x3b, 0x2f, 0x5e, 0xd8, 0xfd, 0xcb, 0xf8, 0xc3, 0xfa, 0xd3, 0x80, 0x47, + 0x33, 0xd1, 0xea, 0xf8, 0x9d, 0xc0, 0xe3, 0x65, 0x97, 0x4d, 0x2a, 0x17, 0x1f, 0xc0, 0x27, 0xf1, + 0x9b, 0xcd, 0x2b, 0xa7, 0xf1, 0xea, 0x5d, 0xfc, 0xa3, 0x99, 0xb1, 0xe2, 0x95, 0x53, 0x2d, 0xd3, + 0x33, 0x85, 0xb8, 0x4a, 0xfa, 0x0e, 0xd9, 0x2c, 0x1c, 0x3a, 0xbe, 0xf7, 0x56, 0xd5, 0x0f, 0xaf, + 0x9c, 0xce, 0xc2, 0x6b, 0xd0, 0x7c, 0x7e, 0xc8, 0xc6, 0xb2, 0x7c, 0x2a, 0xa7, 0x7a, 0x71, 0x0d, + 0x57, 0xe3, 0x61, 0x0d, 0x20, 0x97, 0x0c, 0x8c, 0xda, 0x3a, 0x20, 0xad, 0x0f, 0xe9, 0x0f, 0x11, + 0xe5, 0x42, 0xcd, 0x9f, 0xad, 0x9f, 0x0c, 0x58, 0x99, 0x89, 0xbc, 0x19, 0xa0, 0x2f, 0x20, 0xa3, + 0xc7, 0xb2, 0x2a, 0xa1, 0x6b, 0x4c, 0xe5, 0xd8, 0x4d, 0xf6, 0xc3, 0x98, 0xe9, 0xbe, 0x49, 0xfa, + 0x21, 0xc1, 0xe8, 0x0e, 0x80, 0x17, 0x10, 0x67, 0x30, 0x08, 0x29, 0xe7, 0xf1, 0x2f, 0x6a, 0xc9, + 0x0b, 0x2c, 
0x2d, 0x78, 0x48, 0xe7, 0xfe, 0x21, 0x9a, 0x17, 0xfd, 0x07, 0x10, 0xb6, 0xf7, 0x2e, + 0x96, 0x4a, 0x16, 0xd2, 0xfd, 0xae, 0x55, 0x34, 0x10, 0x40, 0xc6, 0xee, 0xe3, 0x76, 0xc7, 0xd6, + 0x15, 0xd1, 0x6d, 0xf7, 0x7b, 0x0d, 0x62, 0x1d, 0xd8, 0xb8, 0x59, 0xb7, 0x8a, 0x69, 0x54, 0x84, + 0x65, 0xab, 0xdb, 0xb4, 0x48, 0xc7, 0x92, 0xae, 0xf5, 0xe2, 0xc2, 0xc3, 0x6f, 0x61, 0xed, 0xc2, + 0x68, 0x45, 0x77, 0x60, 0x03, 0xdb, 0xdd, 0x76, 0x1f, 0xd7, 0x6d, 0xd2, 0xfb, 0xa6, 0x63, 0x93, + 0x0b, 0x5f, 0xa8, 0x66, 0xab, 0xdb, 0xb3, 0x5a, 0x75, 0xbb, 0x68, 0xa0, 0x0d, 0xb8, 0x65, 0x7d, + 0xdd, 0x25, 0xf6, 0x7e, 0x8d, 0xec, 0xb7, 0xad, 0x67, 0xa4, 0x66, 0xed, 0x4b, 0x0d, 0x2e, 0xa6, + 0x6a, 0xbf, 0x1a, 0x60, 0xba, 0x6c, 0x72, 0x69, 0xd6, 0x6a, 0x79, 0x1d, 0x5e, 0x47, 0xbe, 0xc0, + 0x1d, 0xe3, 0xe5, 0xd3, 0xd8, 0x68, 0xc8, 0xc6, 0x8e, 0x3f, 0x2c, 0xb3, 0x70, 0x58, 0x19, 0x52, + 0x5f, 0xbd, 0xcf, 0x95, 0xb3, 0xf2, 0x9c, 0xff, 0x54, 0x3f, 0x39, 0x43, 0xef, 0x53, 0xa5, 0x3d, + 0x4d, 0x50, 0x1f, 0xb3, 0x68, 0x90, 0xfc, 0xa8, 0xe4, 0x5e, 0xcf, 0xab, 0xbf, 0x25, 0xca, 0x57, + 0x4a, 0xf9, 0xea, 0x4c, 0xf9, 0xea, 0x79, 0xf5, 0x75, 0x46, 0x6d, 0x52, 0xfd, 0x27, 0x00, 0x00, + 0xff, 0xff, 0x57, 0x4f, 0xe1, 0x77, 0xb8, 0x0b, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go new file mode 100644 index 0000000000..3f9631b0a0 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -0,0 +1,830 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/uptime_service.proto + +package monitoring + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + _ "github.com/golang/protobuf/ptypes/duration" + empty "github.com/golang/protobuf/ptypes/empty" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + // Required. The project whose Uptime check configurations are listed. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} } +func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{0} +} + +func (m *ListUptimeCheckConfigsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsRequest.Merge(m, src) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Size(m) +} +func (m *ListUptimeCheckConfigsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckConfigsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + // The returned Uptime check configurations. 
+ UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of Uptime check configurations for the project, + // irrespective of any pagination. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} } +func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{1} +} + +func (m *ListUptimeCheckConfigsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsResponse.Merge(m, src) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Size(m) +} +func (m *ListUptimeCheckConfigsResponse) XXX_DiscardUnknown() 
{ + xxx_messageInfo_ListUptimeCheckConfigsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfigs + } + return nil +} + +func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + // Required. The Uptime check configuration to retrieve. The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} } +func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{2} +} + +func (m *GetUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *GetUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *GetUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUptimeCheckConfigRequest.Merge(m, src) +} +func (m *GetUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Size(m) +} +func (m *GetUptimeCheckConfigRequest) XXX_DiscardUnknown() 
{ + xxx_messageInfo_GetUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *GetUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + // Required. The project in which to create the Uptime check. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The new Uptime check configuration. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} } +func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{3} +} + +func (m *CreateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *CreateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *CreateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUptimeCheckConfigRequest.Merge(m, src) +} +func (m *CreateUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_CreateUptimeCheckConfigRequest.Size(m) +} +func (m *CreateUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUptimeCheckConfigRequest.DiscardUnknown(m) +} + 
+var xxx_messageInfo_CreateUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *CreateUptimeCheckConfigRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `UpdateUptimeCheckConfig` request. +type UpdateUptimeCheckConfigRequest struct { + // Optional. If present, only the listed fields in the current Uptime check + // configuration are updated with values from the new configuration. If this + // field is empty, then the current configuration is completely replaced with + // the new configuration. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. If an `updateMask` has been specified, this field gives + // the values for the set of fields mentioned in the `updateMask`. If an + // `updateMask` has not been given, this Uptime check configuration replaces + // the current configuration. If a field is mentioned in `updateMask` but + // the corresonding field is omitted in this partial Uptime check + // configuration, it has the effect of deleting/clearing the field from the + // configuration on the server. + // + // The following fields can be updated: `display_name`, + // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and + // `selected_regions`. 
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} } +func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{4} +} + +func (m *UpdateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.Merge(m, src) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Size(m) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + // Required. The Uptime check configuration to delete. 
The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} } +func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{5} +} + +func (m *DeleteUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.Merge(m, src) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Size(m) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *DeleteUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. 
+ // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} } +func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{6} +} + +func (m *ListUptimeCheckIpsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckIpsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsRequest.Merge(m, src) +} +func (m *ListUptimeCheckIpsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Size(m) +} +func (m *ListUptimeCheckIpsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckIpsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckIpsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// 
The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} } +func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{7} +} + +func (m *ListUptimeCheckIpsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckIpsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsResponse.Merge(m, src) +} +func (m *ListUptimeCheckIpsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Size(m) +} +func (m *ListUptimeCheckIpsResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ListUptimeCheckIpsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if m != nil { + return m.UptimeCheckIps + } + return nil +} + +func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest") + proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse") + proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest") + proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest") + proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest") + proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest") + proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest") + proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor_6222dd2aa0db8eee) +} + +var fileDescriptor_6222dd2aa0db8eee = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0xd6, 0x24, 0xa5, 0x62, 0xa7, 0xe2, 0xd7, 0x74, 0xd5, 0xcd, 0x7a, 0x69, 0x08, 0x46, 0x82, + 0x25, 0x6c, 0x6d, 0xd8, 0x70, 0xa1, 0x2b, 0x2a, 0x39, 0x01, 0x56, 0x15, 0xac, 0xb4, 0x4a, 0xd9, + 0x22, 0x50, 0xa4, 0x68, 0xd6, 0x99, 0x38, 0x43, 0x6c, 0x8f, 0x6b, 0x8f, 0xb3, 0x50, 0xd4, 0x0b, + 0x37, 0xce, 0xfb, 0x1f, 0x70, 0x82, 0x4a, 0xfc, 0x05, 
0x08, 0x09, 0x71, 0x2c, 0x37, 0xb8, 0xe5, + 0x80, 0x7a, 0xe0, 0xc6, 0x7f, 0xc0, 0x09, 0x79, 0x3c, 0xc6, 0x4e, 0x62, 0x7b, 0x1d, 0x69, 0x6f, + 0xc9, 0xbc, 0xe7, 0xf7, 0xbe, 0xf7, 0xbd, 0xf9, 0x3e, 0x0d, 0x7c, 0xd3, 0x62, 0xcc, 0xb2, 0x89, + 0xee, 0x30, 0x97, 0x72, 0xe6, 0x53, 0xd7, 0xd2, 0x67, 0x1d, 0x3d, 0xf4, 0x38, 0x75, 0xc8, 0x30, + 0x20, 0xfe, 0x8c, 0x9a, 0x44, 0xf3, 0x7c, 0xc6, 0x19, 0xda, 0x8c, 0x53, 0xb5, 0x34, 0x55, 0x9b, + 0x75, 0x94, 0x97, 0x65, 0x01, 0xec, 0x51, 0x1d, 0xbb, 0x2e, 0xe3, 0x98, 0x53, 0xe6, 0x06, 0xf1, + 0x37, 0xca, 0x56, 0x26, 0x6a, 0xda, 0x94, 0xb8, 0x5c, 0x06, 0x5e, 0xc9, 0x04, 0xc6, 0x94, 0xd8, + 0xa3, 0xe1, 0x29, 0x99, 0xe0, 0x19, 0x65, 0xbe, 0x4c, 0xd8, 0xce, 0x24, 0xf8, 0x24, 0x60, 0xa1, + 0x9f, 0x00, 0x51, 0x5e, 0x2d, 0xc1, 0x2c, 0x53, 0x9a, 0x32, 0x45, 0xfc, 0x3b, 0x0d, 0xc7, 0xfa, + 0x28, 0xf4, 0x05, 0x30, 0x19, 0xdf, 0x59, 0x8e, 0x13, 0xc7, 0xe3, 0x5f, 0xcb, 0x60, 0x6b, 0x39, + 0x18, 0x03, 0x74, 0x70, 0x30, 0x8d, 0x33, 0xd4, 0x1f, 0x00, 0xbc, 0xf9, 0x09, 0x0d, 0xf8, 0x89, + 0xe8, 0xd9, 0x9b, 0x10, 0x73, 0xda, 0x63, 0xee, 0x98, 0x5a, 0x41, 0x9f, 0x3c, 0x08, 0x49, 0xc0, + 0xd1, 0xc7, 0xf0, 0xaa, 0x87, 0x7d, 0xe2, 0xf2, 0x06, 0x68, 0x81, 0xdd, 0x8d, 0x6e, 0xe7, 0xa9, + 0x51, 0xfb, 0xd7, 0xb8, 0x85, 0xde, 0xca, 0xb0, 0x17, 0xb7, 0xc1, 0x1e, 0x0d, 0x34, 0x93, 0x39, + 0xfa, 0x4a, 0xb1, 0xbe, 0x2c, 0x81, 0x76, 0xe0, 0x86, 0x87, 0x2d, 0x32, 0x0c, 0xe8, 0x43, 0xd2, + 0xa8, 0xb7, 0xc0, 0xee, 0x33, 0xfd, 0x67, 0xa3, 0x83, 0x7b, 0xf4, 0x21, 0x41, 0x37, 0x21, 0x14, + 0x41, 0xce, 0xa6, 0xc4, 0x6d, 0x5c, 0x89, 0xba, 0xf5, 0x45, 0xfa, 0xa7, 0xd1, 0x81, 0xfa, 0x1b, + 0x80, 0xcd, 0x22, 0xa8, 0x81, 0xc7, 0xdc, 0x80, 0xa0, 0xcf, 0xe1, 0xa6, 0x5c, 0xb8, 0x19, 0x85, + 0x87, 0x66, 0x1c, 0x6f, 0x80, 0x56, 0x7d, 0xf7, 0xda, 0xfe, 0x1b, 0x5a, 0xde, 0xde, 0xb5, 0x55, + 0xb4, 0x28, 0x5c, 0x69, 0x81, 0x5e, 0x87, 0x2f, 0xb8, 0xe4, 0x2b, 0x3e, 0xcc, 0x20, 0xac, 0x09, + 0x84, 0xcf, 0x45, 0xc7, 0xc7, 0x09, 0xca, 0x68, 0x08, 0xce, 0x38, 0xb6, 0xb3, 0x23, 0x6e, 
0x88, + 0x93, 0x68, 0x46, 0x75, 0x0c, 0x77, 0x0e, 0xc9, 0xea, 0x08, 0x09, 0xd9, 0x87, 0xf0, 0x8a, 0x8b, + 0x1d, 0xb2, 0x48, 0x35, 0x5c, 0x8b, 0x6a, 0x51, 0x40, 0xfd, 0x1d, 0xc0, 0x66, 0xcf, 0x27, 0x98, + 0x93, 0xc2, 0x5e, 0x97, 0xba, 0xd8, 0x01, 0xbc, 0x9e, 0xc3, 0xbc, 0xa0, 0xa8, 0x3a, 0xf1, 0xdd, + 0xfa, 0x53, 0xa3, 0xd6, 0x7f, 0x69, 0x85, 0x7d, 0xf5, 0x67, 0x00, 0x9b, 0x27, 0xde, 0xa8, 0x6c, + 0x9a, 0x03, 0x78, 0x2d, 0x14, 0x19, 0xe2, 0x76, 0xcb, 0xc6, 0x4a, 0xd2, 0x38, 0x11, 0x80, 0xf6, + 0x51, 0x24, 0x80, 0x23, 0x1c, 0x4c, 0xfb, 0x30, 0x4e, 0x8f, 0x7e, 0x17, 0xa1, 0xaf, 0x5f, 0x0e, + 0x7a, 0x0a, 0x9b, 0x1f, 0x10, 0x9b, 0x94, 0x80, 0xbf, 0xb4, 0xb5, 0x7f, 0x06, 0xb7, 0x97, 0x24, + 0x72, 0xd7, 0xfb, 0x5f, 0xc9, 0x0b, 0xe2, 0xab, 0x95, 0x8a, 0xaf, 0xbe, 0x2c, 0xbe, 0x73, 0x00, + 0x95, 0xbc, 0xca, 0x52, 0x78, 0x47, 0xf0, 0xc5, 0x05, 0x02, 0xa9, 0x97, 0x88, 0xee, 0xb5, 0x0b, + 0xd9, 0xbb, 0xeb, 0xf5, 0x9f, 0x0f, 0x17, 0xca, 0x56, 0x15, 0xdb, 0xfe, 0x2f, 0x10, 0xa2, 0x4c, + 0xa5, 0x7b, 0xb1, 0xcb, 0xa3, 0x5f, 0x01, 0xbc, 0x91, 0xef, 0x14, 0xa8, 0x93, 0x0f, 0xa7, 0xd4, + 0x02, 0x95, 0x77, 0xd7, 0xfb, 0x28, 0xe6, 0x44, 0x3d, 0x98, 0x1b, 0x52, 0x1d, 0xdf, 0xfe, 0xf9, + 0xf7, 0x79, 0x6d, 0x0f, 0xb5, 0x23, 0x73, 0xff, 0x26, 0x3e, 0x7a, 0xdf, 0xf3, 0xd9, 0x97, 0xc4, + 0xe4, 0x81, 0xde, 0x7e, 0xa4, 0xe7, 0xd8, 0xcd, 0x4f, 0x00, 0x6e, 0xe6, 0x19, 0x05, 0x7a, 0x27, + 0x1f, 0x4b, 0x89, 0xa9, 0x28, 0x55, 0x2f, 0xb0, 0xfa, 0xde, 0xdc, 0x10, 0xd7, 0x68, 0x01, 0x6f, + 0x74, 0x90, 0x41, 0x9b, 0x03, 0x56, 0x6f, 0x3f, 0x42, 0x7f, 0x01, 0xb8, 0x55, 0xe0, 0x37, 0xa8, + 0x80, 0xbe, 0x72, 0x7b, 0xaa, 0x8e, 0x7a, 0x34, 0x37, 0x94, 0x98, 0xd4, 0xbd, 0x1c, 0x11, 0x8b, + 0x59, 0x0c, 0x75, 0x0d, 0xee, 0x6f, 0xe7, 0x79, 0x01, 0xfa, 0x07, 0xc0, 0xad, 0x02, 0x0b, 0x2a, + 0x1a, 0xb0, 0xdc, 0xb1, 0xaa, 0x0f, 0xf8, 0x60, 0x6e, 0x5c, 0x2f, 0x9a, 0xec, 0x64, 0xff, 0x8e, + 0x98, 0x2c, 0x27, 0xae, 0x55, 0xda, 0x5c, 0xfe, 0xb4, 0xdf, 0x03, 0xb8, 0x55, 0xe0, 0x59, 0x45, + 0xd3, 0x96, 0x5b, 0x9c, 0x72, 
0x63, 0xc5, 0x8a, 0x3f, 0x8c, 0x1e, 0x2a, 0x4b, 0x77, 0xae, 0xbd, + 0xce, 0x9d, 0x3b, 0x07, 0x10, 0xad, 0x7a, 0x12, 0xd2, 0x2b, 0xa9, 0x35, 0xf5, 0x45, 0xe5, 0xed, + 0xea, 0x1f, 0x48, 0x69, 0x2b, 0x02, 0xed, 0x26, 0x42, 0xe9, 0x73, 0x2d, 0xc9, 0x51, 0x7e, 0x04, + 0x4f, 0x8c, 0xed, 0x42, 0xe7, 0xfe, 0xc3, 0xf8, 0x0e, 0x4c, 0x38, 0xf7, 0x82, 0xdb, 0xba, 0x7e, + 0x76, 0x76, 0xb6, 0xec, 0xeb, 0x38, 0xe4, 0x13, 0xdd, 0xb4, 0x59, 0x38, 0xba, 0xe5, 0xd9, 0x98, + 0x8f, 0x99, 0xef, 0xec, 0x5d, 0x94, 0x9e, 0xf6, 0x5a, 0x23, 0x55, 0xf3, 0x09, 0x1e, 0x75, 0x1f, + 0x03, 0xd8, 0x30, 0x99, 0x93, 0x3b, 0x7f, 0x57, 0x3a, 0xab, 0x34, 0xd5, 0xe3, 0x68, 0x6d, 0xc7, + 0xe0, 0x8b, 0x3b, 0x32, 0xd7, 0x62, 0x36, 0x76, 0x2d, 0x8d, 0xf9, 0x96, 0x6e, 0x11, 0x57, 0x2c, + 0x55, 0x4f, 0x5b, 0x2e, 0xbe, 0x68, 0x0f, 0xd2, 0x7f, 0x8f, 0x6b, 0xca, 0x61, 0x5c, 0xa0, 0x17, + 0x4d, 0xa9, 0x1d, 0xa5, 0x2d, 0xef, 0x77, 0x9e, 0x24, 0xc1, 0x81, 0x08, 0x0e, 0xd2, 0xe0, 0xe0, + 0x7e, 0xe7, 0xf4, 0xaa, 0x68, 0xd2, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xbe, 0xa4, 0xd7, + 0xe9, 0x0b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. 
+ GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid Uptime check configurations for the project + // (leaving out any invalid configurations). + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single Uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new Uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an Uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `updateMask`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an Uptime check configuration. Note that this method will fail + // if the Uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) + // Returns the list of IP addresses that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedUptimeCheckServiceServer struct { +} + +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(ctx context.Context, req *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(ctx context.Context, req *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(ctx context.Context, req *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(ctx context.Context, req *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(ctx context.Context, req *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented") +} +func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(ctx context.Context, req *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented") +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + 
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: _UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/test/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 0000000000..01db4cfd04 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_24d244abaf643bfe, []int{0} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } + +var fileDescriptor_24d244abaf643bfe = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 
0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x44, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x7e, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x0e, 0x0a, + 0x70, 0x4e, 0x62, 0x03, 0x2b, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x28, 0x45, 0xb1, + 0x13, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go b/test/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go new file mode 100644 index 0000000000..0ee0bd15a4 --- /dev/null +++ b/test/vendor/google.golang.org/genproto/googleapis/type/calendarperiod/calendar_period.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/type/calendar_period.proto + +package calendarperiod + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A `CalendarPeriod` represents the abstract concept of a time period that has +// a canonical start. Grammatically, "the start of the current +// `CalendarPeriod`." All calendar times begin at midnight UTC. +type CalendarPeriod int32 + +const ( + // Undefined period, raises an error. + CalendarPeriod_CALENDAR_PERIOD_UNSPECIFIED CalendarPeriod = 0 + // A day. + CalendarPeriod_DAY CalendarPeriod = 1 + // A week. Weeks begin on Monday, following + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_WEEK CalendarPeriod = 2 + // A fortnight. The first calendar fortnight of the year begins at the start + // of week 1 according to + // [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + CalendarPeriod_FORTNIGHT CalendarPeriod = 3 + // A month. + CalendarPeriod_MONTH CalendarPeriod = 4 + // A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each + // year. + CalendarPeriod_QUARTER CalendarPeriod = 5 + // A half-year. Half-years start on dates 1-Jan and 1-Jul. + CalendarPeriod_HALF CalendarPeriod = 6 + // A year. 
+ CalendarPeriod_YEAR CalendarPeriod = 7 +) + +var CalendarPeriod_name = map[int32]string{ + 0: "CALENDAR_PERIOD_UNSPECIFIED", + 1: "DAY", + 2: "WEEK", + 3: "FORTNIGHT", + 4: "MONTH", + 5: "QUARTER", + 6: "HALF", + 7: "YEAR", +} + +var CalendarPeriod_value = map[string]int32{ + "CALENDAR_PERIOD_UNSPECIFIED": 0, + "DAY": 1, + "WEEK": 2, + "FORTNIGHT": 3, + "MONTH": 4, + "QUARTER": 5, + "HALF": 6, + "YEAR": 7, +} + +func (x CalendarPeriod) String() string { + return proto.EnumName(CalendarPeriod_name, int32(x)) +} + +func (CalendarPeriod) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_52aec33199a3da0e, []int{0} +} + +func init() { + proto.RegisterEnum("google.type.CalendarPeriod", CalendarPeriod_name, CalendarPeriod_value) +} + +func init() { proto.RegisterFile("google/type/calendar_period.proto", fileDescriptor_52aec33199a3da0e) } + +var fileDescriptor_52aec33199a3da0e = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xb1, 0x4f, 0x83, 0x40, + 0x14, 0x87, 0x6d, 0x69, 0x8b, 0x7d, 0x8d, 0x7a, 0x39, 0x47, 0x07, 0xe3, 0xea, 0x00, 0x83, 0xa3, + 0xd3, 0x15, 0x8e, 0x42, 0xac, 0x70, 0x9e, 0xd7, 0x98, 0xba, 0x10, 0x6c, 0x2f, 0x97, 0x26, 0xc8, + 0x23, 0xd8, 0x41, 0x27, 0xff, 0x17, 0xff, 0x52, 0x73, 0xc0, 0x50, 0xb6, 0xbb, 0xbc, 0xef, 0x97, + 0x7c, 0x1f, 0xdc, 0x19, 0x44, 0x53, 0x6a, 0xff, 0xf8, 0x53, 0x6b, 0x7f, 0x57, 0x94, 0xba, 0xda, + 0x17, 0x4d, 0x5e, 0xeb, 0xe6, 0x80, 0x7b, 0xaf, 0x6e, 0xf0, 0x88, 0x74, 0xd1, 0x21, 0x9e, 0x45, + 0xee, 0x7f, 0xe1, 0x32, 0xe8, 0x29, 0xd1, 0x42, 0xf4, 0x16, 0x6e, 0x02, 0xb6, 0xe6, 0x69, 0xc8, + 0x64, 0x2e, 0xb8, 0x4c, 0xb2, 0x30, 0xdf, 0xa4, 0xaf, 0x82, 0x07, 0x49, 0x94, 0xf0, 0x90, 0x9c, + 0x51, 0x17, 0x9c, 0x90, 0x6d, 0xc9, 0x88, 0x9e, 0xc3, 0xe4, 0x8d, 0xf3, 0x27, 0x32, 0xa6, 0x17, + 0x30, 0x8f, 0x32, 0xa9, 0xd2, 0x64, 0x15, 0x2b, 0xe2, 0xd0, 0x39, 0x4c, 0x9f, 0xb3, 0x54, 0xc5, + 0x64, 0x42, 0x17, 0xe0, 0xbe, 0x6c, 0x98, 0x54, 0x5c, 0x92, 0xa9, 
0x1d, 0xc4, 0x6c, 0x1d, 0x91, + 0x99, 0x7d, 0x6d, 0x39, 0x93, 0xc4, 0x5d, 0x7e, 0xc3, 0xd5, 0x0e, 0x3f, 0xbd, 0x13, 0xa7, 0xe5, + 0xf5, 0xd0, 0x48, 0x58, 0x6b, 0x31, 0x7a, 0x8f, 0x7b, 0xc6, 0x60, 0x59, 0x54, 0xc6, 0xc3, 0xc6, + 0xf8, 0x46, 0x57, 0x6d, 0x93, 0xdf, 0x9d, 0x8a, 0xfa, 0xf0, 0x35, 0x2c, 0xef, 0xc2, 0x1f, 0x87, + 0xdf, 0xbf, 0xb1, 0xb3, 0x52, 0xe2, 0x63, 0xd6, 0x4e, 0x1f, 0xfe, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x91, 0x18, 0xaa, 0x3f, 0x33, 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/test/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 0000000000..e6a070b8fb --- /dev/null +++ b/test/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,282 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package field_mask + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). 
+// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. 
+// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. 
+// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} + +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldMask.Unmarshal(m, b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return xxx_messageInfo_FieldMask.Size(m) +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 
0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, + 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/grpc/AUTHORS b/test/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 0000000000..e491a9e7f7 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/test/vendor/google.golang.org/grpc/LICENSE b/test/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/google.golang.org/grpc/attributes/attributes.go b/test/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 0000000000..68ffc62013 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// All APIs in this package are EXPERIMENTAL. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. 
If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + return a.m[key] +} diff --git a/test/vendor/google.golang.org/grpc/backoff.go b/test/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 0000000000..ff7c3ee6f4 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This API is EXPERIMENTAL. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. 
+ MinConnectTimeout time.Duration +} diff --git a/test/vendor/google.golang.org/grpc/backoff/backoff.go b/test/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 0000000000..0787d0b50c --- /dev/null +++ b/test/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. 
+var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/test/vendor/google.golang.org/grpc/balancer.go b/test/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 0000000000..a8eb0f4760 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,391 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. 
+ DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. 
+ // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. 
+// +// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. 
+ open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. 
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. 
+ if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/test/vendor/google.golang.org/grpc/balancer/balancer.go b/test/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 0000000000..9258858ed7 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. 
+package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +func init() { + internal.BalancerUnregister = unregisterForTesting +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. 
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker V2Picker +} + +// ClientConn represents a gRPC ClientConn. 
+// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + // + // Deprecated: use UpdateState instead + UpdateBalancerState(s connectivity.State, p Picker) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. 
+ CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. 
+ Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v1.LoadReport. + ServerLoad interface{} +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +// +// Deprecated: use V2Picker instead +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will + // block until UpdateBalancerState() is called and will call pick on the + // new picker. 
The done function returned from Pick(), if not nil, will be + // called with nil error, no bytes sent and no bytes received. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure or implements IsTransientFailure() + // bool, returning true: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with the error's status code, or Unknown if it is + // not a status error. + // + // The returned done() function will be called once the rpc has finished, + // with the final status of that RPC. If the SubConn returned is not a + // valid SubConn type, done may not be called. done may be nil if balancer + // doesn't care about the RPC status. + Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) +} + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +type transientFailureError struct { + error +} + +func (e *transientFailureError) IsTransientFailure() bool { return true } + +// TransientFailureError wraps err in an error implementing +// IsTransientFailure() bool, returning true. 
+func TransientFailureError(err error) error { + return &transientFailureError{error: err} +} + +// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type V2Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error implements IsTransientFailure() bool, returning true, + // wait for ready RPCs will wait, but non-wait for ready RPCs will be + // terminated with this error's Error() string and status code + // Unavailable. + // + // - Any other errors terminate all RPCs with the code and message + // provided. If the error is not a status error, it will be converted by + // gRPC to a status error with code Unknown. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. 
+ // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateSubConnState will be called instead. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateClientConnState will be called instead. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// V2Balancer is defined for documentation purposes. 
If a Balancer also +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. +type V2Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. 
+ for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/test/vendor/google.golang.org/grpc/balancer/base/balancer.go b/test/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 0000000000..d7d72918ad --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package base + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + v2PickerBuilder: bb.v2PickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + if bb.pickerBuilder != nil { + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + } else { + bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + v2Picker balancer.V2Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not implemented") +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = 
err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) + } +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + if grpclog.V(2) { + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + } + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range s.ResolverState.Addresses { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. 
Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + if b.pickerBuilder != nil { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + } else { + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) + } + return + } + if b.pickerBuilder != nil { + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) + } else { + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. 
+ for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + } +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Set or clear the last connection error accordingly. 
+ b.connErr = state.ConnectionError + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) + } +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). +func NewErrPickerV2(err error) balancer.V2Picker { + return &errPickerV2{err: err} +} + +type errPickerV2 struct { + err error // Pick() always returns this err. +} + +func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/test/vendor/google.golang.org/grpc/balancer/base/base.go b/test/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 0000000000..4192918b9e --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// V2PickerBuilder creates balancer.V2Picker. +type V2PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.V2Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. 
+ ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return NewBalancerBuilderWithConfig(name, pb, Config{}) +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. +func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} + +// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. +func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + v2PickerBuilder: pb, + config: config, + } +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 0000000000..930f90ca57 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,772 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{0} +} + +func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) +} +func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) +} +func (m *LoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceRequest.Merge(m, src) +} +func (m 
*LoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_LoadBalanceRequest.Size(m) +} +func (m *LoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*LoadBalanceRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +type InitialLoadBalanceRequest struct { + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{1} +} + +func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) +} +func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) +} +func (m *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceRequest.Merge(m, src) +} +func (m *InitialLoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) +} +func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + // See Server.load_balance_token. + LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. 
+ NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} } +func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) } +func (*ClientStatsPerToken) ProtoMessage() {} +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{2} +} + +func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b) +} +func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic) +} +func (m *ClientStatsPerToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStatsPerToken.Merge(m, src) +} +func (m *ClientStatsPerToken) XXX_Size() int { + return xxx_messageInfo_ClientStatsPerToken.Size(m) +} +func (m *ClientStatsPerToken) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo + +func (m *ClientStatsPerToken) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *ClientStatsPerToken) GetNumCalls() int64 { + if m != nil { + return m.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. 
+ NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. + CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{3} +} + +func (m *ClientStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStats.Unmarshal(m, b) +} +func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) +} +func (m *ClientStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStats.Merge(m, src) +} +func (m *ClientStats) XXX_Size() int { + return xxx_messageInfo_ClientStats.Size(m) +} +func 
(m *ClientStats) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStats proto.InternalMessageInfo + +func (m *ClientStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if m != nil { + return m.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + // *LoadBalanceResponse_FallbackResponse + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{4} +} + +func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) +} +func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) +} +func (m *LoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceResponse.Merge(m, src) +} +func (m *LoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_LoadBalanceResponse.Size(m) +} +func (m *LoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +type LoadBalanceResponse_FallbackResponse struct { + FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +func (m 
*LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { + return x.FallbackResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*LoadBalanceResponse) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + (*LoadBalanceResponse_FallbackResponse)(nil), + } +} + +type FallbackResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FallbackResponse) Reset() { *m = FallbackResponse{} } +func (m *FallbackResponse) String() string { return proto.CompactTextString(m) } +func (*FallbackResponse) ProtoMessage() {} +func (*FallbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{5} +} + +func (m *FallbackResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FallbackResponse.Unmarshal(m, b) +} +func (m *FallbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FallbackResponse.Marshal(b, m, deterministic) +} +func (m *FallbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FallbackResponse.Merge(m, src) +} +func (m *FallbackResponse) XXX_Size() int { + return xxx_messageInfo_FallbackResponse.Size(m) +} +func (m *FallbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FallbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FallbackResponse proto.InternalMessageInfo + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. 
Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate,proto3" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{6} +} + +func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) +} +func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) +} +func (m *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceResponse.Merge(m, src) +} +func (m *InitialLoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) +} +func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) 
GetClientStatsReportInterval() *duration.Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{7} +} + +func (m *ServerList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerList.Unmarshal(m, b) +} +func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) +} +func (m *ServerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerList.Merge(m, src) +} +func (m *ServerList) XXX_Size() int { + return xxx_messageInfo_ServerList.Size(m) +} +func (m *ServerList) XXX_DiscardUnknown() { + xxx_messageInfo_ServerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerList proto.InternalMessageInfo + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. 
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. 
+ Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{8} +} + +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (m *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(m, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDrop() bool { + if m != nil { + return m.Drop + } + return false +} + +func init() { + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*FallbackResponse)(nil), "grpc.lb.v1.FallbackResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), 
"grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +func init() { proto.RegisterFile("grpc/lb/v1/load_balancer.proto", fileDescriptor_7cd3f6d792743fdf) } + +var fileDescriptor_7cd3f6d792743fdf = []byte{ + // 785 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0xdb, 0x36, + 0x14, 0x8e, 0x6a, 0x27, 0x75, 0x8e, 0xb3, 0xc5, 0x61, 0xb7, 0x4e, 0x71, 0xd3, 0x24, 0x13, 0xb0, + 0x22, 0x18, 0x3a, 0x79, 0xc9, 0x76, 0xb1, 0x01, 0xbb, 0xd8, 0xdc, 0x20, 0x48, 0xd3, 0x5e, 0x04, + 0x74, 0x80, 0x0e, 0x05, 0x06, 0x8e, 0x92, 0x68, 0x87, 0x08, 0x4d, 0x6a, 0x14, 0xed, 0x62, 0xd7, + 0x7b, 0x81, 0x3d, 0xc9, 0xb0, 0x57, 0xd8, 0x9b, 0x0d, 0x22, 0x29, 0x4b, 0xb1, 0x63, 0xf4, 0x4a, + 0xe4, 0x39, 0x1f, 0xbf, 0xf3, 0x7f, 0x04, 0x87, 0x13, 0x9d, 0xa7, 0x03, 0x91, 0x0c, 0xe6, 0xa7, + 0x03, 0xa1, 0x68, 0x46, 0x12, 0x2a, 0xa8, 0x4c, 0x99, 0x8e, 0x73, 0xad, 0x8c, 0x42, 0x50, 0xea, + 0x63, 0x91, 0xc4, 0xf3, 0xd3, 0xfe, 0xe1, 0x44, 0xa9, 0x89, 0x60, 0x03, 0xab, 0x49, 0x66, 0xe3, + 0x41, 0x36, 0xd3, 0xd4, 0x70, 0x25, 0x1d, 0xb6, 0x7f, 0xb4, 0xac, 0x37, 0x7c, 0xca, 0x0a, 0x43, + 0xa7, 0xb9, 0x03, 0x44, 0xff, 0x05, 0x80, 0xde, 0x2a, 0x9a, 0x0d, 0x9d, 0x0d, 0xcc, 0xfe, 0x98, + 0xb1, 0xc2, 0xa0, 0x6b, 0xd8, 0xe5, 0x92, 0x1b, 0x4e, 0x05, 0xd1, 0x4e, 0x14, 0x06, 0xc7, 0xc1, + 0x49, 0xf7, 0xec, 0xab, 0xb8, 0xb6, 0x1e, 0xbf, 0x76, 0x90, 0xd5, 0xf7, 0x97, 0x1b, 0xf8, 0x53, + 0xff, 0xbe, 0x62, 0xfc, 0x09, 0x76, 0x52, 0xc1, 0x99, 0x34, 0xa4, 0x30, 0xd4, 0x14, 0xe1, 0x23, + 0x4b, 0xf7, 0x45, 0x93, 0xee, 0x95, 0xd5, 0x8f, 0x4a, 0xf5, 0xe5, 0x06, 0xee, 0xa6, 0xf5, 0x75, + 0xf8, 0x0c, 0xf6, 0x9b, 0xa9, 0xa8, 0x9c, 0x22, 0xe6, 0xcf, 0x9c, 0x45, 0x03, 0xd8, 0x5f, 0xeb, + 0x09, 0x42, 0xd0, 0x96, 0x74, 0xca, 0xac, 0xfb, 0xdb, 0xd8, 0x9e, 0xa3, 0xdf, 0xe1, 0x49, 0xc3, + 0xd6, 0x35, 0xd3, 0x37, 0xea, 0x8e, 0x49, 0xf4, 0x12, 0xd0, 
0x3d, 0x23, 0xa6, 0x94, 0xfa, 0x87, + 0x3d, 0x51, 0x53, 0x3b, 0xf4, 0x33, 0xd8, 0x96, 0xb3, 0x29, 0x49, 0xa9, 0x10, 0x2e, 0x9a, 0x16, + 0xee, 0xc8, 0xd9, 0xf4, 0x55, 0x79, 0x8f, 0xfe, 0x6d, 0x41, 0xb7, 0x61, 0x02, 0xfd, 0x00, 0xdb, + 0x8b, 0xcc, 0xfb, 0x4c, 0xf6, 0x63, 0x57, 0x9b, 0xb8, 0xaa, 0x4d, 0x7c, 0x53, 0x21, 0x70, 0x0d, + 0x46, 0x5f, 0xc3, 0xde, 0xc2, 0x4c, 0x99, 0x3a, 0x6d, 0x58, 0xe6, 0xcd, 0xed, 0x56, 0xe6, 0x46, + 0x4e, 0x5c, 0x06, 0x50, 0x63, 0xc7, 0x5c, 0xf2, 0xe2, 0x96, 0x65, 0x61, 0xcb, 0x82, 0x7b, 0x15, + 0xf8, 0xc2, 0xcb, 0xd1, 0x6f, 0xf0, 0xcd, 0x2a, 0x9a, 0x7c, 0xe0, 0xe6, 0x96, 0xf8, 0x4a, 0x8d, + 0x29, 0x17, 0x2c, 0x23, 0x46, 0x91, 0x82, 0xc9, 0x2c, 0xdc, 0xb2, 0x44, 0x2f, 0x96, 0x89, 0xde, + 0x71, 0x73, 0xeb, 0x62, 0xbd, 0xb0, 0xf8, 0x1b, 0x35, 0x62, 0x32, 0x43, 0x97, 0xf0, 0xe5, 0x03, + 0xf4, 0x77, 0x52, 0x7d, 0x90, 0x44, 0xb3, 0x94, 0xf1, 0x39, 0xcb, 0xc2, 0xc7, 0x96, 0xf2, 0xf9, + 0x32, 0xe5, 0x9b, 0x12, 0x85, 0x3d, 0x08, 0xfd, 0x0a, 0xe1, 0x43, 0x4e, 0x66, 0x5a, 0xe5, 0x61, + 0xe7, 0xb8, 0x75, 0xd2, 0x3d, 0x3b, 0x5a, 0xd3, 0x46, 0x55, 0x69, 0xf1, 0xe7, 0xe9, 0xb2, 0xc7, + 0xe7, 0x5a, 0xe5, 0x57, 0xed, 0x4e, 0xbb, 0xb7, 0x79, 0xd5, 0xee, 0x6c, 0xf6, 0xb6, 0xa2, 0xbf, + 0x1f, 0xc1, 0x93, 0x7b, 0xfd, 0x53, 0xe4, 0x4a, 0x16, 0x0c, 0x8d, 0xa0, 0x57, 0x8f, 0x82, 0x93, + 0xf9, 0x0a, 0xbe, 0xf8, 0xd8, 0x2c, 0x38, 0xf4, 0xe5, 0x06, 0xde, 0x5d, 0x0c, 0x83, 0x27, 0xfd, + 0x11, 0xba, 0x05, 0xd3, 0x73, 0xa6, 0x89, 0xe0, 0x85, 0xf1, 0xc3, 0xf0, 0xb4, 0xc9, 0x37, 0xb2, + 0xea, 0xb7, 0xdc, 0x0e, 0x13, 0x14, 0x8b, 0x1b, 0x7a, 0x03, 0x7b, 0x63, 0x2a, 0x44, 0x42, 0xd3, + 0xbb, 0xda, 0xa1, 0x96, 0x25, 0x38, 0x68, 0x12, 0x5c, 0x78, 0x50, 0xc3, 0x8d, 0xde, 0x78, 0x49, + 0x36, 0x3c, 0x80, 0xfe, 0xd2, 0x5c, 0x39, 0x85, 0x1b, 0x2c, 0x04, 0xbd, 0x65, 0x96, 0xe8, 0x9f, + 0x00, 0xfa, 0xeb, 0x63, 0x45, 0xdf, 0xc3, 0xd3, 0x7b, 0x3b, 0x8b, 0x64, 0x4c, 0xb0, 0x09, 0x35, + 0xd5, 0x00, 0x7e, 0xd6, 0x98, 0x23, 0x7d, 0xee, 0x75, 0xe8, 0x3d, 0x1c, 0x34, 0x97, 0x03, 0xd1, + 
0x2c, 0x57, 0xda, 0x10, 0x2e, 0x0d, 0xd3, 0x73, 0x2a, 0x7c, 0x7e, 0xf6, 0x57, 0x26, 0xe6, 0xdc, + 0x6f, 0x3b, 0xbc, 0xdf, 0x58, 0x16, 0xd8, 0x3e, 0x7e, 0xed, 0xdf, 0x46, 0x3f, 0x03, 0xd4, 0xb9, + 0x44, 0x2f, 0xe1, 0xb1, 0xcb, 0x65, 0x11, 0x06, 0xb6, 0x75, 0xd0, 0x6a, 0xd2, 0x71, 0x05, 0xb9, + 0x6a, 0x77, 0x5a, 0xbd, 0x76, 0xf4, 0x57, 0x00, 0x5b, 0x4e, 0x83, 0x9e, 0x03, 0xf0, 0x9c, 0xd0, + 0x2c, 0xd3, 0xac, 0x28, 0x6c, 0x48, 0x3b, 0x78, 0x9b, 0xe7, 0xbf, 0x38, 0x41, 0xb9, 0x6c, 0x4a, + 0xdb, 0xd6, 0xdf, 0x4d, 0x6c, 0xcf, 0x6b, 0xb6, 0x4a, 0x6b, 0xcd, 0x56, 0x41, 0xd0, 0xb6, 0x7d, + 0xdd, 0x3e, 0x0e, 0x4e, 0x3a, 0xd8, 0x9e, 0x5d, 0x7f, 0x9e, 0x25, 0xb0, 0xd3, 0x48, 0xb8, 0x46, + 0x18, 0xba, 0xfe, 0x5c, 0x8a, 0xd1, 0x61, 0x33, 0x8e, 0xd5, 0x3d, 0xd8, 0x3f, 0x5a, 0xab, 0x77, + 0x95, 0x3b, 0x09, 0xbe, 0x0d, 0x86, 0xef, 0xe0, 0x13, 0xae, 0x1a, 0xc0, 0xe1, 0x5e, 0xd3, 0xe4, + 0x75, 0x99, 0xf6, 0xeb, 0xe0, 0xfd, 0xa9, 0x2f, 0xc3, 0x44, 0x09, 0x2a, 0x27, 0xb1, 0xd2, 0x93, + 0x81, 0xfd, 0x65, 0x55, 0x35, 0xb7, 0x37, 0x91, 0xd8, 0x0f, 0x11, 0x09, 0x99, 0x9f, 0x26, 0x5b, + 0xb6, 0x64, 0xdf, 0xfd, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x09, 0x7b, 0x39, 0x1e, 0xdc, 0x06, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LoadBalancerClient is the client API for LoadBalancer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. 
+ BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +// UnimplementedLoadBalancerServer can be embedded to have forward compatible implementations. 
+type UnimplementedLoadBalancerServer struct { +} + +func (*UnimplementedLoadBalancerServer) BalanceLoad(srv LoadBalancer_BalanceLoadServer) error { + return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 0000000000..219ca7235b --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,488 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "sync" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +const ( + lbTokenKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. 
+type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) + dns.EnableSRVLookups = true +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. +func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a fixed scheme. 
This + // scheme will be used to dial to remote LB, so we can send filtered + // address updates to remote LB ClientConn using this manual resolver. + r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + target: opt.Target.Endpoint, + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: r, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + clientStats: newRPCStats(), + backoff: backoff.DefaultExponential, // TODO: make backoff configurable. + } + + var err error + if opt.CredsBundle != nil { + lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + } + lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + } + } + + return lb +} + +var _ balancer.V2Balancer = (*lbBalancer)(nil) // Assert that we implement V2Balancer + +type lbBalancer struct { + cc *lbCacheClientConn + target string + opt balancer.BuildOptions + + usePickFirst bool + + // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb + // servers. If it's nil, use the TransportCredentials from BuildOptions + // instead. + grpclbClientConnCreds credentials.Bundle + // grpclbBackendCreds is the creds bundle to be used for addresses that are + // returned by grpclb server. If it's nil, don't set anything when creating + // SubConns. + grpclbBackendCreds credentials.Bundle + + fallbackTimeout time.Duration + doneCh chan struct{} + + // manualResolver is used in the remote LB ClientConn inside grpclb. 
When + // resolved address updates are received by grpclb, filtered updates will be + // send to remote LB ClientConn through this resolver. + manualResolver *lbManualResolver + // The ClientConn to talk to the remote balancer. + ccRemoteLB *remoteBalancerCCWrapper + // backoff for calling remote balancer. + backoff backoff.Strategy + + // Support client side load reporting. Each picker gets a reference to this, + // and will update its content. + clientStats *rpcStats + + mu sync.Mutex // guards everything following. + // The full server list including drops, used to check if the newly received + // serverList contains anything new. Each generate picker will also have + // reference to this list to do the first layer pick. + fullServerList []*lbpb.Server + // Backend addresses. It's kept so the addresses are available when + // switching between round_robin and pickfirst. + backendAddrs []resolver.Address + // All backends addresses, with metadata set to nil. This list contains all + // backend addresses in the same order and with the same duplicates as in + // serverlist. When generating picker, a SubConn slice with the same order + // but with only READY SCs will be gerenated. + backendAddrsWithoutMetadata []resolver.Address + // Roundrobin functionalities. + state connectivity.State + subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. + picker balancer.V2Picker + // Support fallback to resolved backend addresses if there's no response + // from remote balancer within fallbackTimeout. + remoteBalancerConnected bool + serverListReceived bool + inFallback bool + // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set + // when resolved address updates are received, and read in the goroutine + // handling fallback. 
+ resolvedBackendAddrs []resolver.Address +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker from +// it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// Caller must hold lb.mu. +func (lb *lbBalancer) regeneratePicker(resetDrop bool) { + if lb.state == connectivity.TransientFailure { + lb.picker = &errPicker{err: balancer.ErrTransientFailure} + return + } + + if lb.state == connectivity.Connecting { + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + + var readySCs []balancer.SubConn + if lb.usePickFirst { + for _, sc := range lb.subConns { + readySCs = append(readySCs, sc) + break + } + } else { + for _, a := range lb.backendAddrsWithoutMetadata { + if sc, ok := lb.subConns[a]; ok { + if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + } + } + + if len(readySCs) <= 0 { + // If there's no ready SubConns, always re-pick. This is to avoid drops + // unless at least one SubConn is ready. Otherwise we may drop more + // often than want because of drops + re-picks(which become re-drops). + // + // This doesn't seem to be necessary after the connecting check above. + // Kept for safety. + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + if lb.inFallback { + lb.picker = newRRPicker(readySCs) + return + } + if resetDrop { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker, ok := lb.picker.(*lbPicker) + if !ok { + lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) + return + } + prevLBPicker.updateReadySCs(readySCs) +} + +// aggregateSubConnStats calculate the aggregated state of SubConns in +// lb.SubConns. These SubConns are subconns in use (when switching between +// fallback and grpclb). 
lb.scState contains states for all SubConns, including +// those in cache (SubConns are cached for 10 seconds after remove). +// +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { + var numConnecting uint64 + + for _, sc := range lb.subConns { + if state, ok := lb.scStates[sc]; ok { + switch state { + case connectivity.Ready: + return connectivity.Ready + case connectivity.Connecting: + numConnecting++ + } + } + } + if numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} + +func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not used") +} + +func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + s := scs.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + } + // Force regenerate picker if + // - this sc became ready from not-ready + // - this sc became not-ready from ready + lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) + + // Enter fallback when the aggregated state is not Ready and the connection + // to remote balancer is lost. 
+ if lb.state != connectivity.Ready { + if !lb.inFallback && !lb.remoteBalancerConnected { + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + } +} + +// updateStateAndPicker re-calculate the aggregated state, and regenerate picker +// if overall state is changed. +// +// If forceRegeneratePicker is true, picker will be regenerated. +func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + + lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB +// clientConn. The remoteLB clientConn will handle creating/removing remoteLB +// connections. 
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not used") +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) ResolverError(error) { + // Ignore resolver errors. GRPCLB is not selected unless the resolver + // works at least once. +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + addrs := ccs.ResolverState.Addresses + if len(addrs) == 0 { + // There should be at least one address, either grpclb server or + // fallback. Empty address is not valid. + return balancer.ErrBadResolverState + } + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + a.Type = resolver.Backend + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + + if len(remoteBalancerAddrs) == 0 { + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + lb.ccRemoteLB = nil + } + } else if lb.ccRemoteLB == nil { + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.newRemoteBalancerCCWrapper() + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + if lb.ccRemoteLB != nil { + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. 
+ lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + } + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if len(remoteBalancerAddrs) == 0 || lb.inFallback { + // If there's no remote balancer address in ClientConn update, grpclb + // enters fallback mode immediately. + // + // If a new update is received while grpclb is in fallback, update the + // list of backends being used to the new fallback backends. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + return nil +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + } + lb.cc.close() +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 0000000000..aac3719631 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = grpc.PickFirstBalancerName +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 0000000000..39bc5cc71e --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,202 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/status" +) + +// rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map +// instead of a slice. +type rpcStats struct { + // Only access the following fields atomically. + numCallsStarted int64 + numCallsFinished int64 + numCallsFinishedWithClientFailedToSend int64 + numCallsFinishedKnownReceived int64 + + mu sync.Mutex + // map load_balance_token -> num_calls_dropped + numCallsDropped map[string]int64 +} + +func newRPCStats() *rpcStats { + return &rpcStats{ + numCallsDropped: make(map[string]int64), + } +} + +func isZeroStats(stats *lbpb.ClientStats) bool { + return len(stats.CallsFinishedWithDrop) == 0 && + stats.NumCallsStarted == 0 && + stats.NumCallsFinished == 0 && + stats.NumCallsFinishedWithClientFailedToSend == 0 && + stats.NumCallsFinishedKnownReceived == 0 +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. 
+func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), + } + s.mu.Lock() + dropped := s.numCallsDropped + s.numCallsDropped = make(map[string]int64) + s.mu.Unlock() + for token, count := range dropped { + stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ + LoadBalanceToken: token, + NumCalls: count, + }) + } + return stats +} + +func (s *rpcStats) drop(token string) { + atomic.AddInt64(&s.numCallsStarted, 1) + s.mu.Lock() + s.numCallsDropped[token]++ + s.mu.Unlock() + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.numCallsStarted, 1) + atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.numCallsFinished, 1) +} + +type errPicker struct { + // Pick always returns this err. + err error +} + +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. 
+ subConnsNext int +} + +func newRRPicker(readySCs []balancer.SubConn) *rrPicker { + return &rrPicker{ + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + } +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return balancer.PickResult{SubConn: sc}, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. +// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. + if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. 
+ sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 0000000000..e70ce75007 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,407 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclb + +import ( + "context" + "fmt" + "io" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// processServerList updates balaner's internal state, create/remove SubConns +// and regenerates picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: processing server list: %+v", l) + } + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + } + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for i, s := range l.Servers { + if s.Drop { + continue + } + + md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. 
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := resolver.Address{ + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Metadata: &md, + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", + i, ipStr, s.Port, s.LoadBalanceToken) + } + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. If we are in fallback, + // this is also exiting fallback. + lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) +} + +// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes +// balancer state and picker. +// +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { + opts := balancer.NewSubConnOptions{} + if !fallback { + opts.CredsBundle = lb.grpclbBackendCreds + } + + lb.backendAddrs = backendAddrs + lb.backendAddrsWithoutMetadata = nil + + fallbackModeChanged := lb.inFallback != fallback + lb.inFallback = fallback + if fallbackModeChanged && lb.inFallback { + // Clear previous received list when entering fallback, so if the server + // comes back and sends the same list again, the new addresses will be + // used. + lb.fullServerList = nil + } + + balancingPolicyChanged := lb.usePickFirst != pickFirst + oldUsePickFirst := lb.usePickFirst + lb.usePickFirst = pickFirst + + if fallbackModeChanged || balancingPolicyChanged { + // Remove all SubConns when switching balancing policy or switching + // fallback mode. + // + // For fallback mode switching with pickfirst, we want to recreate the + // SubConn because the creds could be different. + for a, sc := range lb.subConns { + if oldUsePickFirst { + // If old SubConn were created for pickfirst, bypass cache and + // remove directly. 
+ lb.cc.cc.RemoveSubConn(sc) + } else { + lb.cc.RemoveSubConn(sc) + } + delete(lb.subConns, a) + } + } + + if lb.usePickFirst { + var sc balancer.SubConn + for _, sc = range lb.subConns { + break + } + if sc != nil { + sc.UpdateAddresses(backendAddrs) + sc.Connect() + return + } + // This bypasses the cc wrapper with SubConn cache. + sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + return + } + sc.Connect() + lb.subConns[backendAddrs[0]] = sc + lb.scStates[sc] = connectivity.Idle + return + } + + // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick + // lookup for an address. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutMD := addr + addrWithoutMD.Metadata = nil + addrsSet[addrWithoutMD] = struct{}{} + lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutMD) + + if _, ok := lb.subConns[addrWithoutMD]; !ok { + // Use addrWithMD to create the SubConn. + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) + if err != nil { + grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. + lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + lb.cc.RemoveSubConn(sc) + delete(lb.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. 
+ } + } + + // Regenerate and update picker after refreshing subconns because with + // cache, even if SubConn was newed/removed, there might be no state + // changes (the subconn will be kept in cache, not actually + // newed/removed). + lb.updateStateAndPicker(true, true) +} + +type remoteBalancerCCWrapper struct { + cc *grpc.ClientConn + lb *lbBalancer + backoff backoff.Strategy + done chan struct{} + + // waitgroup to wait for all goroutines to exit. + wg sync.WaitGroup +} + +func (lb *lbBalancer) newRemoteBalancerCCWrapper() { + var dopts []grpc.DialOption + if creds := lb.opt.DialCreds; creds != nil { + dopts = append(dopts, grpc.WithTransportCredentials(creds)) + } else if bundle := lb.grpclbClientConnCreds; bundle != nil { + dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) + } else { + dopts = append(dopts, grpc.WithInsecure()) + } + if lb.opt.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) + } + // Explicitly set pickfirst as the balancer. + dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) + dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) + if channelz.IsOn() { + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) + } + + // Enable Keepalive for grpclb client. + dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 20 * time.Second, + Timeout: 10 * time.Second, + PermitWithoutStream: true, + })) + + // The dial target is not important. + // + // The grpclb server addresses will set field ServerName, and creds will + // receive ServerName as authority. + cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) 
+ if err != nil { + grpclog.Fatalf("failed to dial: %v", err) + } + ccw := &remoteBalancerCCWrapper{ + cc: cc, + lb: lb, + backoff: lb.backoff, + done: make(chan struct{}), + } + lb.ccRemoteLB = ccw + ccw.wg.Add(1) + go ccw.watchRemoteBalancer() +} + +// close closed the ClientConn to remote balancer, and waits until all +// goroutines to finish. +func (ccw *remoteBalancerCCWrapper) close() { + close(ccw.done) + ccw.cc.Close() + ccw.wg.Wait() +} + +func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { + for { + reply, err := s.Recv() + if err != nil { + if err == io.EOF { + return errServerTerminatedConnection + } + return fmt.Errorf("grpclb: failed to recv server list: %v", err) + } + if serverList := reply.GetServerList(); serverList != nil { + ccw.lb.processServerList(serverList) + } + } +} + +func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + lastZero := false + for { + select { + case <-ticker.C: + case <-s.Context().Done(): + return + } + stats := ccw.lb.clientStats.toClientStats() + zero := isZeroStats(stats) + if zero && lastZero { + // Quash redundant empty load reports. 
+ continue + } + lastZero = zero + t := time.Now() + stats.Timestamp = ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: stats, + }, + }); err != nil { + return + } + } +} + +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { + lbClient := &loadBalancerClient{cc: ccw.cc} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) + if err != nil { + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + } + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = true + ccw.lb.mu.Unlock() + + // grpclb handshake on the stream. + initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: ccw.lb.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + return true, fmt.Errorf("grpclb: failed to send init request: %v", err) + } + reply, err := stream.Recv() + if err != nil { + return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) + } + initResp := reply.GetInitialResponse() + if initResp == nil { + return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") + } + if initResp.LoadBalancerDelegate != "" { + return true, fmt.Errorf("grpclb: Delegation is not supported") + } + + ccw.wg.Add(1) + go func() { + defer ccw.wg.Done() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + ccw.sendLoadReport(stream, d) + } + }() + // No backoff if init req/resp handshake was successful. 
+ return false, ccw.readServerList(stream) +} + +func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { + defer ccw.wg.Done() + var retryCount int + for { + doBackoff, err := ccw.callRemoteBalancer() + select { + case <-ccw.done: + return + default: + if err != nil { + if err == errServerTerminatedConnection { + grpclog.Info(err) + } else { + grpclog.Warning(err) + } + } + } + // Trigger a re-resolve when the stream errors. + ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = false + ccw.lb.fullServerList = nil + // Enter fallback when connection to remote balancer is lost, and the + // aggregated state is not Ready. + if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { + // Entering fallback. + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + } + ccw.lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff + select { + case <-timer.C: + case <-ccw.done: + timer.Stop() + return + } + retryCount++ + } +} diff --git a/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 0000000000..636725e541 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). +// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. 
+type lbManualResolver struct { + scheme string + ccr resolver.ClientConn + + ccb balancer.ClientConn +} + +func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + r.ccr = cc + return r, nil +} + +func (r *lbManualResolver) Scheme() string { + return r.scheme +} + +// ResolveNow calls resolveNow on the parent ClientConn. +func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ccb.ResolveNow(o) +} + +// Close is a noop for Resolver. +func (*lbManualResolver) Close() {} + +// UpdateState calls cc.UpdateState. +func (r *lbManualResolver) UpdateState(s resolver.State) { + r.ccr.UpdateState(s) +} + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being removed. +// +// Its new and remove methods are updated to do cache first. +type lbCacheClientConn struct { + cc balancer.ClientConn + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. 
+ subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + cc: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutMD := addrs[0] + addrWithoutMD.Metadata = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. + entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc, nil + } + + scNew, err := ccc.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for the + // same address, and those SubConns are all removed. We remove sc + // immediately here. 
+ delete(ccc.subConnToAddr, sc) + ccc.cc.RemoveSubConn(sc) + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry.abortDeleting { + return + } + ccc.cc.RemoveSubConn(sc) + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { + ccc.cc.UpdateState(s) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + // Only cancel all existing timers. There's no need to remove SubConns. + for _, entry := range ccc.subConnCache { + entry.cancel() + } + ccc.mu.Unlock() +} diff --git a/test/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/test/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 0000000000..d4d645501c --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { + grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. 
+ subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/test/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/test/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 0000000000..824f28e740 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. 
+type ccBalancerWrapper struct { + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + scBuffer *buffer.Unbounded + done *grpcsync.Event + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + scBuffer: buffer.NewUnbounded(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. +func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.scBuffer.Get(): + ccb.scBuffer.Load() + if ccb.done.HasFired() { + break + } + ccb.balancerMu.Lock() + su := t.(*scStateUpdate) + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) + } else { + ccb.balancer.HandleSubConnStateChange(su.sc, su.state) + } + ccb.balancerMu.Unlock() + case <-ccb.done.Done(): + } + + if ccb.done.HasFired() { + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + return + } + } +} + +func (ccb *ccBalancerWrapper) close() { + ccb.done.Fire() +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. 
So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.scBuffer.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + return ub.UpdateClientConnState(*ccs) + } + ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) + return nil +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ccb.balancerMu.Lock() + ub.ResolverError(err) + ccb.balancerMu.Unlock() + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. 
Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(p) + ccb.cc.csMgr.updateState(s) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/test/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/test/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 0000000000..db04b08b84 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,334 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. 
+} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: opts.Target.Endpoint, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. 
+ a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. 
+ del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.RecordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) + if s == connectivity.Shutdown { + // Remove state for this sc. 
+ delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(info.Ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return balancer.PickResult{}, toRPCErr(err) + } + if p != nil { + result.Done = func(balancer.DoneInfo) { p() } + defer func() { + if err != nil { + p() + } + }() + } + + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, result.SubConn = range bw.conns { + return result, nil + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + var ok1 bool + result.SubConn, ok1 = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + s, ok2 := bw.connSt[result.SubConn] + if !ok1 || !ok2 { + // This can only happen due to a race where Get() returned an address + // that was subsequently removed by Notify. In this case we should + // retry always. 
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + switch s.s { + case connectivity.Ready, connectivity.Idle: + return result, nil + case connectivity.Shutdown, connectivity.TransientFailure: + // If the returned sc has been shut down or is in transient failure, + // return error, and this RPC will fail or wait for another picker (if + // non-failfast). + return balancer.PickResult{}, balancer.ErrTransientFailure + default: + // For other states (connecting or unknown), the v1 balancer would + // traditionally wait until ready and then issue the RPC. Returning + // ErrNoSubConnAvailable will be a slight improvement in that it will + // allow the balancer to choose another address in case others are + // connected. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} diff --git a/test/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/test/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 0000000000..f393bb6618 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. 
+ GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +var GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", +} +var GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, +} + +func (x GrpcLogEntry_EventType) String() string { + return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) +} +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +var GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", +} +var GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, +} + +func (x GrpcLogEntry_Logger) String() string { + return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) +} +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. 
+ Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +var Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", +} +var Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, +} + +func (x Address_Type) String() string { + return proto.EnumName(Address_Type_name, int32(x)) +} +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are valid to be assigned to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. 
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } +func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } +func (*GrpcLogEntry) ProtoMessage() {} +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} +} +func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +} +func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) +} +func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +} +func (m *GrpcLogEntry) XXX_Size() int { + return xxx_messageInfo_GrpcLogEntry.Size(m) +} +func (m *GrpcLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo + +func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GrpcLogEntry) GetCallId() uint64 { + if m != nil { + return m.CallId + } + return 0 +} + +func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if m != nil { + return m.SequenceIdWithinCall + } + return 0 +} + +func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if m != nil { + return m.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if m != nil { + return m.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader 
struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (m *GrpcLogEntry) GetMessage() *Message { + if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (m *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (m *GrpcLogEntry) GetPayloadTruncated() bool { + if m != nil { + return m.PayloadTruncated + } + return false +} + +func (m *GrpcLogEntry) GetPeer() *Address { + if m != nil { + return m.Peer + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } +} + +func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientHeader); err != nil { + return err + } + case *GrpcLogEntry_ServerHeader: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerHeader); err != nil { + return err + } + case *GrpcLogEntry_Message: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Message); err != nil { + return err + } + case *GrpcLogEntry_Trailer: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Trailer); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GrpcLogEntry) + switch tag { + case 6: // payload.client_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ClientHeader{msg} + return true, err + case 7: // payload.server_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ServerHeader{msg} + return true, err + case 8: // payload.message + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + msg := new(Message) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Message{msg} + return true, err + case 9: // payload.trailer + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Trailer) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Trailer{msg} + return true, err + default: + return false, nil + } +} + +func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + s := proto.Size(x.ClientHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_ServerHeader: + s := proto.Size(x.ServerHeader) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Message: + s := proto.Size(x.Message) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *GrpcLogEntry_Trailer: + s := proto.Size(x.Trailer) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ClientHeader struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The name of the RPC method, which looks something like: + // // + // Note the leading "/" character. + MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // A single process may be used to run multiple virtual + // servers with different identities. + // The authority is the name of such a server identitiy. + // It is typically a portion of the URI in the form of + // or : . 
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientHeader) Reset() { *m = ClientHeader{} } +func (m *ClientHeader) String() string { return proto.CompactTextString(m) } +func (*ClientHeader) ProtoMessage() {} +func (*ClientHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} +} +func (m *ClientHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientHeader.Unmarshal(m, b) +} +func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) +} +func (dst *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(dst, src) +} +func (m *ClientHeader) XXX_Size() int { + return xxx_messageInfo_ClientHeader.Size(m) +} +func (m *ClientHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ClientHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientHeader proto.InternalMessageInfo + +func (m *ClientHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ClientHeader) GetMethodName() string { + if m != nil { + return m.MethodName + } + return "" +} + +func (m *ClientHeader) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *ClientHeader) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +type ServerHeader struct { + // This contains only the metadata from the application. 
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHeader) Reset() { *m = ServerHeader{} } +func (m *ServerHeader) String() string { return proto.CompactTextString(m) } +func (*ServerHeader) ProtoMessage() {} +func (*ServerHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} +} +func (m *ServerHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHeader.Unmarshal(m, b) +} +func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) +} +func (dst *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(dst, src) +} +func (m *ServerHeader) XXX_Size() int { + return xxx_messageInfo_ServerHeader.Size(m) +} +func (m *ServerHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHeader proto.InternalMessageInfo + +func (m *ServerHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Trailer struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. 
+ StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trailer) Reset() { *m = Trailer{} } +func (m *Trailer) String() string { return proto.CompactTextString(m) } +func (*Trailer) ProtoMessage() {} +func (*Trailer) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} +} +func (m *Trailer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trailer.Unmarshal(m, b) +} +func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) +} +func (dst *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(dst, src) +} +func (m *Trailer) XXX_Size() int { + return xxx_messageInfo_Trailer.Size(m) +} +func (m *Trailer) XXX_DiscardUnknown() { + xxx_messageInfo_Trailer.DiscardUnknown(m) +} + +var xxx_messageInfo_Trailer proto.InternalMessageInfo + +func (m *Trailer) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Trailer) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Trailer) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Trailer) GetStatusDetails() []byte { + if m != nil { + return m.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. 
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetEntry() []*MetadataEntry { + if m != nil { + return 
m.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } +func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } +func (*MetadataEntry) ProtoMessage() {} +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} +} +func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) +} +func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) +} +func (dst *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(dst, src) +} +func (m *MetadataEntry) XXX_Size() int { + return xxx_messageInfo_MetadataEntry.Size(m) +} +func (m *MetadataEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo + +func (m *MetadataEntry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *MetadataEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Address information +type Address struct { + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} 
+ +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetType() Address_Type { + if m != nil { + return m.Type + } + return Address_TYPE_UNKNOWN +} + +func (m *Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Address) GetIpPort() uint32 { + if m != nil { + return m.IpPort + } + return 0 +} + +func init() { + proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") + proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") + proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") + proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") + proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") + proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") + proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") + proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + 
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +} + +func init() { + proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +} + +var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, + 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, + 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, + 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, + 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, + 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, + 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, + 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, + 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, + 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, + 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, + 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, + 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, + 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, + 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, + 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, + 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 
0x84, + 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, + 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, + 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, + 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, + 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, + 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, + 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, + 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, + 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, + 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, + 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, + 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, + 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, + 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, + 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, + 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, + 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, + 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, + 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, + 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, + 0xae, 0x62, 0x50, 0xd0, 0x88, 
0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, + 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, + 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, + 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, + 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, + 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, + 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, + 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, + 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, + 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, + 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, + 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, + 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, + 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, + 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, + 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, + 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, + 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, + 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, + 0xd4, 0x07, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/grpc/call.go b/test/vendor/google.golang.org/grpc/call.go new file mode 100644 index 
0000000000..9e20e4d385 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. 
This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/test/vendor/google.golang.org/grpc/clientconn.go b/test/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 0000000000..f58740b250 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1568 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. 
+ invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. 
By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + 
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + }) + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. 
+ select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) + } + } + + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. 
+ select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) 
+ } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. 
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if channelz.IsOn() { + channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel Connectivity change to %v", state), + Severity: channelz.CtINFO, + }) + } + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. 
+type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. 
+// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. 
+ if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) + } + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? 
+ } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + cc.applyServiceConfigAndBalancer(sc, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) + } + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. 
+func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if channelz.IsOn() { + if builder == nil { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), + Severity: channelz.CtWarning, + }) + } else { + channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), + Severity: channelz.CtINFO, + }) + } + } + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. 
This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. 
+func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. 
+ var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { + if sc == nil { + // should never reach here. 
+ return + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. 
+// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. 
+type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + + updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) + ac.state = s + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: updateMsg, + Severity: channelz.CtINFO, + }) + } + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. 
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.transport = nil + ac.mu.Unlock() + + newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) + if err != nil { + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. 
+ b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return + } + continue + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + newTr.Close() + return + } + ac.curAddr = addr + ac.transport = newTr + ac.backoffIdx = 0 + + hctx, hcancel := context.WithCancel(ac.ctx) + ac.startHealthCheck(hctx) + ac.mu.Unlock() + + // Block until the created transport is down. And when this happens, + // we restart from the top of the addr list. + <-reconnect.Done() + hcancel() + // restart connecting - the top of the loop will set state to + // CONNECTING. This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. + // + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. + } +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at the +// first successful one. It returns the transport, the address and a Event in +// the successful case. The Event fires when the returned transport disconnects. 
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error + for _, addr := range addrs { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return nil, resolver.Address{}, nil, errConnClosing + } + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), + Severity: channelz.CtINFO, + }) + } + + newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + if err == nil { + return newTr, addr, reconnect, nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.blockingpicker.updateConnectionError(err) + } + + // Couldn't connect to any address. + return nil, resolver.Address{}, nil, firstConnErr +} + +// createTransport creates a connection to addr. It returns the transport and a +// Event in the successful case. The Event fires when the returned transport +// disconnects. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { + prefaceReceived := make(chan struct{}) + onCloseCalled := make(chan struct{}) + reconnect := grpcsync.NewEvent() + + authority := ac.cc.authority + // addr.ServerName takes precedent over ClientConn authority, if present. 
+ if addr.ServerName != "" { + authority = addr.ServerName + } + + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: authority, + } + + once := sync.Once{} + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + reconnect.Fire() + } + + onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting, nil) + } + }) + ac.mu.Unlock() + close(onCloseCalled) + reconnect.Fire() + } + + onPrefaceReceipt := func() { + close(prefaceReceived) + } + + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + defer cancel() + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + if err != nil { + // newTr is either nil, or closed. + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + return nil, nil, err + } + + select { + case <-time.After(time.Until(connectDeadline)): + // We didn't get the preface in time. + newTr.Close() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. 
+ case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + } + return newTr, reconnect, nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + grpclog.Error("Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. 
+ currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", + Severity: channelz.CtError, + }) + } + grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready && ac.transport != nil { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. 
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + if err == errConnDrain && curTr != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. + ac.mu.Unlock() + curTr.GracefulClose() + ac.mu.Lock() + } + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel Deleted", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. 
+ channelz.RemoveEntry(ac.channelzID) + } + ac.mu.Unlock() +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. 
+func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if cc.parsedTarget.Scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(cc.parsedTarget.Scheme) +} diff --git a/test/vendor/google.golang.org/grpc/codec.go b/test/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 0000000000..1297765478 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/test/vendor/google.golang.org/grpc/codes/code_string.go b/test/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 0000000000..0b206a5782 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/test/vendor/google.golang.org/grpc/codes/codes.go b/test/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 0000000000..02738839dd --- /dev/null +++ b/test/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,198 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. 
It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). 
+ PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. 
For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. 
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/test/vendor/google.golang.org/grpc/connectivity/connectivity.go b/test/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 0000000000..34ec36fbf6 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "context" + + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). 
+ WaitForStateChange(context.Context, State) bool +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/alts.go b/test/vendor/google.golang.org/grpc/credentials/alts/alts.go new file mode 100644 index 0000000000..72c7f0b23f --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -0,0 +1,330 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package alts implements the ALTS credential support by gRPC library, which +// encapsulates all the state needed by a client to authenticate with a server +// using ALTS and make various assertions, e.g., about the client's identity, +// role, or whether it is authorized to make a particular call. +// This package is experimental. +package alts + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" + + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/handshaker" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/grpclog" +) + +const ( + // hypervisorHandshakerServiceAddress represents the default ALTS gRPC + // handshaker service address in the hypervisor. + hypervisorHandshakerServiceAddress = "metadata.google.internal:8080" + // defaultTimeout specifies the server handshake timeout. 
+ defaultTimeout = 30.0 * time.Second + // The following constants specify the minimum and maximum acceptable + // protocol versions. + protocolVersionMaxMajor = 2 + protocolVersionMaxMinor = 1 + protocolVersionMinMajor = 2 + protocolVersionMinMinor = 1 +) + +var ( + once sync.Once + maxRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMaxMajor, + Minor: protocolVersionMaxMinor, + } + minRPCVersion = &altspb.RpcProtocolVersions_Version{ + Major: protocolVersionMinMajor, + Minor: protocolVersionMinMinor, + } + // ErrUntrustedPlatform is returned from ClientHandshake and + // ServerHandshake is running on a platform where the trustworthiness of + // the handshaker service is not guaranteed. + ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") +) + +// AuthInfo exposes security information from the ALTS handshake to the +// application. This interface is to be implemented by ALTS. Users should not +// need a brand new implementation of this interface. For situations like +// testing, any new implementation should embed this interface. This allows +// ALTS to add new methods to this interface. +type AuthInfo interface { + // ApplicationProtocol returns application protocol negotiated for the + // ALTS connection. + ApplicationProtocol() string + // RecordProtocol returns the record protocol negotiated for the ALTS + // connection. + RecordProtocol() string + // SecurityLevel returns the security level of the created ALTS secure + // channel. + SecurityLevel() altspb.SecurityLevel + // PeerServiceAccount returns the peer service account. + PeerServiceAccount() string + // LocalServiceAccount returns the local service account. + LocalServiceAccount() string + // PeerRPCVersions returns the RPC version supported by the peer. + PeerRPCVersions() *altspb.RpcProtocolVersions +} + +// ClientOptions contains the client-side options of an ALTS channel. 
These +// options will be passed to the underlying ALTS handshaker. +type ClientOptions struct { + // TargetServiceAccounts contains a list of expected target service + // accounts. + TargetServiceAccounts []string + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultClientOptions creates a new ClientOptions object with the default +// values. +func DefaultClientOptions() *ClientOptions { + return &ClientOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// ServerOptions contains the server-side options of an ALTS channel. These +// options will be passed to the underlying ALTS handshaker. +type ServerOptions struct { + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultServerOptions creates a new ServerOptions object with the default +// values. +func DefaultServerOptions() *ServerOptions { + return &ServerOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// altsTC is the credentials required for authenticating a connection using ALTS. +// It implements credentials.TransportCredentials interface. +type altsTC struct { + info *credentials.ProtocolInfo + side core.Side + accounts []string + hsAddress string +} + +// NewClientCreds constructs a client-side ALTS TransportCredentials object. +func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { + return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) +} + +// NewServerCreds constructs a server-side ALTS TransportCredentials object. 
+func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { + return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) +} + +func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { + once.Do(func() { + vmOnGCP = isRunningOnGCP() + }) + + if hsAddress == "" { + hsAddress = hypervisorHandshakerServiceAddress + } + return &altsTC{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: "alts", + SecurityVersion: "1.0", + }, + side: side, + accounts: accounts, + hsAddress: hsAddress, + } +} + +// ClientHandshake implements the client side handshake protocol. +func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it is shared with other handshakes. + + // Possible context leak: + // The cancel function for the child context we create will only be + // called a non-nil error is returned. 
+ var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + opts := handshaker.DefaultClientHandshakerOptions() + opts.TargetName = addr + opts.TargetServiceAccounts = g.accounts + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + chs.Close() + } + }() + secConn, authInfo, err := chs.ClientHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +// ServerHandshake implements the server side ALTS handshaker. +func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + // Connecting to ALTS handshaker service. + hsConn, err := service.Dial(g.hsAddress) + if err != nil { + return nil, nil, err + } + // Do not close hsConn since it's shared with other handshakes. 
+ + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + opts := handshaker.DefaultServerHandshakerOptions() + opts.RPCVersions = &altspb.RpcProtocolVersions{ + MaxRpcVersion: maxRPCVersion, + MinRpcVersion: minRPCVersion, + } + shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err != nil { + shs.Close() + } + }() + secConn, authInfo, err := shs.ServerHandshake(ctx) + if err != nil { + return nil, nil, err + } + altsAuthInfo, ok := authInfo.(AuthInfo) + if !ok { + return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") + } + match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + if !match { + return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) + } + return secConn, authInfo, nil +} + +func (g *altsTC) Info() credentials.ProtocolInfo { + return *g.info +} + +func (g *altsTC) Clone() credentials.TransportCredentials { + info := *g.info + var accounts []string + if g.accounts != nil { + accounts = make([]string, len(g.accounts)) + copy(accounts, g.accounts) + } + return &altsTC{ + info: &info, + side: g.side, + hsAddress: g.hsAddress, + accounts: accounts, + } +} + +func (g *altsTC) OverrideServerName(serverNameOverride string) error { + g.info.ServerName = serverNameOverride + return nil +} + +// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. 
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { + switch { + case v1.GetMajor() > v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): + return 1 + case v1.GetMajor() < v2.GetMajor(), + v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): + return -1 + } + return 0 +} + +// checkRPCVersions performs a version check between local and peer rpc protocol +// versions. This function returns true if the check passes which means both +// parties agreed on a common rpc protocol to use, and false otherwise. The +// function also returns the highest common RPC protocol version both parties +// agreed on. +func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { + if local == nil || peer == nil { + grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.") + return false, nil + } + + // maxCommonVersion is MIN(local.max, peer.max). + maxCommonVersion := local.GetMaxRpcVersion() + if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { + maxCommonVersion = peer.GetMaxRpcVersion() + } + + // minCommonVersion is MAX(local.min, peer.min). + minCommonVersion := peer.GetMinRpcVersion() + if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { + minCommonVersion = local.GetMinRpcVersion() + } + + if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { + return false, nil + } + return true, maxCommonVersion +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go new file mode 100644 index 0000000000..9c53d6b53f --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provide authentication information returned by handshakers. +package authinfo + +import ( + "google.golang.org/grpc/credentials" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" +) + +var _ credentials.AuthInfo = (*altsAuthInfo)(nil) + +// altsAuthInfo exposes security information from the ALTS handshake to the +// application. altsAuthInfo is immutable and implements credentials.AuthInfo. +type altsAuthInfo struct { + p *altspb.AltsContext + credentials.CommonAuthInfo +} + +// New returns a new altsAuthInfo object given handshaker results. +func New(result *altspb.HandshakerResult) credentials.AuthInfo { + return newAuthInfo(result) +} + +func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { + return &altsAuthInfo{ + p: &altspb.AltsContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + RecordProtocol: result.GetRecordProtocol(), + // TODO: assign security level from result. + SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, + PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), + LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), + PeerRpcVersions: result.GetPeerRpcVersions(), + }, + CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + } +} + +// AuthType identifies the context as providing ALTS authentication information. 
+func (s *altsAuthInfo) AuthType() string { + return "alts" +} + +// ApplicationProtocol returns the context's application protocol. +func (s *altsAuthInfo) ApplicationProtocol() string { + return s.p.GetApplicationProtocol() +} + +// RecordProtocol returns the context's record protocol. +func (s *altsAuthInfo) RecordProtocol() string { + return s.p.GetRecordProtocol() +} + +// SecurityLevel returns the context's security level. +func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { + return s.p.GetSecurityLevel() +} + +// PeerServiceAccount returns the context's peer service account. +func (s *altsAuthInfo) PeerServiceAccount() string { + return s.p.GetPeerServiceAccount() +} + +// LocalServiceAccount returns the context's local service account. +func (s *altsAuthInfo) LocalServiceAccount() string { + return s.p.GetLocalServiceAccount() +} + +// PeerRPCVersions returns the context's peer RPC versions. +func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { + return s.p.GetPeerRpcVersions() +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/common.go new file mode 100644 index 0000000000..33fba81239 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/common.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +//go:generate ./regenerate.sh + +// Package internal contains common core functionality for ALTS. +package internal + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +const ( + // ClientSide identifies the client in this communication. + ClientSide Side = iota + // ServerSide identifies the server in this communication. + ServerSide +) + +// PeerNotRespondingError is returned when a peer server is not responding +// after a channel has been established. It is treated as a temporary connection +// error and re-connection to the server should be attempted. +var PeerNotRespondingError = &peerNotRespondingError{} + +// Side identifies the party's role: client or server. +type Side int + +type peerNotRespondingError struct{} + +// Return an error message for the purpose of logging. +func (e *peerNotRespondingError) Error() string { + return "peer server is not responding and re-connection should be attempted." +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e *peerNotRespondingError) Temporary() bool { + return true +} + +// Handshaker defines a ALTS handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a client-side handshaking and + // returns a secure connection and corresponding auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a server-side handshaking and + // returns a secure connection and corresponding auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the caller + // obtains the secure connection. 
+ Close() +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go new file mode 100644 index 0000000000..43726e877b --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "fmt" + "strconv" +) + +// rekeyAEAD holds the necessary information for an AEAD based on +// AES-GCM that performs nonce-based key derivation and XORs the +// nonce with a random mask. +type rekeyAEAD struct { + kdfKey []byte + kdfCounter []byte + nonceMask []byte + nonceBuf []byte + gcmAEAD cipher.AEAD +} + +// KeySizeError signals that the given key does not have the correct size. +type KeySizeError int + +func (k KeySizeError) Error() string { + return "alts/conn: invalid key size " + strconv.Itoa(int(k)) +} + +// newRekeyAEAD creates a new instance of aes128gcm with rekeying. +// The key argument should be 44 bytes, the first 32 bytes are used as a key +// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// the counter. 
+func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { + k := len(key) + if k != kdfKeyLen+nonceLen { + return nil, KeySizeError(k) + } + return &rekeyAEAD{ + kdfKey: key[:kdfKeyLen], + kdfCounter: make([]byte, kdfCounterLen), + nonceMask: key[kdfKeyLen:], + nonceBuf: make([]byte, nonceLen), + gcmAEAD: nil, + }, nil +} + +// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Seal for aes128gcm. +func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if err := s.rekeyIfRequired(nonce); err != nil { + panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) +} + +// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, +// and calls Open for aes128gcm. +func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if err := s.rekeyIfRequired(nonce); err != nil { + return nil, err + } + maskNonce(s.nonceBuf, nonce, s.nonceMask) + return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) +} + +// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil +// or cannot be used with given nonce. +func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { + newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] + if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { + return nil + } + copy(s.kdfCounter, newKdfCounter) + a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) + if err != nil { + return err + } + s.gcmAEAD, err = cipher.NewGCM(a) + return err +} + +// maskNonce XORs the given nonce with the mask and stores the result in dst. 
+func maskNonce(dst, nonce, mask []byte) { + nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) + nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) + mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) + mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) + binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) + binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) +} + +// NonceSize returns the required nonce size. +func (s *rekeyAEAD) NonceSize() int { + return s.gcmAEAD.NonceSize() +} + +// Overhead returns the ciphertext overhead. +func (s *rekeyAEAD) Overhead() int { + return s.gcmAEAD.Overhead() +} + +// hkdfExpand computes the first 16 bytes of the HKDF-expand function +// defined in RFC5869. +func hkdfExpand(key, info []byte) []byte { + mac := hmac.New(sha256.New, key) + mac.Write(info) + mac.Write([]byte{0x01}[:]) + return mac.Sum(nil)[:aeadKeyLen] +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go new file mode 100644 index 0000000000..04e0adb6c9 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package conn + +import ( + "crypto/aes" + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCM = 5 +) + +// aes128gcm is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcm struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + aead cipher.AEAD +} + +// NewAES128GCM creates an instance that uses aes128gcm for ALTS record. +func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. 
+ dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.aead.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcm) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go new file mode 100644 index 0000000000..6a9035ea25 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package conn + +import ( + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCMRekey = 8 + nonceLen = 12 + aeadKeyLen = 16 + kdfKeyLen = 32 + kdfCounterOffset = 2 + kdfCounterLen = 6 + sizeUint64 = 8 +) + +// aes128gcmRekey is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. +type aes128gcmRekey struct { + // inCounter is used in ALTS record to check that incoming counters are + // as expected, since ALTS record guarantees that messages are unwrapped + // in the same order that the peer wrapped them. + inCounter Counter + outCounter Counter + inAEAD cipher.AEAD + outAEAD cipher.AEAD +} + +// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying +// for ALTS record. The key argument should be 44 bytes, the first 32 bytes +// are used as a key for HKDF-expand and the remainining 12 bytes are used +// as a random mask for the counter. +func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { + inCounter := NewInCounter(side, overflowLenAES128GCMRekey) + outCounter := NewOutCounter(side, overflowLenAES128GCMRekey) + inAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + outAEAD, err := newRekeyAEAD(key) + if err != nil { + return nil, err + } + return &aes128gcmRekey{ + inCounter, + outCounter, + inAEAD, + outAEAD, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. 
+func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcmRekey) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go new file mode 100644 index 0000000000..1795d0c9e3 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "encoding/binary" + "errors" + "fmt" +) + +const ( + // GcmTagSize is the GCM tag size is the difference in length between + // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto + // library. + GcmTagSize = 16 +) + +// ErrAuth occurs on authentication failure. +var ErrAuth = errors.New("message authentication failed") + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// ParseFramedMsg parse the provided buffer and returns a frame of the format +// msgLength+msg and any remaining bytes in that buffer. +func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { + // If the size field is not complete, return the provided buffer as + // remaining buffer. + if len(b) < MsgLenFieldSize { + return nil, b, nil + } + msgLenField := b[:MsgLenFieldSize] + length := binary.LittleEndian.Uint32(msgLenField) + if length > maxLen { + return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) + } + if len(b) < int(length)+4 { // account for the first 4 msg length bytes. + // Frame is not complete yet. 
+ return nil, b, nil + } + return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go new file mode 100644 index 0000000000..9f00aca0b6 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "errors" +) + +const counterLen = 12 + +var ( + errInvalidCounter = errors.New("invalid counter") +) + +// Counter is a 96-bit, little-endian counter. +type Counter struct { + value [counterLen]byte + invalid bool + overflowLen int +} + +// Value returns the current value of the counter as a byte slice. +func (c *Counter) Value() ([]byte, error) { + if c.invalid { + return nil, errInvalidCounter + } + return c.value[:], nil +} + +// Inc increments the counter and checks for overflow. +func (c *Counter) Inc() { + // If the counter is already invalid, there is no need to increase it. 
+ if c.invalid { + return + } + i := 0 + for ; i < c.overflowLen; i++ { + c.value[i]++ + if c.value[i] != 0 { + break + } + } + if i == c.overflowLen { + c.invalid = true + } +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go new file mode 100644 index 0000000000..fd5a53d9a7 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package conn contains an implementation of a secure channel created by gRPC +// handshakers. +package conn + +import ( + "encoding/binary" + "fmt" + "math" + "net" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +// ALTSRecordCrypto is the interface for gRPC ALTS record protocol. +type ALTSRecordCrypto interface { + // Encrypt encrypts the plaintext and computes the tag (if any) of dst + // and plaintext, dst and plaintext do not overlap. + Encrypt(dst, plaintext []byte) ([]byte, error) + // EncryptionOverhead returns the tag size (if any) in bytes. + EncryptionOverhead() int + // Decrypt decrypts ciphertext and verify the tag (if any). dst and + // ciphertext may alias exactly or not at all. To reuse ciphertext's + // storage for the decrypted output, use ciphertext[:0] as dst. 
+ Decrypt(dst, ciphertext []byte) ([]byte, error) +} + +// ALTSRecordFunc is a function type for factory functions that create +// ALTSRecordCrypto instances. +type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) + +const ( + // MsgLenFieldSize is the byte size of the frame length field of a + // framed message. + MsgLenFieldSize = 4 + // The byte size of the message type field of a framed message. + msgTypeFieldSize = 4 + // The bytes size limit for a ALTS record message. + altsRecordLengthLimit = 1024 * 1024 // 1 MiB + // The default bytes size of a ALTS record message. + altsRecordDefaultLength = 4 * 1024 // 4KiB + // Message type value included in ALTS record framing. + altsRecordMsgType = uint32(0x06) + // The initial write buffer size. + altsWriteBufferInitialSize = 32 * 1024 // 32KiB + // The maximum write buffer size. This *must* be multiple of + // altsRecordDefaultLength. + altsWriteBufferMaxSize = 512 * 1024 // 512KiB +) + +var ( + protocols = make(map[string]ALTSRecordFunc) +) + +// RegisterProtocol register a ALTS record encryption protocol. +func RegisterProtocol(protocol string, f ALTSRecordFunc) error { + if _, ok := protocols[protocol]; ok { + return fmt.Errorf("protocol %v is already registered", protocol) + } + protocols[protocol] = f + return nil +} + +// conn represents a secured connection. It implements the net.Conn interface. +type conn struct { + net.Conn + crypto ALTSRecordCrypto + // buf holds data that has been read from the connection and decrypted, + // but has not yet been returned by Read. + buf []byte + payloadLengthLimit int + // protected holds data read from the network but have not yet been + // decrypted. This data might not compose a complete frame. + protected []byte + // writeBuf is a buffer used to contain encrypted frames before being + // written to the network. + writeBuf []byte + // nextFrame stores the next frame (in protected buffer) info. 
+ nextFrame []byte + // overhead is the calculated overhead of each frame. + overhead int +} + +// NewConn creates a new secure channel instance given the other party role and +// handshaking result. +func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { + newCrypto := protocols[recordProtocol] + if newCrypto == nil { + return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) + } + crypto, err := newCrypto(side, key) + if err != nil { + return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) + } + overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() + payloadLengthLimit := altsRecordDefaultLength - overhead + if protected == nil { + // We pre-allocate protected to be of size + // 2*altsRecordDefaultLength-1 during initialization. We only + // read from the network into protected when protected does not + // contain a complete frame, which is at most + // altsRecordDefaultLength-1 (bytes). And we read at most + // altsRecordDefaultLength (bytes) data into protected at one + // time. Therefore, 2*altsRecordDefaultLength-1 is large enough + // to buffer data read from the network. + protected = make([]byte, 0, 2*altsRecordDefaultLength-1) + } + + altsConn := &conn{ + Conn: c, + crypto: crypto, + payloadLengthLimit: payloadLengthLimit, + protected: protected, + writeBuf: make([]byte, altsWriteBufferInitialSize), + nextFrame: protected, + overhead: overhead, + } + return altsConn, nil +} + +// Read reads and decrypts a frame from the underlying connection, and copies the +// decrypted payload into b. If the size of the payload is greater than len(b), +// Read retains the remaining bytes in an internal buffer, and subsequent calls +// to Read will read from this buffer until it is exhausted. 
+func (p *conn) Read(b []byte) (n int, err error) { + if len(p.buf) == 0 { + var framedMsg []byte + framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) + if err != nil { + return n, err + } + // Check whether the next frame to be decrypted has been + // completely received yet. + if len(framedMsg) == 0 { + copy(p.protected, p.nextFrame) + p.protected = p.protected[:len(p.nextFrame)] + // Always copy next incomplete frame to the beginning of + // the protected buffer and reset nextFrame to it. + p.nextFrame = p.protected + } + // Check whether a complete frame has been received yet. + for len(framedMsg) == 0 { + if len(p.protected) == cap(p.protected) { + tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) + copy(tmp, p.protected) + p.protected = tmp + } + n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) + if err != nil { + return 0, err + } + p.protected = p.protected[:len(p.protected)+n] + framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) + if err != nil { + return 0, err + } + } + // Now we have a complete frame, decrypted it. + msg := framedMsg[MsgLenFieldSize:] + msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) + if msgType&0xff != altsRecordMsgType { + return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", + msgType, altsRecordMsgType) + } + ciphertext := msg[msgTypeFieldSize:] + + // Decrypt requires that if the dst and ciphertext alias, they + // must alias exactly. Code here used to use msg[:0], but msg + // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than + // ciphertext, so they alias inexactly. Using ciphertext[:0] + // arranges the appropriate aliasing without needing to copy + // ciphertext or use a separate destination buffer. For more info + // check: https://golang.org/pkg/crypto/cipher/#AEAD. 
+ p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) + if err != nil { + return 0, err + } + } + + n = copy(b, p.buf) + p.buf = p.buf[n:] + return n, nil +} + +// Write encrypts, frames, and writes bytes from b to the underlying connection. +func (p *conn) Write(b []byte) (n int, err error) { + n = len(b) + // Calculate the output buffer size with framing and encryption overhead. + numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) + size := len(b) + numOfFrames*p.overhead + // If writeBuf is too small, increase its size up to the maximum size. + partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. 
+ writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 0000000000..84821fa254 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import core "google.golang.org/grpc/credentials/alts/internal" + +// NewOutCounter returns an outgoing counter initialized to the starting sequence +// number for the client/server side of a connection. +func NewOutCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ServerSide { + // Server counters in ALTS record have the little-endian high bit + // set. 
+ c.value[counterLen-1] = 0x80 + } + return +} + +// NewInCounter returns an incoming counter initialized to the starting sequence +// number for the client/server side of a connection. This is used in ALTS record +// to check that incoming counters are as expected, since ALTS record guarantees +// that messages are unwrapped in the same order that the peer wrapped them. +func NewInCounter(s core.Side, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + if s == core.ClientSide { + // Server counters in ALTS record have the little-endian high bit + // set. + c.value[counterLen-1] = 0x80 + } + return +} + +// CounterFromValue creates a new counter given an initial value. +func CounterFromValue(value []byte, overflowLen int) (c Counter) { + c.overflowLen = overflowLen + copy(c.value[:], value) + return +} + +// CounterSide returns the connection side (client/server) a sequence counter is +// associated with. +func CounterSide(c []byte) core.Side { + if c[counterLen-1]&0x80 == 0x80 { + return core.ServerSide + } + return core.ClientSide +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go new file mode 100644 index 0000000000..8bc7ceee0a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -0,0 +1,375 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package handshaker provides ALTS handshaking functionality for GCP. +package handshaker + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + + grpc "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + core "google.golang.org/grpc/credentials/alts/internal" + "google.golang.org/grpc/credentials/alts/internal/authinfo" + "google.golang.org/grpc/credentials/alts/internal/conn" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" +) + +const ( + // The maximum byte size of receive frames. + frameLimit = 64 * 1024 // 64 KB + rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" + // maxPendingHandshakes represents the maximum number of concurrent + // handshakes. + maxPendingHandshakes = 100 +) + +var ( + hsProtocol = altspb.HandshakeProtocol_ALTS + appProtocols = []string{"grpc"} + recordProtocols = []string{rekeyRecordProtocolName} + keyLength = map[string]int{ + rekeyRecordProtocolName: 44, + } + altsRecordFuncs = map[string]conn.ALTSRecordFunc{ + // ALTS handshaker protocols. + rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { + return conn.NewAES128GCMRekey(s, keyData) + }, + } + // control number of concurrent created (but not closed) handshakers. + mu sync.Mutex + concurrentHandshakes = int64(0) + // errDropped occurs when maxPendingHandshakes is reached. + errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") + // errOutOfBound occurs when the handshake service returns a consumed + // bytes value larger than the buffer that was passed to it originally. 
+ errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound") +) + +func init() { + for protocol, f := range altsRecordFuncs { + if err := conn.RegisterProtocol(protocol, f); err != nil { + panic(err) + } + } +} + +func acquire() bool { + mu.Lock() + // If we need n to be configurable, we can pass it as an argument. + n := int64(1) + success := maxPendingHandshakes-concurrentHandshakes >= n + if success { + concurrentHandshakes += n + } + mu.Unlock() + return success +} + +func release() { + mu.Lock() + // If we need n to be configurable, we can pass it as an argument. + n := int64(1) + concurrentHandshakes -= n + if concurrentHandshakes < 0 { + mu.Unlock() + panic("bad release") + } + mu.Unlock() +} + +// ClientHandshakerOptions contains the client handshaker options that can +// provided by the caller. +type ClientHandshakerOptions struct { + // ClientIdentity is the handshaker client local identity. + ClientIdentity *altspb.Identity + // TargetName is the server service account name for secure name + // checking. + TargetName string + // TargetServiceAccounts contains a list of expected target service + // accounts. One of these accounts should match one of the accounts in + // the handshaker results. Otherwise, the handshake fails. + TargetServiceAccounts []string + // RPCVersions specifies the gRPC versions accepted by the client. + RPCVersions *altspb.RpcProtocolVersions +} + +// ServerHandshakerOptions contains the server handshaker options that can +// provided by the caller. +type ServerHandshakerOptions struct { + // RPCVersions specifies the gRPC versions accepted by the server. + RPCVersions *altspb.RpcProtocolVersions +} + +// DefaultClientHandshakerOptions returns the default client handshaker options. +func DefaultClientHandshakerOptions() *ClientHandshakerOptions { + return &ClientHandshakerOptions{} +} + +// DefaultServerHandshakerOptions returns the default client handshaker options. 
+func DefaultServerHandshakerOptions() *ServerHandshakerOptions { + return &ServerHandshakerOptions{} +} + +// TODO: add support for future local and remote endpoint in both client options +// and server options (server options struct does not exist now. When +// caller can provide endpoints, it should be created. + +// altsHandshaker is used to complete a ALTS handshaking between client and +// server. This handshaker talks to the ALTS handshaker service in the metadata +// server. +type altsHandshaker struct { + // RPC stream used to access the ALTS Handshaker service. + stream altsgrpc.HandshakerService_DoHandshakeClient + // the connection to the peer. + conn net.Conn + // client handshake options. + clientOpts *ClientHandshakerOptions + // server handshake options. + serverOpts *ServerHandshakerOptions + // defines the side doing the handshake, client or server. + side core.Side +} + +// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker +// service in the metadata server. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + return &altsHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + side: core.ClientSide, + }, nil +} + +// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC +// stub created using the passed conn and used to talk to the ALTS Handshaker +// service in the metadata server. 
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + return &altsHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + side: core.ServerSide, + }, nil +} + +// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// done, ClientHandshake returns a secure connection. +func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if !acquire() { + return nil, nil, errDropped + } + defer release() + + if h.side != core.ClientSide { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") + } + + // Create target identities from service account list. + targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) + for _, account := range h.clientOpts.TargetServiceAccounts { + targetIdentities = append(targetIdentities, &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: account, + }, + }) + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ClientStart{ + ClientStart: &altspb.StartClientHandshakeReq{ + HandshakeSecurityProtocol: hsProtocol, + ApplicationProtocols: appProtocols, + RecordProtocols: recordProtocols, + TargetIdentities: targetIdentities, + LocalIdentity: h.clientOpts.ClientIdentity, + TargetName: h.clientOpts.TargetName, + RpcVersions: h.clientOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// done, ServerHandshake returns a secure connection. 
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { + if !acquire() { + return nil, nil, errDropped + } + defer release() + + if h.side != core.ServerSide { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") + } + + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + + // Prepare server parameters. + // TODO: currently only ALTS parameters are provided. Might need to use + // more options in the future. + params := make(map[int32]*altspb.ServerHandshakeParameters) + params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ + RecordProtocols: recordProtocols, + } + req := &altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_ServerStart{ + ServerStart: &altspb.StartServerHandshakeReq{ + ApplicationProtocols: appProtocols, + HandshakeParameters: params, + InBytes: p[:n], + RpcVersions: h.serverOpts.RPCVersions, + }, + }, + } + + conn, result, err := h.doHandshake(req) + if err != nil { + return nil, nil, err + } + authInfo := authinfo.New(result) + return conn, authInfo, nil +} + +func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check of the returned status is an error. 
+ if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errOutOfBound + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + // The handshaker returns a 128 bytes key. It should be truncated based + // on the returned record protocol. + keyLen, ok := keyLength[result.RecordProtocol] + if !ok { + return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) + } + sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) + if err != nil { + return nil, nil, err + } + return sc, result, nil +} + +func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone processes the handshake until the handshaker service returns +// the results. Handshaker service takes care of frame parsing, so we read +// whatever received from the network and send it to the handshaker service. 
+func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, extra, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service, and + // nothing is received from the peer, then we are stuck. + // This covers the case when the peer is not responding. Note + // that handshaker service connection issues are caught in + // accessHandshakerService before we even get here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, core.PeerNotRespondingError + } + // Append extra bytes from the previous interaction with the + // handshaker service with the current buffer read from conn. + p := append(extra, buf[:n]...) + // From here on, p and extra point to the same slice. + resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ + ReqOneof: &altspb.HandshakerReq_Next{ + Next: &altspb.NextHandshakeMessageReq{ + InBytes: p, + }, + }, + }) + if err != nil { + return nil, nil, err + } + // Set extra based on handshaker service response. + if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errOutOfBound + } + extra = p[resp.GetBytesConsumed():] + } +} + +// Close terminates the Handshaker. It should be called when the caller obtains +// the secure connection. 
+func (h *altsHandshaker) Close() { + h.stream.CloseSend() +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go new file mode 100644 index 0000000000..0c7b568354 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package service manages connections between the VM application and the ALTS +// handshaker service. +package service + +import ( + "sync" + + grpc "google.golang.org/grpc" +) + +var ( + // hsConn represents a connection to hypervisor handshaker service. + hsConn *grpc.ClientConn + mu sync.Mutex + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial +) + +// Dial dials the handshake service in the hypervisor. If a connection has +// already been established, this function returns it. Otherwise, a new +// connection is created. +func Dial(hsAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + if hsConn == nil { + // Create a new connection to the handshaker service. Note that + // this connection stays open until the application is closed. 
+ var err error + hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + if err != nil { + return nil, err + } + } + return hsConn, nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go new file mode 100644 index 0000000000..38c4832dfd --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -0,0 +1,152 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/altscontext.proto + +package grpc_gcp + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AltsContext struct { + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // The security level of the created secure channel. + SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` + // The peer service account. 
+ PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` + // The local service account. + LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. + PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AltsContext) Reset() { *m = AltsContext{} } +func (m *AltsContext) String() string { return proto.CompactTextString(m) } +func (*AltsContext) ProtoMessage() {} +func (*AltsContext) Descriptor() ([]byte, []int) { + return fileDescriptor_6647a41e53a575a3, []int{0} +} + +func (m *AltsContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AltsContext.Unmarshal(m, b) +} +func (m *AltsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AltsContext.Marshal(b, m, deterministic) +} +func (m *AltsContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_AltsContext.Merge(m, src) +} +func (m *AltsContext) XXX_Size() int { + return xxx_messageInfo_AltsContext.Size(m) +} +func (m *AltsContext) XXX_DiscardUnknown() { + xxx_messageInfo_AltsContext.DiscardUnknown(m) +} + +var xxx_messageInfo_AltsContext proto.InternalMessageInfo + +func (m *AltsContext) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *AltsContext) GetRecordProtocol() string { + if m != 
nil { + return m.RecordProtocol + } + return "" +} + +func (m *AltsContext) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (m *AltsContext) GetPeerServiceAccount() string { + if m != nil { + return m.PeerServiceAccount + } + return "" +} + +func (m *AltsContext) GetLocalServiceAccount() string { + if m != nil { + return m.LocalServiceAccount + } + return "" +} + +func (m *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +func (m *AltsContext) GetPeerAttributes() map[string]string { + if m != nil { + return m.PeerAttributes + } + return nil +} + +func init() { + proto.RegisterType((*AltsContext)(nil), "grpc.gcp.AltsContext") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.AltsContext.PeerAttributesEntry") +} + +func init() { proto.RegisterFile("grpc/gcp/altscontext.proto", fileDescriptor_6647a41e53a575a3) } + +var fileDescriptor_6647a41e53a575a3 = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4d, 0x6f, 0x13, 0x31, + 0x10, 0x86, 0xb5, 0x0d, 0x2d, 0xe0, 0x88, 0xb4, 0xb8, 0xa9, 0x58, 0x45, 0x42, 0x8a, 0xb8, 0xb0, + 0x5c, 0x76, 0x21, 0x5c, 0x10, 0x07, 0x50, 0x8a, 0x38, 0x20, 0x71, 0x88, 0xb6, 0x12, 0x07, 0x2e, + 0x2b, 0x77, 0x3a, 0xb2, 0x2c, 0x5c, 0x8f, 0x35, 0x76, 0x22, 0xf2, 0xb3, 0xf9, 0x07, 0x68, 0xed, + 0xcd, 0x07, 0x1f, 0xb7, 0x9d, 0x79, 0x9f, 0x19, 0xbf, 0xb3, 0x33, 0x62, 0xa6, 0xd9, 0x43, 0xa3, + 0xc1, 0x37, 0xca, 0xc6, 0x00, 0xe4, 0x22, 0xfe, 0x8c, 0xb5, 0x67, 0x8a, 0x24, 0x1f, 0xf5, 0x5a, + 0xad, 0xc1, 0xcf, 0xaa, 0x3d, 0x15, 0x59, 0xb9, 0xe0, 0x89, 0x63, 0x17, 0x10, 0xd6, 0x6c, 0xe2, + 0xb6, 0x03, 0xba, 0xbf, 0x27, 0x97, 0x6b, 0x5e, 0xfc, 0x1a, 0x89, 0xf1, 0xd2, 0xc6, 0xf0, 0x29, + 0x77, 0x92, 0x6f, 0xc4, 0x54, 0x79, 0x6f, 0x0d, 0xa8, 0x68, 0xc8, 0x75, 0x09, 0x02, 0xb2, 0x65, + 0x31, 0x2f, 0xaa, 0xc7, 0xed, 
0xe5, 0x91, 0xb6, 0x1a, 0x24, 0xf9, 0x52, 0x9c, 0x33, 0x02, 0xf1, + 0xdd, 0x81, 0x3e, 0x49, 0xf4, 0x24, 0xa7, 0xf7, 0xe0, 0x07, 0x31, 0xd9, 0x9b, 0xb0, 0xb8, 0x41, + 0x5b, 0x8e, 0xe6, 0x45, 0x35, 0x59, 0x3c, 0xab, 0x77, 0xc6, 0xeb, 0x9b, 0x41, 0xff, 0xda, 0xcb, + 0xed, 0x93, 0x70, 0x1c, 0xca, 0xd7, 0x62, 0xea, 0x11, 0xb9, 0x0b, 0xc8, 0x1b, 0x03, 0xd8, 0x29, + 0x00, 0x5a, 0xbb, 0x58, 0x3e, 0x48, 0xaf, 0xc9, 0x5e, 0xbb, 0xc9, 0xd2, 0x32, 0x2b, 0x72, 0x21, + 0xae, 0x2c, 0x81, 0xb2, 0xff, 0x94, 0x9c, 0xe6, 0x71, 0x92, 0xf8, 0x57, 0xcd, 0x17, 0xf1, 0x34, + 0xbd, 0xc2, 0x1e, 0xba, 0x0d, 0x72, 0x30, 0xe4, 0x42, 0x79, 0x36, 0x2f, 0xaa, 0xf1, 0xe2, 0xf9, + 0xc1, 0x68, 0xeb, 0x61, 0x37, 0xd7, 0xb7, 0x01, 0x6a, 0xcf, 0xfb, 0xba, 0xd6, 0xc3, 0x2e, 0x21, + 0x5b, 0x91, 0x52, 0x9d, 0x8a, 0x91, 0xcd, 0xed, 0x3a, 0x62, 0x28, 0x1f, 0xce, 0x47, 0xd5, 0x78, + 0xf1, 0xea, 0xd0, 0xe8, 0xe8, 0xe7, 0xd7, 0x2b, 0x44, 0x5e, 0xee, 0xd9, 0xcf, 0x2e, 0xf2, 0xb6, + 0x9d, 0xf8, 0x3f, 0x92, 0xb3, 0xa5, 0xb8, 0xfc, 0x0f, 0x26, 0x2f, 0xc4, 0xe8, 0x07, 0x6e, 0x87, + 0x35, 0xf5, 0x9f, 0x72, 0x2a, 0x4e, 0x37, 0xca, 0xae, 0x71, 0x58, 0x46, 0x0e, 0xde, 0x9f, 0xbc, + 0x2b, 0xae, 0xad, 0xb8, 0x32, 0x94, 0x1d, 0xf4, 0x47, 0x54, 0x1b, 0x17, 0x91, 0x9d, 0xb2, 0xd7, + 0x17, 0x47, 0x66, 0xd2, 0x74, 0xab, 0xe2, 0xfb, 0x47, 0x4d, 0xa4, 0x2d, 0xd6, 0x9a, 0xac, 0x72, + 0xba, 0x26, 0xd6, 0x4d, 0x3a, 0x2e, 0x60, 0xbc, 0x43, 0x17, 0x8d, 0xb2, 0x21, 0x9d, 0x62, 0xb3, + 0xeb, 0xd2, 0xa4, 0x2b, 0x48, 0x50, 0xa7, 0xc1, 0xdf, 0x9e, 0xa5, 0xf8, 0xed, 0xef, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9b, 0x8c, 0xe4, 0x6a, 0xba, 0x02, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 0000000000..4d1fc4c4d8 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1105 @@ +// Code 
generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. 
+ HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +var HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", +} + +var HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, +} + +func (x HandshakeProtocol) String() string { + return proto.EnumName(HandshakeProtocol_name, int32(x)) +} + +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +var NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", +} + +var NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, +} + +func (x NetworkProtocol) String() string { + return proto.EnumName(NetworkProtocol_name, int32(x)) +} + +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{1} +} + +type Endpoint struct { + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
+ Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{0} +} + +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Endpoint.Unmarshal(m, b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return xxx_messageInfo_Endpoint.Size(m) +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *Endpoint) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Endpoint) GetProtocol() NetworkProtocol { + if m != nil { + return m.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + // Types that are valid to be assigned to IdentityOneof: + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. 
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{1} +} + +func (m *Identity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Identity.Unmarshal(m, b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Identity.Marshal(b, m, deterministic) +} +func (m *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(m, src) +} +func (m *Identity) XXX_Size() int { + return xxx_messageInfo_Identity.Size(m) +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (m *Identity) GetServiceAccount() string { + if x, ok := m.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (m *Identity) GetHostname() string { + if x, ok := 
m.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (m *Identity) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Identity) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } +} + +type StartClientHandshakeReq struct { + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. + TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` + // (Optional) Application may specify a local identity. Otherwise, the + // handshaker chooses a default local identity. + LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // (Optional) Local endpoint information of the connection to the server, + // such as local IP address, port number, and network protocol. 
+ LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote server, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) If target name is provided, a secure naming check is performed + // to verify that the peer authenticated identity is indeed authorized to run + // the target name. + TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + // (Optional) RPC protocol versions supported by the client. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the client. + MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartClientHandshakeReq) Reset() { *m = StartClientHandshakeReq{} } +func (m *StartClientHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartClientHandshakeReq) ProtoMessage() {} +func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{2} +} + +func (m *StartClientHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartClientHandshakeReq.Unmarshal(m, b) +} +func (m *StartClientHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartClientHandshakeReq.Marshal(b, m, deterministic) +} +func (m *StartClientHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartClientHandshakeReq.Merge(m, src) +} +func (m *StartClientHandshakeReq) XXX_Size() int 
{ + return xxx_messageInfo_StartClientHandshakeReq.Size(m) +} +func (m *StartClientHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartClientHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartClientHandshakeReq proto.InternalMessageInfo + +func (m *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol { + if m != nil { + return m.HandshakeSecurityProtocol + } + return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +} + +func (m *StartClientHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetIdentities() []*Identity { + if m != nil { + return m.TargetIdentities + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return nil +} + +func (m *StartClientHandshakeReq) GetTargetName() string { + if m != nil { + return m.TargetName + } + return "" +} + +func (m *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +func (m *StartClientHandshakeReq) GetMaxFrameSize() uint32 { + if m != nil { + return m.MaxFrameSize + } + return 0 +} + +type ServerHandshakeParameters struct { + // The record protocols supported by the server, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. 
Otherwise, the handshaker chooses a default local identity. + LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHandshakeParameters) Reset() { *m = ServerHandshakeParameters{} } +func (m *ServerHandshakeParameters) String() string { return proto.CompactTextString(m) } +func (*ServerHandshakeParameters) ProtoMessage() {} +func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{3} +} + +func (m *ServerHandshakeParameters) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHandshakeParameters.Unmarshal(m, b) +} +func (m *ServerHandshakeParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHandshakeParameters.Marshal(b, m, deterministic) +} +func (m *ServerHandshakeParameters) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHandshakeParameters.Merge(m, src) +} +func (m *ServerHandshakeParameters) XXX_Size() int { + return xxx_messageInfo_ServerHandshakeParameters.Size(m) +} +func (m *ServerHandshakeParameters) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHandshakeParameters.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHandshakeParameters proto.InternalMessageInfo + +func (m *ServerHandshakeParameters) GetRecordProtocols() []string { + if m != nil { + return m.RecordProtocols + } + return nil +} + +func (m *ServerHandshakeParameters) GetLocalIdentities() []*Identity { + if m != nil { + return m.LocalIdentities + } + return nil +} + +type StartServerHandshakeReq struct { + // The application protocols supported by the server, e.g., "h2" (for http2), + // "grpc". 
+ ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // Handshake parameters (record protocols and local identities supported by + // the server) mapped by the handshake protocol. Each handshake security + // protocol (e.g., TLS or ALTS) has its own set of record protocols and local + // identities. Since protobuf does not support enum as key to the map, the key + // to handshake_parameters is the integer value of HandshakeProtocol enum. + HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple HandshakReq messages. + InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // (Optional) Local endpoint information of the connection to the client, + // such as local IP address, port number, and network protocol. + LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` + // (Optional) Endpoint information of the remote client, such as IP address, + // port number, and network protocol. + RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` + // (Optional) RPC protocol versions supported by the server. + RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` + // (Optional) Maximum frame size supported by the server. 
+ MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartServerHandshakeReq) Reset() { *m = StartServerHandshakeReq{} } +func (m *StartServerHandshakeReq) String() string { return proto.CompactTextString(m) } +func (*StartServerHandshakeReq) ProtoMessage() {} +func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{4} +} + +func (m *StartServerHandshakeReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartServerHandshakeReq.Unmarshal(m, b) +} +func (m *StartServerHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartServerHandshakeReq.Marshal(b, m, deterministic) +} +func (m *StartServerHandshakeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartServerHandshakeReq.Merge(m, src) +} +func (m *StartServerHandshakeReq) XXX_Size() int { + return xxx_messageInfo_StartServerHandshakeReq.Size(m) +} +func (m *StartServerHandshakeReq) XXX_DiscardUnknown() { + xxx_messageInfo_StartServerHandshakeReq.DiscardUnknown(m) +} + +var xxx_messageInfo_StartServerHandshakeReq proto.InternalMessageInfo + +func (m *StartServerHandshakeReq) GetApplicationProtocols() []string { + if m != nil { + return m.ApplicationProtocols + } + return nil +} + +func (m *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { + if m != nil { + return m.HandshakeParameters + } + return nil +} + +func (m *StartServerHandshakeReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +func (m *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { + if m != nil { + return m.LocalEndpoint + } + return nil +} + +func (m *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { + if m != nil { + return m.RemoteEndpoint + } + return 
nil +} + +func (m *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.RpcVersions + } + return nil +} + +func (m *StartServerHandshakeReq) GetMaxFrameSize() uint32 { + if m != nil { + return m.MaxFrameSize + } + return 0 +} + +type NextHandshakeMessageReq struct { + // Bytes in out_frames returned from the peer's HandshakerResp. It is possible + // that the peer's out_frames are split into multiple NextHandshakerMessageReq + // messages. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NextHandshakeMessageReq) Reset() { *m = NextHandshakeMessageReq{} } +func (m *NextHandshakeMessageReq) String() string { return proto.CompactTextString(m) } +func (*NextHandshakeMessageReq) ProtoMessage() {} +func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{5} +} + +func (m *NextHandshakeMessageReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NextHandshakeMessageReq.Unmarshal(m, b) +} +func (m *NextHandshakeMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NextHandshakeMessageReq.Marshal(b, m, deterministic) +} +func (m *NextHandshakeMessageReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextHandshakeMessageReq.Merge(m, src) +} +func (m *NextHandshakeMessageReq) XXX_Size() int { + return xxx_messageInfo_NextHandshakeMessageReq.Size(m) +} +func (m *NextHandshakeMessageReq) XXX_DiscardUnknown() { + xxx_messageInfo_NextHandshakeMessageReq.DiscardUnknown(m) +} + +var xxx_messageInfo_NextHandshakeMessageReq proto.InternalMessageInfo + +func (m *NextHandshakeMessageReq) GetInBytes() []byte { + if m != nil { + return m.InBytes + } + return nil +} + +type HandshakerReq struct { + // Types that are valid to be assigned to ReqOneof: + // 
*HandshakerReq_ClientStart + // *HandshakerReq_ServerStart + // *HandshakerReq_Next + ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerReq) Reset() { *m = HandshakerReq{} } +func (m *HandshakerReq) String() string { return proto.CompactTextString(m) } +func (*HandshakerReq) ProtoMessage() {} +func (*HandshakerReq) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{6} +} + +func (m *HandshakerReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerReq.Unmarshal(m, b) +} +func (m *HandshakerReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerReq.Marshal(b, m, deterministic) +} +func (m *HandshakerReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerReq.Merge(m, src) +} +func (m *HandshakerReq) XXX_Size() int { + return xxx_messageInfo_HandshakerReq.Size(m) +} +func (m *HandshakerReq) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerReq.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerReq proto.InternalMessageInfo + +type isHandshakerReq_ReqOneof interface { + isHandshakerReq_ReqOneof() +} + +type HandshakerReq_ClientStart struct { + ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type HandshakerReq_ServerStart struct { + ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type HandshakerReq_Next struct { + Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} + +func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} + +func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { + if m != nil { + return m.ReqOneof + 
} + return nil +} + +func (m *HandshakerReq) GetClientStart() *StartClientHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (m *HandshakerReq) GetServerStart() *StartServerHandshakeReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (m *HandshakerReq) GetNext() *NextHandshakeMessageReq { + if x, ok := m.GetReqOneof().(*HandshakerReq_Next); ok { + return x.Next + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HandshakerReq) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HandshakerReq_ClientStart)(nil), + (*HandshakerReq_ServerStart)(nil), + (*HandshakerReq_Next)(nil), + } +} + +type HandshakerResult struct { + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // Cryptographic key data. The key data may be more than the key length + // required for the record protocol, thus the client of the handshaker + // service needs to truncate the key data into the right key length. + KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used in the handshake. 
+ LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // Indicate whether the handshaker service client should keep the channel + // between the handshaker service open, e.g., in order to handle + // post-handshake messages in the future. + KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // The maximum frame size of the peer. + MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResult) Reset() { *m = HandshakerResult{} } +func (m *HandshakerResult) String() string { return proto.CompactTextString(m) } +func (*HandshakerResult) ProtoMessage() {} +func (*HandshakerResult) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{7} +} + +func (m *HandshakerResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResult.Unmarshal(m, b) +} +func (m *HandshakerResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResult.Marshal(b, m, deterministic) +} +func (m *HandshakerResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResult.Merge(m, src) +} +func (m *HandshakerResult) XXX_Size() int { + return xxx_messageInfo_HandshakerResult.Size(m) +} +func (m *HandshakerResult) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResult.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResult proto.InternalMessageInfo + +func (m *HandshakerResult) GetApplicationProtocol() string { + if m != nil { + return 
m.ApplicationProtocol + } + return "" +} + +func (m *HandshakerResult) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *HandshakerResult) GetKeyData() []byte { + if m != nil { + return m.KeyData + } + return nil +} + +func (m *HandshakerResult) GetPeerIdentity() *Identity { + if m != nil { + return m.PeerIdentity + } + return nil +} + +func (m *HandshakerResult) GetLocalIdentity() *Identity { + if m != nil { + return m.LocalIdentity + } + return nil +} + +func (m *HandshakerResult) GetKeepChannelOpen() bool { + if m != nil { + return m.KeepChannelOpen + } + return false +} + +func (m *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +func (m *HandshakerResult) GetMaxFrameSize() uint32 { + if m != nil { + return m.MaxFrameSize + } + return 0 +} + +type HandshakerStatus struct { + // The status code. This could be the gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. 
+ Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerStatus) Reset() { *m = HandshakerStatus{} } +func (m *HandshakerStatus) String() string { return proto.CompactTextString(m) } +func (*HandshakerStatus) ProtoMessage() {} +func (*HandshakerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{8} +} + +func (m *HandshakerStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerStatus.Unmarshal(m, b) +} +func (m *HandshakerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerStatus.Marshal(b, m, deterministic) +} +func (m *HandshakerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerStatus.Merge(m, src) +} +func (m *HandshakerStatus) XXX_Size() int { + return xxx_messageInfo_HandshakerStatus.Size(m) +} +func (m *HandshakerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerStatus proto.InternalMessageInfo + +func (m *HandshakerStatus) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *HandshakerStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +type HandshakerResp struct { + // Frames to be given to the peer for the NextHandshakeMessageReq. May be + // empty if no out_frames have to be sent to the peer or if in_bytes in the + // HandshakerReq are incomplete. All the non-empty out frames must be sent to + // the peer even if the handshaker status is not OK as these frames may + // contain the alert frames. + OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes consumed by the handshaker. 
It is possible + // that part of in_bytes in HandshakerReq was unrelated to the handshake + // process. + BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set iff the handshake was successful. out_frames may still be set + // to frames that needs to be forwarded to the peer. + Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + // Status of the handshaker. + Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HandshakerResp) Reset() { *m = HandshakerResp{} } +func (m *HandshakerResp) String() string { return proto.CompactTextString(m) } +func (*HandshakerResp) ProtoMessage() {} +func (*HandshakerResp) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{9} +} + +func (m *HandshakerResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HandshakerResp.Unmarshal(m, b) +} +func (m *HandshakerResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HandshakerResp.Marshal(b, m, deterministic) +} +func (m *HandshakerResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakerResp.Merge(m, src) +} +func (m *HandshakerResp) XXX_Size() int { + return xxx_messageInfo_HandshakerResp.Size(m) +} +func (m *HandshakerResp) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakerResp.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakerResp proto.InternalMessageInfo + +func (m *HandshakerResp) GetOutFrames() []byte { + if m != nil { + return m.OutFrames + } + return nil +} + +func (m *HandshakerResp) GetBytesConsumed() uint32 { + if m != nil { + return m.BytesConsumed + } + return 0 +} + +func (m *HandshakerResp) GetResult() *HandshakerResult { + if m != nil { + return m.Result + } + return nil +} + +func (m 
*HandshakerResp) GetStatus() *HandshakerStatus { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterEnum("grpc.gcp.HandshakeProtocol", HandshakeProtocol_name, HandshakeProtocol_value) + proto.RegisterEnum("grpc.gcp.NetworkProtocol", NetworkProtocol_name, NetworkProtocol_value) + proto.RegisterType((*Endpoint)(nil), "grpc.gcp.Endpoint") + proto.RegisterType((*Identity)(nil), "grpc.gcp.Identity") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.Identity.AttributesEntry") + proto.RegisterType((*StartClientHandshakeReq)(nil), "grpc.gcp.StartClientHandshakeReq") + proto.RegisterType((*ServerHandshakeParameters)(nil), "grpc.gcp.ServerHandshakeParameters") + proto.RegisterType((*StartServerHandshakeReq)(nil), "grpc.gcp.StartServerHandshakeReq") + proto.RegisterMapType((map[int32]*ServerHandshakeParameters)(nil), "grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry") + proto.RegisterType((*NextHandshakeMessageReq)(nil), "grpc.gcp.NextHandshakeMessageReq") + proto.RegisterType((*HandshakerReq)(nil), "grpc.gcp.HandshakerReq") + proto.RegisterType((*HandshakerResult)(nil), "grpc.gcp.HandshakerResult") + proto.RegisterType((*HandshakerStatus)(nil), "grpc.gcp.HandshakerStatus") + proto.RegisterType((*HandshakerResp)(nil), "grpc.gcp.HandshakerResp") +} + +func init() { proto.RegisterFile("grpc/gcp/handshaker.proto", fileDescriptor_54c074f40c7c7e99) } + +var fileDescriptor_54c074f40c7c7e99 = []byte{ + // 1203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xce, 0xda, 0x4e, 0xe2, 0x1c, 0xc7, 0x3f, 0x99, 0xa6, 0xea, 0x26, 0x6d, 0xc1, 0x18, 0x10, + 0x6e, 0x2f, 0x6c, 0x70, 0x41, 0xa5, 0x45, 0x55, 0x6b, 0x3b, 0x8e, 0x1c, 0x5a, 0x1c, 0x6b, 0x9d, + 0x82, 0x44, 0x2f, 0x56, 0xd3, 0xf5, 0xd4, 0x59, 0x79, 0x3d, 0xb3, 0x9d, 0x19, 0x87, 0xb8, 0xf7, + 0xbc, 0x04, 0xf7, 0xbc, 0x06, 0x2f, 0xc1, 0x33, 0x20, 0xf1, 0x18, 0x68, 0x67, 0x7f, 0x6d, 0xaf, 
+ 0xab, 0x22, 0xb8, 0xdb, 0x39, 0xf3, 0x7d, 0x67, 0xce, 0x9c, 0xf3, 0x9d, 0xb3, 0x03, 0x47, 0x13, + 0xee, 0x5a, 0xcd, 0x89, 0xe5, 0x36, 0x2f, 0x31, 0x1d, 0x8b, 0x4b, 0x3c, 0x25, 0xbc, 0xe1, 0x72, + 0x26, 0x19, 0xca, 0x7b, 0x5b, 0x8d, 0x89, 0xe5, 0x1e, 0xd7, 0x23, 0x90, 0xe4, 0x98, 0x0a, 0x97, + 0x71, 0x69, 0x0a, 0x62, 0xcd, 0xb9, 0x2d, 0x17, 0xa6, 0xc5, 0x66, 0x33, 0x46, 0x7d, 0x4e, 0x4d, + 0x42, 0xbe, 0x47, 0xc7, 0x2e, 0xb3, 0xa9, 0x44, 0x77, 0x01, 0x6c, 0xd7, 0xc4, 0xe3, 0x31, 0x27, + 0x42, 0xe8, 0x5a, 0x55, 0xab, 0xef, 0x19, 0x7b, 0xb6, 0xdb, 0xf6, 0x0d, 0x08, 0x41, 0xce, 0x73, + 0xa4, 0x67, 0xaa, 0x5a, 0x7d, 0xdb, 0x50, 0xdf, 0xe8, 0x1b, 0xc8, 0x2b, 0x3f, 0x16, 0x73, 0xf4, + 0x6c, 0x55, 0xab, 0x97, 0x5a, 0x47, 0x8d, 0x30, 0x8a, 0xc6, 0x80, 0xc8, 0x5f, 0x18, 0x9f, 0x0e, + 0x03, 0x80, 0x11, 0x41, 0x6b, 0x7f, 0x6b, 0x90, 0x3f, 0x1b, 0x13, 0x2a, 0x6d, 0xb9, 0x40, 0xf7, + 0xa0, 0x2c, 0x08, 0xbf, 0xb2, 0x2d, 0x62, 0x62, 0xcb, 0x62, 0x73, 0x2a, 0xfd, 0xb3, 0xfb, 0x5b, + 0x46, 0x29, 0xd8, 0x68, 0xfb, 0x76, 0x74, 0x07, 0xf2, 0x97, 0x4c, 0x48, 0x8a, 0x67, 0x44, 0x85, + 0xe1, 0x61, 0x22, 0x0b, 0xea, 0x00, 0x60, 0x29, 0xb9, 0xfd, 0x7a, 0x2e, 0x89, 0xd0, 0xb3, 0xd5, + 0x6c, 0xbd, 0xd0, 0xaa, 0xc5, 0xe1, 0x84, 0x07, 0x36, 0xda, 0x11, 0xa8, 0x47, 0x25, 0x5f, 0x18, + 0x09, 0xd6, 0xf1, 0x13, 0x28, 0xaf, 0x6c, 0xa3, 0x0a, 0x64, 0xa7, 0x64, 0x11, 0xe4, 0xc3, 0xfb, + 0x44, 0x87, 0xb0, 0x7d, 0x85, 0x9d, 0x79, 0x10, 0x83, 0xe1, 0x2f, 0x1e, 0x67, 0xbe, 0xd5, 0x3a, + 0x15, 0x28, 0xd9, 0xc1, 0x31, 0x26, 0xa3, 0x84, 0xbd, 0xa9, 0xfd, 0x99, 0x83, 0x5b, 0x23, 0x89, + 0xb9, 0xec, 0x3a, 0x36, 0xa1, 0xb2, 0x1f, 0x16, 0xcd, 0x20, 0x6f, 0xd1, 0x2b, 0xb8, 0x1d, 0x15, + 0x31, 0xae, 0x4f, 0x94, 0x50, 0x4d, 0x25, 0xf4, 0x76, 0x7c, 0x83, 0x88, 0x1c, 0xa5, 0xf4, 0x28, + 0xe2, 0x8f, 0x02, 0x7a, 0xb8, 0x85, 0x1e, 0xc0, 0x4d, 0xec, 0xba, 0x8e, 0x6d, 0x61, 0x69, 0x33, + 0x1a, 0x79, 0x15, 0x7a, 0xa6, 0x9a, 0xad, 0xef, 0x19, 0x87, 0x89, 0xcd, 0x90, 0x23, 0xd0, 0x3d, + 0xa8, 0x70, 0x62, 0x31, 0x3e, 0x4e, 
0xe0, 0xb3, 0x0a, 0x5f, 0xf6, 0xed, 0x31, 0xf4, 0x29, 0x1c, + 0x48, 0xcc, 0x27, 0x44, 0x9a, 0xc1, 0x8d, 0x6d, 0x22, 0xf4, 0x9c, 0x4a, 0x3a, 0x5a, 0x4f, 0xba, + 0x51, 0xf1, 0xc1, 0x67, 0x11, 0x16, 0x3d, 0x82, 0x92, 0xc3, 0x2c, 0xec, 0x84, 0xfc, 0x85, 0xbe, + 0x5d, 0xd5, 0x36, 0xb0, 0x8b, 0x0a, 0x19, 0x49, 0x26, 0xa2, 0x92, 0x40, 0xbb, 0xfa, 0xce, 0x2a, + 0x35, 0x54, 0x75, 0x40, 0x8d, 0x44, 0xfe, 0x1d, 0x94, 0x39, 0x99, 0x31, 0x49, 0x62, 0xee, 0xee, + 0x46, 0x6e, 0xc9, 0x87, 0x46, 0xe4, 0x8f, 0xa1, 0x10, 0xdc, 0x59, 0x49, 0x30, 0xaf, 0xca, 0x0f, + 0xbe, 0x69, 0xe0, 0x49, 0xf0, 0x19, 0xec, 0x73, 0xd7, 0x32, 0xaf, 0x08, 0x17, 0x36, 0xa3, 0x42, + 0xdf, 0x53, 0xae, 0xef, 0xc6, 0xae, 0x0d, 0xd7, 0x0a, 0x53, 0xf8, 0x63, 0x00, 0x32, 0x0a, 0xdc, + 0xb5, 0xc2, 0x05, 0xfa, 0x0c, 0x4a, 0x33, 0x7c, 0x6d, 0xbe, 0xe1, 0x78, 0x46, 0x4c, 0x61, 0xbf, + 0x23, 0x3a, 0x54, 0xb5, 0x7a, 0xd1, 0xd8, 0x9f, 0xe1, 0xeb, 0x53, 0xcf, 0x38, 0xb2, 0xdf, 0x91, + 0xda, 0xaf, 0x1a, 0x1c, 0x8d, 0x08, 0xbf, 0x22, 0x3c, 0xd6, 0x04, 0xf6, 0x76, 0x25, 0xe1, 0xe9, + 0x55, 0xd4, 0xd2, 0xab, 0xf8, 0x04, 0x2a, 0x4b, 0x45, 0xf0, 0x8a, 0x98, 0xd9, 0x58, 0xc4, 0x72, + 0xb2, 0x0c, 0x36, 0x11, 0xb5, 0xdf, 0x43, 0x75, 0xaf, 0x04, 0xe3, 0xa9, 0x7b, 0xa3, 0x00, 0xb5, + 0xf7, 0x08, 0x70, 0x06, 0x87, 0x71, 0x4b, 0xb8, 0xd1, 0x95, 0x82, 0x98, 0x1e, 0xc7, 0x31, 0x6d, + 0x38, 0xb5, 0x91, 0x92, 0x0f, 0xbf, 0xcb, 0x6f, 0x5c, 0xa6, 0x64, 0xea, 0x08, 0xf2, 0x36, 0x35, + 0x5f, 0x2f, 0xfc, 0x81, 0xa1, 0xd5, 0xf7, 0x8d, 0x5d, 0x9b, 0x76, 0xbc, 0x65, 0x8a, 0xc6, 0x72, + 0xff, 0x41, 0x63, 0xdb, 0x1f, 0xac, 0xb1, 0x55, 0x09, 0xed, 0xfc, 0x0f, 0x12, 0xda, 0x5d, 0x97, + 0xd0, 0xf1, 0x14, 0xf4, 0x4d, 0xb9, 0x4a, 0x8e, 0xbc, 0x6d, 0x7f, 0xe4, 0x3d, 0x4a, 0x8e, 0xbc, + 0x42, 0xeb, 0xd3, 0x44, 0x21, 0x36, 0xc9, 0x30, 0x31, 0x17, 0x6b, 0x5f, 0xc3, 0xad, 0x01, 0xb9, + 0x8e, 0xa7, 0xdf, 0x0f, 0x44, 0x08, 0x3c, 0x51, 0x32, 0x49, 0x96, 0x40, 0x5b, 0x2a, 0x41, 0xed, + 0x2f, 0x0d, 0x8a, 0x11, 0x85, 0x7b, 0xe0, 0x53, 0xd8, 0xb7, 0xd4, 0x1c, 
0x35, 0x85, 0x57, 0x7f, + 0x45, 0x28, 0xb4, 0x3e, 0x59, 0x91, 0xc5, 0xfa, 0xa8, 0xed, 0x6f, 0x19, 0x05, 0x9f, 0xa8, 0x00, + 0x9e, 0x1f, 0xa1, 0xe2, 0x0e, 0xfc, 0x64, 0x52, 0xfd, 0xac, 0xcb, 0xcb, 0xf3, 0xe3, 0x13, 0x7d, + 0x3f, 0x0f, 0x21, 0x47, 0xc9, 0xb5, 0x54, 0xda, 0x59, 0xe2, 0x6f, 0xb8, 0x6d, 0x7f, 0xcb, 0x50, + 0x84, 0x4e, 0x01, 0xf6, 0x38, 0x79, 0x1b, 0xfc, 0x23, 0x7e, 0xcb, 0x42, 0x25, 0x79, 0x4f, 0x31, + 0x77, 0x24, 0xfa, 0x0a, 0x0e, 0xd3, 0xda, 0x27, 0xf8, 0x0f, 0xdd, 0x48, 0xe9, 0x1e, 0xf4, 0x05, + 0x94, 0x57, 0xfa, 0x3e, 0xf8, 0x43, 0x95, 0x96, 0xdb, 0xde, 0xcb, 0xf9, 0x94, 0x2c, 0xcc, 0x31, + 0x96, 0x38, 0x94, 0xfd, 0x94, 0x2c, 0x4e, 0xb0, 0xc4, 0xe8, 0x21, 0x14, 0x5d, 0x42, 0x78, 0x3c, + 0x94, 0x73, 0x1b, 0x87, 0xf2, 0xbe, 0x07, 0x5c, 0x9f, 0xc9, 0xff, 0x7e, 0x9c, 0xdf, 0x87, 0x83, + 0x29, 0x21, 0xae, 0x69, 0x5d, 0x62, 0x4a, 0x89, 0x63, 0x32, 0x97, 0x50, 0xa5, 0xfb, 0xbc, 0x51, + 0xf6, 0x36, 0xba, 0xbe, 0xfd, 0xdc, 0x25, 0x14, 0x9d, 0xc1, 0x81, 0x8a, 0x6f, 0xa9, 0x47, 0x76, + 0x3f, 0xa4, 0x47, 0xca, 0x1e, 0xcf, 0x78, 0x6f, 0x9f, 0xe4, 0x53, 0x46, 0xed, 0xb3, 0x64, 0x6d, + 0x46, 0x12, 0xcb, 0xb9, 0x7a, 0x0a, 0x59, 0x6c, 0x4c, 0x54, 0x2d, 0x8a, 0x86, 0xfa, 0x46, 0x3a, + 0xec, 0x8e, 0x89, 0xc4, 0xb6, 0xfa, 0xc3, 0x7a, 0x49, 0x0f, 0x97, 0xb5, 0x3f, 0x34, 0x28, 0x2d, + 0x95, 0xd7, 0xf5, 0x9e, 0x5a, 0x6c, 0x2e, 0xfd, 0xa3, 0x43, 0xd9, 0xef, 0xb1, 0xb9, 0x54, 0xc7, + 0x0a, 0xf4, 0x39, 0x94, 0x54, 0x43, 0x98, 0x16, 0xa3, 0x62, 0x3e, 0x23, 0x63, 0xe5, 0xb2, 0x68, + 0x14, 0x95, 0xb5, 0x1b, 0x18, 0x51, 0x0b, 0x76, 0xb8, 0x12, 0x4b, 0xa0, 0xbf, 0xe3, 0x94, 0xa7, + 0x42, 0x20, 0x27, 0x23, 0x40, 0x7a, 0x1c, 0xa1, 0x2e, 0x11, 0x14, 0x36, 0x95, 0xe3, 0x5f, 0xd3, + 0x08, 0x90, 0xf7, 0xbf, 0x87, 0x83, 0xb5, 0xa7, 0x07, 0xaa, 0xc1, 0x47, 0xfd, 0xf6, 0xe0, 0x64, + 0xd4, 0x6f, 0x3f, 0xef, 0x99, 0x43, 0xe3, 0xfc, 0xe2, 0xbc, 0x7b, 0xfe, 0xc2, 0x7c, 0x39, 0x18, + 0x0d, 0x7b, 0xdd, 0xb3, 0xd3, 0xb3, 0xde, 0x49, 0x65, 0x0b, 0xed, 0x42, 0xf6, 0xe2, 0xc5, 0xa8, + 0xa2, 0xa1, 
0x3c, 0xe4, 0xda, 0x2f, 0x2e, 0x46, 0x95, 0xcc, 0xfd, 0x1e, 0x94, 0x57, 0xde, 0x85, + 0xa8, 0x0a, 0x77, 0x06, 0xbd, 0x8b, 0x9f, 0xce, 0x8d, 0xe7, 0xef, 0xf3, 0xd3, 0x1d, 0x56, 0x34, + 0xef, 0xe3, 0xe5, 0xc9, 0xb0, 0x92, 0x69, 0xbd, 0x4a, 0x84, 0xc4, 0x47, 0xfe, 0x2b, 0x11, 0x9d, + 0x42, 0xe1, 0x84, 0x45, 0x66, 0x74, 0x2b, 0x3d, 0x1d, 0x6f, 0x8f, 0xf5, 0x0d, 0x79, 0x72, 0x6b, + 0x5b, 0x75, 0xed, 0x4b, 0xad, 0x33, 0x85, 0x9b, 0x36, 0xf3, 0x31, 0xd8, 0x91, 0xa2, 0x61, 0x53, + 0x49, 0x38, 0xc5, 0x4e, 0xa7, 0x1c, 0xc3, 0x55, 0xf4, 0x43, 0xed, 0xe7, 0xa7, 0x13, 0xc6, 0x26, + 0x0e, 0x69, 0x4c, 0x98, 0x83, 0xe9, 0xa4, 0xc1, 0xf8, 0xa4, 0xa9, 0x1e, 0xdf, 0x16, 0x27, 0x4a, + 0xde, 0xd8, 0x11, 0x4d, 0xcf, 0x49, 0x33, 0x74, 0xd2, 0x54, 0xbd, 0xa9, 0x40, 0xe6, 0xc4, 0x72, + 0x5f, 0xef, 0xa8, 0xf5, 0x83, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xf9, 0x9d, 0xf2, 0xd9, + 0x0b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// HandshakerServiceClient is the client API for HandshakerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HandshakerServiceClient interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. 
+ DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) +} + +type handshakerServiceClient struct { + cc *grpc.ClientConn +} + +func NewHandshakerServiceClient(cc *grpc.ClientConn) HandshakerServiceClient { + return &handshakerServiceClient{cc} +} + +func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { + stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + if err != nil { + return nil, err + } + x := &handshakerServiceDoHandshakeClient{stream} + return x, nil +} + +type HandshakerService_DoHandshakeClient interface { + Send(*HandshakerReq) error + Recv() (*HandshakerResp, error) + grpc.ClientStream +} + +type handshakerServiceDoHandshakeClient struct { + grpc.ClientStream +} + +func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { + m := new(HandshakerResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HandshakerServiceServer is the server API for HandshakerService service. +type HandshakerServiceServer interface { + // Handshaker service accepts a stream of handshaker request, returning a + // stream of handshaker response. Client is expected to send exactly one + // message with either client_start or server_start followed by one or more + // messages with next. Each time client sends a request, the handshaker + // service expects to respond. Client does not have to wait for service's + // response before sending next request. + DoHandshake(HandshakerService_DoHandshakeServer) error +} + +// UnimplementedHandshakerServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedHandshakerServiceServer struct { +} + +func (*UnimplementedHandshakerServiceServer) DoHandshake(srv HandshakerService_DoHandshakeServer) error { + return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") +} + +func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { + s.RegisterService(&_HandshakerService_serviceDesc, srv) +} + +func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) +} + +type HandshakerService_DoHandshakeServer interface { + Send(*HandshakerResp) error + Recv() (*HandshakerReq, error) + grpc.ServerStream +} + +type handshakerServiceDoHandshakeServer struct { + grpc.ServerStream +} + +func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { + m := new(HandshakerReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _HandshakerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.gcp.HandshakerService", + HandlerType: (*HandshakerServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DoHandshake", + Handler: _HandshakerService_DoHandshake_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/gcp/handshaker.proto", +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go new file mode 100644 index 0000000000..992805165d --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -0,0 +1,184 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: grpc/gcp/transport_security_common.proto + +package grpc_gcp + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The security level of the created channel. The list is sorted in increasing +// level of security. This order must always be maintained. +type SecurityLevel int32 + +const ( + SecurityLevel_SECURITY_NONE SecurityLevel = 0 + SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 + SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 +) + +var SecurityLevel_name = map[int32]string{ + 0: "SECURITY_NONE", + 1: "INTEGRITY_ONLY", + 2: "INTEGRITY_AND_PRIVACY", +} + +var SecurityLevel_value = map[string]int32{ + "SECURITY_NONE": 0, + "INTEGRITY_ONLY": 1, + "INTEGRITY_AND_PRIVACY": 2, +} + +func (x SecurityLevel) String() string { + return proto.EnumName(SecurityLevel_name, int32(x)) +} + +func (SecurityLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b97e31e3cc23582a, []int{0} +} + +// Max and min supported RPC protocol versions. +type RpcProtocolVersions struct { + // Maximum supported RPC version. + MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` + // Minimum supported RPC version. 
+ MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions) Reset() { *m = RpcProtocolVersions{} } +func (m *RpcProtocolVersions) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions) ProtoMessage() {} +func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_b97e31e3cc23582a, []int{0} +} + +func (m *RpcProtocolVersions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions.Unmarshal(m, b) +} +func (m *RpcProtocolVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions.Marshal(b, m, deterministic) +} +func (m *RpcProtocolVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions.Merge(m, src) +} +func (m *RpcProtocolVersions) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions.Size(m) +} +func (m *RpcProtocolVersions) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions proto.InternalMessageInfo + +func (m *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MaxRpcVersion + } + return nil +} + +func (m *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { + if m != nil { + return m.MinRpcVersion + } + return nil +} + +// RPC version contains a major version and a minor version. 
+type RpcProtocolVersions_Version struct { + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcProtocolVersions_Version) Reset() { *m = RpcProtocolVersions_Version{} } +func (m *RpcProtocolVersions_Version) String() string { return proto.CompactTextString(m) } +func (*RpcProtocolVersions_Version) ProtoMessage() {} +func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { + return fileDescriptor_b97e31e3cc23582a, []int{0, 0} +} + +func (m *RpcProtocolVersions_Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcProtocolVersions_Version.Unmarshal(m, b) +} +func (m *RpcProtocolVersions_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcProtocolVersions_Version.Marshal(b, m, deterministic) +} +func (m *RpcProtocolVersions_Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcProtocolVersions_Version.Merge(m, src) +} +func (m *RpcProtocolVersions_Version) XXX_Size() int { + return xxx_messageInfo_RpcProtocolVersions_Version.Size(m) +} +func (m *RpcProtocolVersions_Version) XXX_DiscardUnknown() { + xxx_messageInfo_RpcProtocolVersions_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcProtocolVersions_Version proto.InternalMessageInfo + +func (m *RpcProtocolVersions_Version) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *RpcProtocolVersions_Version) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func init() { + proto.RegisterEnum("grpc.gcp.SecurityLevel", SecurityLevel_name, SecurityLevel_value) + proto.RegisterType((*RpcProtocolVersions)(nil), "grpc.gcp.RpcProtocolVersions") + proto.RegisterType((*RpcProtocolVersions_Version)(nil), "grpc.gcp.RpcProtocolVersions.Version") +} + +func init() 
{ + proto.RegisterFile("grpc/gcp/transport_security_common.proto", fileDescriptor_b97e31e3cc23582a) +} + +var fileDescriptor_b97e31e3cc23582a = []byte{ + // 323 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x41, 0x4b, 0x3b, 0x31, + 0x10, 0xc5, 0xff, 0x5b, 0xf8, 0xab, 0x44, 0x56, 0xeb, 0x6a, 0x41, 0xc5, 0x83, 0x08, 0x42, 0xf1, + 0x90, 0x05, 0xc5, 0xb3, 0xb4, 0xb5, 0x48, 0xa1, 0x6e, 0xeb, 0xb6, 0x16, 0xea, 0x25, 0xc4, 0x18, + 0x42, 0x24, 0x9b, 0x09, 0xb3, 0xb1, 0xd4, 0xaf, 0xec, 0xa7, 0x90, 0x4d, 0xbb, 0x14, 0xc1, 0x8b, + 0xb7, 0xbc, 0xc7, 0xcc, 0x6f, 0x32, 0xf3, 0x48, 0x5b, 0xa1, 0x13, 0xa9, 0x12, 0x2e, 0xf5, 0xc8, + 0x6d, 0xe9, 0x00, 0x3d, 0x2b, 0xa5, 0xf8, 0x40, 0xed, 0x3f, 0x99, 0x80, 0xa2, 0x00, 0x4b, 0x1d, + 0x82, 0x87, 0x64, 0xa7, 0xaa, 0xa4, 0x4a, 0xb8, 0x8b, 0xaf, 0x88, 0x1c, 0xe6, 0x4e, 0x8c, 0x2b, + 0x5b, 0x80, 0x99, 0x49, 0x2c, 0x35, 0xd8, 0x32, 0x79, 0x24, 0xfb, 0x05, 0x5f, 0x32, 0x74, 0x82, + 0x2d, 0x56, 0xde, 0x71, 0x74, 0x1e, 0xb5, 0x77, 0xaf, 0x2f, 0x69, 0xdd, 0x4b, 0x7f, 0xe9, 0xa3, + 0xeb, 0x47, 0x1e, 0x17, 0x7c, 0x99, 0x3b, 0xb1, 0x96, 0x01, 0xa7, 0xed, 0x0f, 0x5c, 0xe3, 0x6f, + 0x38, 0x6d, 0x37, 0xb8, 0xd3, 0x5b, 0xb2, 0x5d, 0x93, 0x8f, 0xc8, 0xff, 0x82, 0xbf, 0x03, 0x86, + 0xef, 0xc5, 0xf9, 0x4a, 0x04, 0x57, 0x5b, 0xc0, 0x30, 0xa5, 0x72, 0x2b, 0x71, 0xf5, 0x44, 0xe2, + 0xc9, 0xfa, 0x1e, 0x43, 0xb9, 0x90, 0x26, 0x39, 0x20, 0xf1, 0xa4, 0xdf, 0x7b, 0xce, 0x07, 0xd3, + 0x39, 0xcb, 0x46, 0x59, 0xbf, 0xf9, 0x2f, 0x49, 0xc8, 0xde, 0x20, 0x9b, 0xf6, 0x1f, 0x82, 0x37, + 0xca, 0x86, 0xf3, 0x66, 0x94, 0x9c, 0x90, 0xd6, 0xc6, 0xeb, 0x64, 0xf7, 0x6c, 0x9c, 0x0f, 0x66, + 0x9d, 0xde, 0xbc, 0xd9, 0xe8, 0x2e, 0x49, 0x4b, 0xc3, 0x6a, 0x07, 0x6e, 0x7c, 0x49, 0xb5, 0xf5, + 0x12, 0x2d, 0x37, 0xdd, 0xb3, 0x69, 0x9d, 0x41, 0x3d, 0xb2, 0x17, 0x12, 0x08, 0x2b, 0x8e, 0xa3, + 0x97, 0x3b, 0x05, 0xa0, 0x8c, 0xa4, 0x0a, 0x0c, 0xb7, 0x8a, 0x02, 0xaa, 0x34, 0xc4, 0x27, 0x50, + 0xbe, 0x49, 0xeb, 0x35, 0x37, 
0x65, 0x5a, 0x11, 0xd3, 0x9a, 0x98, 0x86, 0xe8, 0x42, 0x11, 0x53, + 0xc2, 0xbd, 0x6e, 0x05, 0x7d, 0xf3, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x31, 0x14, 0xb4, 0x11, 0xf6, + 0x01, 0x00, 0x00, +} diff --git a/test/vendor/google.golang.org/grpc/credentials/alts/utils.go b/test/vendor/google.golang.org/grpc/credentials/alts/utils.go new file mode 100644 index 0000000000..e46280ad5f --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/alts/utils.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package alts + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +const ( + linuxProductNameFile = "/sys/class/dmi/id/product_name" + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +type platformError string + +func (k platformError) Error() string { + return fmt.Sprintf("%s is not supported", string(k)) +} + +var ( + // The following two variables will be reassigned in tests. 
+ runningOS = runtime.GOOS + manufacturerReader = func() (io.Reader, error) { + switch runningOS { + case "linux": + return os.Open(linuxProductNameFile) + case "windows": + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return strings.NewReader(name), nil + } + } + + return nil, errors.New("cannot determine the machine's manufacturer") + default: + return nil, platformError(runningOS) + } + } + vmOnGCP bool +) + +// isRunningOnGCP checks whether the local system, without doing a network request is +// running on GCP. +func isRunningOnGCP() bool { + manufacturer, err := readManufacturer() + if os.IsNotExist(err) { + return false + } + if err != nil { + log.Fatalf("failure to read manufacturer information: %v", err) + } + name := string(manufacturer) + switch runningOS { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + log.Fatal(platformError(runningOS)) + } + return false +} + +func readManufacturer() ([]byte, error) { + reader, err := manufacturerReader() + if err != nil { + return nil, err + } + if reader == nil { + return nil, errors.New("got nil reader") + } + manufacturer, err := ioutil.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) + } + return manufacturer, nil +} + +// AuthInfoFromContext extracts the alts.AuthInfo object from the given context, +// if it exists. 
This API should be used by gRPC server RPC handlers to get +// information about the communicating peer. For client-side, use grpc.Peer() +// CallOption. +func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("no Peer found in Context") + } + return AuthInfoFromPeer(p) +} + +// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it +// exists. This API should be used by gRPC clients after obtaining a peer object +// using the grpc.Peer() CallOption. +func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { + altsAuthInfo, ok := p.AuthInfo.(AuthInfo) + if !ok { + return nil, errors.New("no alts.AuthInfo found in Peer") + } + return altsAuthInfo, nil +} + +// ClientAuthorizationCheck checks whether the client is authorized to access +// the requested resources based on the given expected client service accounts. +// This API should be used by gRPC server RPC handlers. This API should not be +// used by clients. +func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { + authInfo, err := AuthInfoFromContext(ctx) + if err != nil { + return status.Newf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err).Err() + } + for _, sa := range expectedServiceAccounts { + if authInfo.PeerServiceAccount() == sa { + return nil + } + } + return status.Newf(codes.PermissionDenied, "Client %v is not authorized", authInfo.PeerServiceAccount()).Err() +} diff --git a/test/vendor/google.golang.org/grpc/credentials/credentials.go b/test/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 0000000000..845ce5d216 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. 
+ RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // NoSecurity indicates a connection is insecure. + // The zero SecurityLevel value is invalid for backward compatibility. + NoSecurity SecurityLevel = iota + 1 + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. 
+// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // The auth information should embed CommonAuthInfo to return additional information about + // the credentials. Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. 
+ Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + TransportCredentials() TransportCredentials + PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. +type requestInfoKey struct{} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. 
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) + return +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo + } + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") + } + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == 0 { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +func init() { + internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) + } +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. 
+type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/test/vendor/google.golang.org/grpc/credentials/go12.go b/test/vendor/google.golang.org/grpc/credentials/go12.go new file mode 100644 index 0000000000..ccbf35b331 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/go12.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. 
+func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/test/vendor/google.golang.org/grpc/credentials/google/google.go b/test/vendor/google.golang.org/grpc/credentials/google/google.go new file mode 100644 index 0000000000..04b349abcf --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/google/google.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package google defines credentials for google cloud services. +package google + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/alts" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +const tokenRequestTimeout = 30 * time.Second + +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. 
+func NewDefaultCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + perRPCCreds, err := oauth.NewApplicationDefault(ctx) + if err != nil { + grpclog.Warningf("google default creds: failed to create application oauth: %v", err) + } + return perRPCCreds + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("google default creds: failed to create new creds: %v", err) + } + return bundle +} + +// NewComputeEngineCredentials returns a credentials bundle that is configured to work +// with google services. This API must only be used when running on GCE. Authentication configured +// by this API represents the GCE VM's default service account. +// +// This API is experimental. +func NewComputeEngineCredentials() credentials.Bundle { + c := &creds{ + newPerRPCCreds: func() credentials.PerRPCCredentials { + return oauth.NewComputeEngine() + }, + } + bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + grpclog.Warningf("compute engine creds: failed to create new creds: %v", err) + } + return bundle +} + +// creds implements credentials.Bundle. +type creds struct { + // Supported modes are defined in internal/internal.go. + mode string + // The transport credentials associated with this bundle. + transportCreds credentials.TransportCredentials + // The per RPC credentials associated with this bundle. + perRPCCreds credentials.PerRPCCredentials + // Creates new per RPC credentials + newPerRPCCreds func() credentials.PerRPCCredentials +} + +func (c *creds) TransportCredentials() credentials.TransportCredentials { + return c.transportCreds +} + +func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { + if c == nil { + return nil + } + return c.perRPCCreds +} + +// NewWithMode should make a copy of Bundle, and switch mode. 
Modifying the +// existing Bundle may cause races. +func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { + newCreds := &creds{ + mode: mode, + newPerRPCCreds: c.newPerRPCCreds, + } + + // Create transport credentials. + switch mode { + case internal.CredsBundleModeFallback: + newCreds.transportCreds = credentials.NewTLS(nil) + case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: + // Only the clients can use google default credentials, so we only need + // to create new ALTS client creds here. + newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) + default: + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + + if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { + newCreds.perRPCCreds = newCreds.newPerRPCCreds() + } + + return newCreds, nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go new file mode 100644 index 0000000000..2f4472becc --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go @@ -0,0 +1,61 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains credentials-internal code. 
+package internal + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go new file mode 100644 index 0000000000..d4346e9eab --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/test/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/test/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 0000000000..899e3372ce --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,185 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "context" + "fmt" + "io/ioutil" + "sync" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. 
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + if err = credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + if err = credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. 
+type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. 
+type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { + return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) 
+ if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/test/vendor/google.golang.org/grpc/credentials/tls.go b/test/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 0000000000..28b4f6232d --- /dev/null +++ b/test/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,225 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + + "google.golang.org/grpc/credentials/internal" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. 
+type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) 
+ return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// This API is EXPERIMENTAL. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: 
"TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/test/vendor/google.golang.org/grpc/dialoptions.go b/test/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 0000000000..63f5ae21df --- /dev/null +++ b/test/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,594 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. 
It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. 
The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. 
If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. Will be removed in a future 1.x release. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. 
+func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the dialer to use the provided ConnectParams. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +// +// This API is EXPERIMENTAL. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. 
Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// This API is experimental. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. 
+func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. 
+// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. 
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.streamInt = f
+	})
+}
+
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithStreamInterceptor will always be prepended to the chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// WithAuthority returns a DialOption that specifies the value to be used as the
+// :authority pseudo-header. This value only works with WithInsecure and has no
+// effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.authority = a
+	})
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// current ClientConn's parent. This function is used in nested channel creation
+// (e.g. grpclb dial).
+//
+// This API is EXPERIMENTAL.
+func WithChannelzParentID(id int64) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.channelzParentID = id
+	})
+}
+
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from resolver. If
+// default service config is provided, gRPC will use the default service config.
+func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used. +// 2. Resolver does not return a service config or if the resolver returns an +// invalid service config. +// +// This API is EXPERIMENTAL. +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// This API is EXPERIMENTAL. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// This API is EXPERIMENTAL. +func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. 
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		disableRetry:    !envconfig.Retry,
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+		},
+		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// withResolveNowBackoff specifies the function that clientconn uses to backoff
+// between successive calls to resolver.ResolveNow().
+//
+// For testing purpose only.
+func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolveNowBackoff = f
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register. They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// This API is EXPERIMENTAL.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
diff --git a/test/vendor/google.golang.org/grpc/doc.go b/test/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000000..187adbb117
--- /dev/null
+++ b/test/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,24 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/test/vendor/google.golang.org/grpc/encoding/encoding.go b/test/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 0000000000..195e8448b6 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. 
+const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string + // EXPERIMENTAL: if a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. 
Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
+func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/test/vendor/google.golang.org/grpc/encoding/proto/proto.go b/test/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 0000000000..66b97a6f69 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
+type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/test/vendor/google.golang.org/grpc/grpclog/grpclog.go b/test/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 0000000000..874ea6d98a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport and grpclb packages only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. 
+func Warningln(args ...interface{}) {
+	logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	logger.Infoln(args...)
+} diff --git a/test/vendor/google.golang.org/grpc/grpclog/logger.go b/test/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 0000000000..097494f710 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) 
+} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/test/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/test/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 0000000000..d493257769 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
+ Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. 
+var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. 
+ errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). 
+} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/test/vendor/google.golang.org/grpc/interceptor.go b/test/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 0000000000..8b7350022a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. 
streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. 
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/test/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/test/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000000..5fc0ee3da5
--- /dev/null
+++ b/test/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/test/vendor/google.golang.org/grpc/internal/balancerload/load.go b/test/vendor/google.golang.org/grpc/internal/balancerload/load.go new file mode 100644 index 0000000000..3a905d9665 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -0,0 +1,46 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package balancerload defines APIs to parse server loads in trailers. The +// parsed loads are sent to balancers in DoneInfo. +package balancerload + +import ( + "google.golang.org/grpc/metadata" +) + +// Parser converts loads from metadata into a concrete type. +type Parser interface { + // Parse parses loads from metadata. + Parse(md metadata.MD) interface{} +} + +var parser Parser + +// SetParser sets the load parser. +// +// Not mutex-protected, should be called before any gRPC functions. +func SetParser(lr Parser) { + parser = lr +} + +// Parse calls parser.Read(). +func Parse(md metadata.MD) interface{} { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 0000000000..8b10516749 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" +) + +// Logger is the global binary logger. It can be used to get binary logger for +// each method. +type Logger interface { + getMethodLogger(methodName string) *MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a methodLogger for each individual method. +var binLogger Logger + +// SetLogger sets the binarg logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". 
+func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// New methodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New methodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. 
+func (l *logger) getMethodLogger(methodName string) *MethodLogger { + s, m, err := parseMethodName(methodName) + if err != nil { + grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.methods[s+"/"+m]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if _, ok := l.blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.services[s]; ok { + return newMethodLogger(ml.hdr, ml.msg) + } + if l.all == nil { + return nil + } + return newMethodLogger(l.all.hdr, l.all.msg) +} diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 0000000000..1ee00a39ac --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. 
+ +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/test/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 0000000000..be30d0e65e --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/grpclog" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. 
+// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. +func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclog.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. 
+ if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". 
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. 
+ if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/test/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 0000000000..160f6e8616 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package binarylog + +import ( + "net" + "strings" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +type MethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this plugable. +} + +func newMethodLogger(h, m uint64) *MethodLogger { + return &MethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: defaultSink, // TODO(blog): make it plugable. + } +} + +// Log creates a proto binary log entry, and logs it to the sink. 
+func (ml *MethodLogger) Log(c LogEntryConfig) { + m := c.toProto() + timestamp, _ := ptypes.TimestampProto(time.Now()) + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *pb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *pb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *pb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + + ml.sink.Write(m) +} + +func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.Value)) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +type LogEntryConfig interface { + toProto() *pb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. 
+type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *pb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. + clientHeader := &pb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &pb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. + PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &pb.GrpcLogEntry_ServerHeader{ + ServerHeader: &pb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. 
+ Message interface{} +} + +func (c *ClientMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message interface{} +} + +func (c *ServerMessage) toProto() *pb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &pb.GrpcLogEntry_Message{ + Message: &pb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. 
+type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclog.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &pb.GrpcLogEntry_Trailer{ + Trailer: &pb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. 
+type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *pb.GrpcLogEntry { + ret := &pb.GrpcLogEntry{ + Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *pb.Metadata { + ret := &pb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &pb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *pb.Address { + ret := &pb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = pb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = pb.Address_TYPE_IPV6 + } else { + ret.Type = pb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/test/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 0000000000..a2e7c346dd --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" +) + +var ( + defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// SetDefaultSink sets the sink where binary logs will be written to. +// +// Not thread safe. Only set during initialization. +func SetDefaultSink(s Sink) { + if defaultSink != nil { + defaultSink.Close() + } + defaultSink = s +} + +// Sink writes log entry into the binary log sink. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. 
+func newWriterSink(w io.Writer) *writerSink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclog.Infof("binary logging: failed to marshal proto message: %v", err) + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufWriteCloserSink struct { + mu sync.Mutex + closer io.Closer + out *writerSink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + + writeStartOnce sync.Once + writeTicker *time.Ticker +} + +func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { + // Start the write loop when Write is called. + fs.writeStartOnce.Do(fs.startFlushGoroutine) + fs.mu.Lock() + if err := fs.out.Write(e); err != nil { + fs.mu.Unlock() + return err + } + fs.mu.Unlock() + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufWriteCloserSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for range fs.writeTicker.C { + fs.mu.Lock() + fs.buf.Flush() + fs.mu.Unlock() + } + }() +} + +func (fs *bufWriteCloserSink) Close() error { + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + fs.mu.Lock() + fs.buf.Flush() + fs.closer.Close() + fs.out.Close() + fs.mu.Unlock() + return nil +} + +func newBufWriteCloserSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufWriteCloserSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + } +} + +// NewTempFileSink creates a temp file and returns a Sink that writes to this +// file. 
+func NewTempFileSink() (Sink, error) { + tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %v", err) + } + return newBufWriteCloserSink(tempFile), nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/binarylog/util.go b/test/vendor/google.golang.org/grpc/internal/binarylog/util.go new file mode 100644 index 0000000000..15dc7803d8 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/binarylog/util.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "strings" +) + +// parseMethodName splits service and method from the input. It expects format +// "/service/method". +// +// TODO: move to internal/grpcutil. 
+func parseMethodName(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/test/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 0000000000..9f6a0c1200 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. 
For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/test/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 0000000000..f0744f9937 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = int64(50) + curState int32 + maxTraceEntry = defaultMaxTraceEntry +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). +// Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. 
+type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// This function returns a cleanup function to wait for all channelz state to be reset by the +// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +// to remove some entity just register by the new test, since the id space is the same. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. +func NewChannelzStorage() (cleanup func() error) { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() + return func() error { + var err error + cm := db.get() + if cm == nil { + return nil + } + for i := 0; i < 1000; i++ { + cm.mu.Lock() + if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { + cm.mu.Unlock() + // all things stored in the channelz map have been cleared. 
+ return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntryPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. 
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID, maxResults) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *ServerMetric { + return db.get().GetServer(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. 
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added +// to the channel trace. +// The Parent field is optional. It is used for event that will be recorded in the entity's parent +// trace also. +type TraceEventDesc struct { + Desc string + Severity Severity + Parent *TraceEventDesc +} + +// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. +func AddTraceEvent(id int64, desc *TraceEventDesc) { + if getMaxTraceEntry() == 0 { + return + } + db.get().traceEvent(id, desc) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. 
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + sc.trace.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to +// wait on the deletion of its children and until no other entity's channel trace references it. +// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully +// shutting down server will lead to the server being also deleted. 
+func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. 
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, 
maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with 
id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. 
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/types.go b/test/vendor/google.golang.org/grpc/internal/channelz/types.go new file mode 100644 index 0000000000..17c2274cb3 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -0,0 +1,702 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() + // getParentID returns parent ID of the entry. 0 value parent ID means no parent. + getParentID() int64 +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. 
+ grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. 
+ Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. 
+ CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time +} + +// ChannelTrace stores traced events on a channel/subchannel and related info. +type ChannelTrace struct { + // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) + EventNum int64 + // CreationTime is the creation time of the trace. + CreationTime time.Time + // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the + // oldest one) + Events []*TraceEvent +} + +// TraceEvent represent a single trace event +type TraceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. 
+type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type dummyChannel struct{} + +func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { + return &ChannelInternalMetric{} +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. 
+ KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). 
+ ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. + LastCallStartedTimestamp time.Time +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + createdTime time.Time + 
eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUNKNOWN indicates unknown severity of a trace event. + CtUNKNOWN Severity = iota + // CtINFO indicates info level severity of a trace event. + CtINFO + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. 
+ RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/test/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 0000000000..692dd61817 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). 
+func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/test/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 0000000000..79edbefc43 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,44 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). 
+// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/test/vendor/google.golang.org/grpc/internal/channelz/util_linux.go new file mode 100644 index 0000000000..fdf409d55d --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -0,0 +1,39 @@ +// +build linux,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/test/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go new file mode 100644 index 0000000000..8864a08111 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -0,0 +1,26 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/test/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 0000000000..ae6c8972fd --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". 
+ Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") +) diff --git a/test/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/test/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 0000000000..200b115ca2 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. 
+func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/test/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/test/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 0000000000..fbe697c376 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. 
+func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/test/vendor/google.golang.org/grpc/internal/internal.go b/test/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 0000000000..0912f0bf4c --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,72 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // StatusRawProto is exported by status/status.go. This func returns a + // pointer to the wrapped Status proto for a given status.Status without a + // call to proto.Clone(). 
The returned Status proto should not be mutated by + // the caller. + StatusRawProto interface{} // func (*status.Status) *spb.Status + // NewRequestInfoContext creates a new context based on the argument context attaching + // the passed in RequestInfo to the new context. + NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // ParseServiceConfigForTesting is for creating a fake + // ClientConn for resolver testing only + ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) diff --git a/test/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/test/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..c368db62ea --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,441 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. 
"::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, authority) + } +} + +var customAuthorityResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: customAuthorityDialler(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint, defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). 
+ ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.Authority == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = customAuthorityResolver(target.Authority) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + d.ResolveNow(resolver.ResolveNowOptions{}) + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). 
+ wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.rn: + } + + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) + } else { + d.cc.UpdateState(*state) + } + + // Sleep to prevent excessive re-resolutions. Incoming resolution requests + // will be queued in d.rn. + t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. 
+ continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. 
+ return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + state := &resolver.State{ + Addresses: append(addrs, srv...), + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. 
+// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/test/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/test/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 0000000000..8783a8cf82 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. + return nil + } + return err + } +} diff --git a/test/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/test/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000..520d9229e1 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/test/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/test/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 0000000000..43281a3e07 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,114 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. 
+package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + grpclog.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux non-appengine environment. +type Rusage syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() (rusage *Rusage) { + rusage = new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) + return +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + f := (*syscall.Rusage)(first) + l := (*syscall.Rusage)(latest) + var ( + utimeDiffs = l.Utime.Sec - f.Utime.Sec + utimeDiffus = l.Utime.Usec - f.Utime.Usec + stimeDiffs = l.Stime.Sec - f.Stime.Sec + stimeDiffus = l.Stime.Usec - f.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. 
exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/test/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/test/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 0000000000..d3fd9dab33 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,73 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +func log() { + once.Do(func() { + grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +// It always returns 0 under non-linux or appengine environment. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux or appengine environment. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux or appengine environment. +func GetRusage() (rusage *Rusage) { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux or appengine environment. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux or appengine environments +// a negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/test/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 0000000000..070680edba --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows will be increased + // to. TCP typically limits this to 4MB, but some systems go up to 16MB. + // Since this is only a limit, it is safe to make it optimistic. + bdpLimit = (1 << 20) * 16 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. 
+ updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. 
+ bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/test/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 0000000000..ddee20b6be --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,926 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. 
+type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } 
+ +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. 
+type controlBuffer struct {
+ ch chan struct{} // signals a blocked get() that an item has been enqueued
+ done <-chan struct{} // closed when the transport is done; unblocks throttle() and get()
+ mu sync.Mutex
+ consumerWaiting bool
+ list *itemList
+ err error // sticky error set by finish(); returned by later put/get/execute calls
+
+ // transportResponseFrames counts the number of queued items that represent
+ // the response of an action initiated by the peer. trfChan is created
+ // when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+ // closed and nilled when transportResponseFrames drops below the
+ // threshold. Both fields are protected by mu.
+ transportResponseFrames int
+ trfChan atomic.Value // *chan struct{}
+}
+
+func newControlBuffer(done <-chan struct{}) *controlBuffer { // done should be the transport's closing channel
+ return &controlBuffer{
+ ch: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
+ }
+}
+
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+ ch, _ := c.trfChan.Load().(*chan struct{})
+ if ch != nil {
+ select {
+ case <-*ch:
+ case <-c.done:
+ }
+ }
+}
+
+func (c *controlBuffer) put(it cbItem) error { // enqueue unconditionally (executeAndPut with a nil filter)
+ _, err := c.executeAndPut(nil, it)
+ return err
+}
+
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { // run f under the lock; enqueue it only when f succeeds
+ var wakeUp bool
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return false, c.err
+ }
+ if f != nil {
+ if !f(it) { // f wasn't successful
+ c.mu.Unlock()
+ return false, nil
+ }
+ }
+ if c.consumerWaiting {
+ wakeUp = true
+ c.consumerWaiting = false
+ }
+ c.list.enqueue(it)
+ if it.isTransportResponseFrame() {
+ c.transportResponseFrames++
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are adding the frame that puts us over the threshold; create
+ // a throttling channel.
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
+ }
+ }
+ c.mu.Unlock()
+ if wakeUp {
+ select {
+ case c.ch <- struct{}{}:
+ default:
+ }
+ }
+ return true, nil
+}
+
+// Note argument f should never be nil.
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { // run f under the lock without enqueuing anything
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return false, c.err
+ }
+ if !f(it) { // f wasn't successful
+ c.mu.Unlock()
+ return false, nil
+ }
+ c.mu.Unlock()
+ return true, nil
+}
+
+func (c *controlBuffer) get(block bool) (interface{}, error) { // next control frame; blocks only when block is true, else (nil, nil) when empty
+ for {
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return nil, c.err
+ }
+ if !c.list.isEmpty() {
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Load().(*chan struct{})
+ close(*ch)
+ c.trfChan.Store((*chan struct{})(nil))
+ }
+ c.transportResponseFrames--
+ }
+ c.mu.Unlock()
+ return h, nil
+ }
+ if !block {
+ c.mu.Unlock()
+ return nil, nil
+ }
+ c.consumerWaiting = true
+ c.mu.Unlock()
+ select {
+ case <-c.ch:
+ case <-c.done:
+ c.finish()
+ return nil, ErrConnClosing
+ }
+ }
+}
+
+func (c *controlBuffer) finish() { // fail the buffer with ErrConnClosing and orphan any queued headerFrames
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return
+ }
+ c.err = ErrConnClosing
+ // There may be headers for streams in the control buffer.
+ // These streams need to be cleaned out since the transport
+ // is still not aware of these yet.
+ for head := c.list.dequeueAll(); head != nil; head = head.next {
+ hdr, ok := head.it.(*headerFrame)
+ if !ok {
+ continue
+ }
+ if hdr.onOrphaned != nil { // It will be nil on the server-side.
+ hdr.onOrphaned(ErrConnClosing)
+ }
+ }
+ c.mu.Unlock()
+}
+
+type side int // identifies whether loopy runs in the client or server transport
+
+const (
+ clientSide side = iota
+ serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames.
Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// it gets added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resemebling to a round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+ side side
+ cbuf *controlBuffer
+ sendQuota uint32 // connection-level flow-control send quota
+ oiws uint32 // outbound initial window size.
+ // estdStreams is map of all established streams that are not cleaned-up yet.
+ // On client-side, this is all streams whose headers were sent out.
+ // On server-side, this is all streams whose headers were received.
+ estdStreams map[uint32]*outStream // Established streams.
+ // activeStreams is a linked-list of all streams that have data to send and some
+ // stream-level flow control quota.
+ // Each of these streams internally have a list of data items(and perhaps trailers
+ // on the server-side) to be sent out.
+ activeStreams *outStreamList
+ framer *framer
+ hBuf *bytes.Buffer // The buffer for HPACK encoding.
+ hEnc *hpack.Encoder // HPACK encoder.
+ bdpEst *bdpEstimator
+ draining bool // client side: exit loopy once all established streams are cleaned up
+
+ // Side-specific handlers
+ ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { // loopy for side s, writing frames via fr, fed by cbuf
+ var buf bytes.Buffer
+ l := &loopyWriter{
+ side: s,
+ cbuf: cbuf,
+ sendQuota: defaultWindowSize,
+ oiws: defaultWindowSize,
+ estdStreams: make(map[uint32]*outStream),
+ activeStreams: newOutStreamList(),
+ framer: fr,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ bdpEst: bdpEst,
+ }
+ return l
+}
+
+const minBatchSize = 1000 // below this many buffered bytes, run() yields once before flushing
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. 
%v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) 
+} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. 
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
+ if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. 
+ if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possilbe HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, nil + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { // connection-level flow control. + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. 
+ if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. + if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/defaults.go b/test/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 0000000000..9fa306b2e0 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "math"
+ "time"
+)
+
+const (
+ // The default value of flow control window size in HTTP2 spec.
+ defaultWindowSize = 65535
+ // The initial window size for flow control.
+ initialWindowSize = defaultWindowSize // for an RPC
+ infinity = time.Duration(math.MaxInt64) // effectively "never" for the duration defaults below
+ defaultClientKeepaliveTime = infinity
+ defaultClientKeepaliveTimeout = 20 * time.Second
+ defaultMaxStreamsClient = 100 // TODO(review): presumably the assumed stream cap before the server's SETTINGS arrives — confirm at call sites
+ defaultMaxConnectionIdle = infinity
+ defaultMaxConnectionAge = infinity
+ defaultMaxConnectionAgeGrace = infinity
+ defaultServerKeepaliveTime = 2 * time.Hour
+ defaultServerKeepaliveTimeout = 20 * time.Second
+ defaultKeepalivePolicyMinTime = 5 * time.Minute
+ // max window limit set by HTTP2 Specs.
+ maxWindowSize = math.MaxInt32
+ // defaultWriteQuota is the default value for number of data
+ // bytes that each stream can schedule before some of it being
+ // flushed out.
+ defaultWriteQuota = 64 * 1024
+ defaultClientMaxHeaderListSize = uint32(16 << 20) // 16 MiB
+ defaultServerMaxHeaderListSize = uint32(16 << 20) // 16 MiB
+)
diff --git a/test/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/test/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 0000000000..f262edd8ec
--- /dev/null
+++ b/test/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,217 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. 
+ replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. 
+ pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + defer f.mu.Unlock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. 
+func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/test/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 0000000000..c3c32dafe9 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,435 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. 
It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, 
err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. 
+type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. + requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone 
is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/test/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 0000000000..2d6feeb1be --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1454 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + md interface{} + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. 
+ scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandler stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + // onPrefaceReceipt is a callback that client transport calls upon + // receiving server preface to signal that a succefull HTTP2 + // connection was established. + onPrefaceReceipt func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + // Fields below are for channelz metric collection. 
+ channelzID int64 // channelz unique identification number + czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() + + bufferPool *bufferPool + + connectionID uint64 +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + scheme = "https" + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. 
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. 
+ n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
+ s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
+ timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. 
+ // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. 
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + header, _, _ := metadata.FromOutgoingContextRaw(ctx) + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. 
+func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. 
Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. 
+ if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. 
+func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. 
+ s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. 
+ statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. 
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.controlBuf.put(&incomingGoAway{}) + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + t.onGoAway(t.goAwayReason) + t.state = draining + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. 
+ } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. + atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. 
+ st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + state := &decodeState{} + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. + state.data.isGRPC = !initialHeader + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. 
+// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + msg := t.framer.fr.ErrorDetail().Error() + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. 
+ t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close() + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. 
If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
+ sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + 
t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/test/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 0000000000..8b04b0392a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1253 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. 
+ ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + // statusRawProto is a function to get to the raw status proto wrapped in a + // status.Status without a proto.Clone(). + statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. 
+ // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. 
+ maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + done := make(chan struct{}) + t := &http2Server{ + ctx: context.Background(), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. 
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
+ if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
+ errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + s.cancel() + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. 
+func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. 
+ if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
+ t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. 
+ errorf("transport: Got too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + return false + } + } + return true +} + +// WriteHeader sends the header metadata md back to the client. 
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + +func (t *http2Server) writeHeaderLocked(s *Stream) error { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: t.setResetPingStrikes, + }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + if t.stats != nil { + // Note: WireLength is not set in outHeader. + // TODO(mmukhi): Revisit this later, if needed. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + } + t.stats.HandleRPC(s.Context(), outHeader) + } + return nil +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. 
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + if s.getState() == streamDone { + return nil + } + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + } else { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + } + + // Attach the trailer metadata. 
+ headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: t.setResetPingStrikes, + } + s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, false) + return ErrHeaderListSizeLimitViolation + } + // Send a RST_STREAM after the trailers if the client has not already half-closed. + rst := s.getState() == streamActive + t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) + if t.stats != nil { + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return status.Errorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + } + // Add some data to header frame so that we can equally distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) 
+ data = data[emptyLen:] + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. 
+ t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.drain(http2.ErrCodeNo, []byte{}) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + infof("transport: closing server transport due to maximum connection age.") + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + infof("transport: closing server transport due to idleness.") + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. 
+func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. 
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. 
+ // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, 
time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/http_util.go b/test/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 0000000000..8f5f3349d9 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,677 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
+ http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + baseContentType = "application/grpc" +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. 
+ http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +type parsedHeaderData struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string + + // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). + // + // We are in gRPC mode (peer speaking gRPC) if: + // * We are client side and have already received a HEADER frame that indicates gRPC peer. + // * The header contains valid a content-type, i.e. a string starts with "application/grpc" + // And we should handle error specific to gRPC. + // + // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we + // are in HTTP fallback mode, and should handle error specific to HTTP. + isGRPC bool + grpcErr error + httpErr error + contentTypeErr string +} + +// decodeState configures decoding criteria and records the decoded data. +type decodeState struct { + // whether decoding on server side or not + serverSide bool + + // Records the states during HPACK decoding. 
It will be filled with info parsed from HTTP HEADERS + // frame once decodeHeader function has been invoked and returned. + data parsedHeaderData +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
+func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.data.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + } + return d.data.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. 
+ return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + + for _, hf := range frame.Fields { + d.processHeaderField(hf) + } + + if d.data.isGRPC { + if d.data.grpcErr != nil { + return d.data.grpcErr + } + if d.serverSide { + return nil + } + if d.data.rawStatusCode == nil && d.data.statusGen == nil { + // gRPC status doesn't exist. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.data.rawStatusCode = &code + } + return nil + } + + // HTTP fallback mode + if d.data.httpErr != nil { + return d.data.httpErr + } + + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + ok bool + ) + + if d.data.httpStatus != nil { + code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if !ok { + code = codes.Unknown + } + } + + return status.Error(code, d.constructHTTPErrMsg()) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
+func (d *decodeState) constructHTTPErrMsg() string { + var errMsgs []string + + if d.data.httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + } + + if d.data.contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, d.data.contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +func (d *decodeState) addMetadata(k, v string) { + if d.data.mdata == nil { + d.data.mdata = make(map[string][]string) + } + d.data.mdata[k] = append(d.data.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) + return + } + d.data.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? 
+ d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata 
header (%q, %q): %v", f.Name, f.Value, err) + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. 
+ return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
+ if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. 
+ return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/log.go b/test/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 0000000000..879df80c4d --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/test/vendor/google.golang.org/grpc/internal/transport/transport.go b/test/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 0000000000..a30da9eb32 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,808 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. 
+package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
+ return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last *bytes.Buffer // Stores the remaining data in the previous calls. + err error + freeBuffer func(*bytes.Buffer) +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + // Read remaining data left in last call. 
+ copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } + return copied, nil + } + if r.closeStream != nil { + n, r.err = r.readClient(p) + } else { + n, r.err = r.read(p) + } + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readClient(p []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. 
+ r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, p) + case m := <-r.recv.get(): + return r.readAdditional(m, p) + } +} + +func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } + return copied, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + + // hdrMu protects header and trailer metadata on the server-side. 
+ hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. 
+ return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + if !s.headerValid { + return nil, s.status.Err() + } + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. 
+func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. 
+func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. 
+func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("", s, s.method) +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + closing + draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int + ChannelzParentID int64 + MaxHeaderListSize *uint32 + HeaderTableSize *uint32 +} + +// NewServerTransport creates a ServerTransport with conn or non-nil error +// if it fails. +func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { + return newHTTP2Server(conn, config) +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client + // connection. Only one of TransportCredentials and CredsBundle is non-nil. + TransportCredentials credentials.TransportCredentials + // CredsBundle is the credentials bundle to be used. Only one of + // TransportCredentials and CredsBundle is non-nil. 
+ CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} + Authority string +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. 
+ Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. 
Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. 
+ Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. 
+ errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. 
+ lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/test/vendor/google.golang.org/grpc/keepalive/keepalive.go b/test/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 0000000000..34d31b5e7d --- /dev/null +++ b/test/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. 
+type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. 
+ Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/test/vendor/google.golang.org/grpc/metadata/metadata.go b/test/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 0000000000..cf6d1b9478 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,209 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. 
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "context" + "fmt" + "strings" +) + +// DecodeKeyValue returns k, v, nil. +// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. 
+func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. 
+func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. 
+func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/test/vendor/google.golang.org/grpc/naming/dns_resolver.go b/test/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 0000000000..c9f79dc533 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") + + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. 
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. 
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. 
The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. + ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + 
newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/test/vendor/google.golang.org/grpc/naming/naming.go b/test/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 0000000000..f4c1c8b689 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. 
+// +// This package is deprecated: please use package resolver instead. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/test/vendor/google.golang.org/grpc/peer/peer.go b/test/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 0000000000..e01d219ffb --- /dev/null +++ b/test/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/test/vendor/google.golang.org/grpc/picker_wrapper.go b/test/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 0000000000..00447894f0 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,229 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// v2PickerWrapper wraps a balancer.Picker while providing the +// balancer.V2Picker API. It requires a pickerWrapper to generate errors +// including the latest connectionError. To be deleted when balancer.Picker is +// updated to the balancer.V2Picker API. +type v2PickerWrapper struct { + picker balancer.Picker + connErr *connErr +} + +func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + sc, done, err := v.picker.Pick(info.Ctx, info) + if err != nil { + if err == balancer.ErrTransientFailure { + return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) + } + return balancer.PickResult{}, err + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.V2Picker + + // The latest connection error. TODO: remove when V1 picker is deprecated; + // balancer should be responsible for providing the error. 
+ *connErr +} + +type connErr struct { + mu sync.Mutex + err error +} + +func (c *connErr) updateConnectionError(err error) { + c.mu.Lock() + c.err = err + c.mu.Unlock() +} + +func (c *connErr) connectionError() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func newPickerWrapper() *pickerWrapper { + return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return + } + pw.picker = p + // pw.blockingCh should never be nil. + close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. 
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + var lastPickErr error + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else if connectionErr := pw.connectionError(); connectionErr != nil { + errStr = "latest connection error: " + connectionErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. 
+ return nil, nil, status.Error(codes.Unknown, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/test/vendor/google.golang.org/grpc/pickfirst.go b/test/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 0000000000..c43dac9ad8 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "errors" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + b.ResolverError(err) + return + } + b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, + ) + } + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if grpclog.V(2) { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, + ) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + } + if b.sc != sc { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case 
connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.TransientFailure: + err := balancer.ErrTransientFailure + // TODO: this can be unconditional after the V1 API is removed, as + // SubConnState will always contain a connection error. + if s.ConnectionError != nil { + err = balancer.TransientFailureError(s.ConnectionError) + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: err}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/test/vendor/google.golang.org/grpc/preloader.go b/test/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 0000000000..76acbbcc93 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// This API is EXPERIMENTAL. 
+type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/test/vendor/google.golang.org/grpc/proxy.go b/test/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 0000000000..f8f69bfb70 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bufio" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + if url == nil { + return nil, errDisabled + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. 
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var newAddr string + proxyURL, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + newAddr = addr + } else { + newAddr = proxyURL.Host + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) + } + return + } +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/test/vendor/google.golang.org/grpc/resolver/resolver.go b/test/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 0000000000..fe14b2fb98 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,253 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. 
+package resolver + +import ( + "context" + "net" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: use Attributes in Address instead. 
+ GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // If Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata interface{} +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. 
+ DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. 
The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. 
target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/test/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/test/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 0000000000..3eaf724cd6 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,263 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. 
+func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. 
+func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. + } + } + }() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver reported error: %v", err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. 
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.done.HasFired() { + return + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + grpclog.Infof("Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(balancer.ErrBadResolverState) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != 
newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtINFO, + }) +} diff --git a/test/vendor/google.golang.org/grpc/rpc_util.go b/test/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 0000000000..d3a4adc5ee --- /dev/null +++ b/test/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,887 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. 
+// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. 
+func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream ClientStream + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. 
+func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. 
If waitForReady is false, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. 
+func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". 
The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// This is an EXPERIMENTAL API. 
+func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? 
+type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
+func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + 
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. 
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. 
+func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. 
+ c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 6. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. 
+// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/test/vendor/google.golang.org/grpc/server.go b/test/vendor/google.golang.org/grpc/server.go new file mode 100644 index 0000000000..0d75cb109a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1548 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +var statusOK = status.New(codes.OK, "") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. 
+type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// This API is EXPERIMENTAL. 
+type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
+func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. 
+func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. 
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// This API is EXPERIMENTAL. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. 
+func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. 
+func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. 
+ s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. 
+func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. 
+// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. 
+ st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. 
+ if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := 
NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err + } + if binlog != nil { + h, _ := stream.Header() + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + binlog.Log(&binarylog.ServerMessage{ + Message: reply, + }) + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + err = t.WriteStatus(stream, statusOK) + if binlog != nil { + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + sh := s.opts.statsHandler + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. 
+ if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + err = t.WriteStatus(ss.s, statusOK) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + return err +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.m[service] + if knownService { + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. 
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
+ s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. 
+func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/test/vendor/google.golang.org/grpc/service_config.go b/test/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 0000000000..5a80a575a5 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,434 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. 
+ retryPolicy *retryPolicy +} + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancer will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. 
+ // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +// retryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + maxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + initialBackoff time.Duration + maxBackoff time.Duration + backoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. 
+ retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +type loadBalancingConfig map[string]json.RawMessage + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
+type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *[]loadBalancingConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfigForTesting = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if rsc.LoadBalancingConfig != nil { + for i, lbcfg := range *rsc.LoadBalancingConfig { + if len(lbcfg) != 1 { + err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + sc.lbConfig = &lbConfig{name: name} + if parser, ok := builder.(balancer.ConfigParser); ok { + var err error + sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} + } + } else if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + break + } + if sc.lbConfig == nil { + // We had a loadBalancingConfig field but did not encounter a + // supported policy. The config is considered invalid in this + // case. 
+ err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + 
} + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/test/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/test/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 0000000000..187c304421 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// This package is EXPERIMENTAL. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error +} diff --git a/test/vendor/google.golang.org/grpc/stats/handlers.go b/test/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 0000000000..dc03731e45 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. 
+ LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/test/vendor/google.golang.org/grpc/stats/stats.go b/test/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 0000000000..9e22c393f1 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). 
+ WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. 
+ Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. 
+ BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. 
does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. 
+// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/test/vendor/google.golang.org/grpc/status/status.go b/test/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 0000000000..a1348e9b16 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. 
These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" +) + +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Is implements future error.Is functionality. +// A statusError is equivalent if the code and message are identical. +func (se *statusError) Is(target error) bool { + tse, ok := target.(*statusError) + if !ok { + return false + } + + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. 
+func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. 
+func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. 
+ if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return nil + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/test/vendor/google.golang.org/grpc/stream.go b/test/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 0000000000..bb99940e36 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1529 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m interface{}) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m interface{}) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. 
+type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. 
On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) 
+} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + cs.binlog = binarylog.GetMethodLogger(method) + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
+ if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if cs.binlog != nil { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + cs.binlog.Log(logEntry) + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. 
+ newAttempt.finish(retErr) + } + }() + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + if trInfo != nil { + trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + } + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlog *binarylog.MethodLogger // Binary logger, can be nil. + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. 
+ // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. 
+func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. 
+ if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + if err := cs.newAttemptLocked(nil, nil); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. 
+ continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + if err != nil { + cs.finish(err) + return nil, err + } + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Only log if binary log is on and header has not been logged. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + cs.serverHeaderBinlogged = true + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). 
+ if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + msgBytes := data // Store the pointer before setting to nil. For binary logging. + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ClientMessage{ + OnClientSide: true, + Message: msgBytes, + }) + } + return +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.binlog != nil && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. 
+ cs.Header() + } + var recvInfo *payloadInfo + if cs.binlog != nil { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if cs.binlog != nil && err == nil { + cs.binlog.Log(&binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + }) + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + + if cs.binlog != nil { + // finish will not log Trailer. Log Trailer here. + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if logEntry.Err == io.EOF { + logEntry.Err = nil + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.binlog.Log(logEntry) + } + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if cs.binlog != nil { + cs.binlog.Log(&binarylog.ClientHalfClose{ + OnClientSide: true, + }) + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + cs.mu.Unlock() + // For binary logging. 
only log cancel in finish (could be caused by RPC ctx + // canceled or ClientConn closed). Trailer will be logged in RecvMsg. + // + // Only one of cancel or trailer needs to be logged. In the cases where + // users don't call RecvMsg, users must have already canceled the RPC. + if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. 
+ if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return a.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (a *csAttempt) finish(err error) { + a.mu.Lock() + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + var tr metadata.MD + if a.s != nil { + a.t.CloseStream(a.s, err) + tr = a.s.Trailer() + } + + if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } + a.done(balancer.DoneInfo{ + Err: err, + Trailer: tr, + BytesSent: a.s != nil, + BytesReceived: br, + ServerLoad: balancerload.Parse(tr), + }) + } + if a.statsHandler != nil { + end := &stats.End{ + Client: true, + BeginTime: a.cs.beginTime, + EndTime: time.Now(), + Trailer: tr, + Error: err, + } + a.statsHandler.HandleRPC(a.cs.ctx, end) + } + if a.trInfo != nil && a.trInfo.tr != nil { + if err == nil { + a.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + a.trInfo.tr.LazyPrintf("RPC: [%v]", err) + a.trInfo.tr.SetError() + } + a.trInfo.tr.Finish() + a.trInfo.tr = nil + } + a.mu.Unlock() +} + +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { + if t == nil { + // TODO: return RPC error here? 
+ return nil, errors.New("transport provided is nil") + } + // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. + c := &callInfo{} + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. 
+ as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + as.callInfo.stream = as + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? 
+ return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. 
+ return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m interface{}) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err == nil { + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) + } + if err == io.EOF { + return as.s.Status().Err() // non-server streaming Recv returns nil on success + } + return toRPCErr(err) +} + +func (as *addrConnStream) finish(err error) { + as.mu.Lock() + if as.finished { + as.mu.Unlock() + return + } + as.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + if as.s != nil { + as.t.CloseStream(as.s, err) + } + + if err != nil { + as.ac.incrCallsFailed() + } else { + as.ac.incrCallsSucceeded() + } + as.cancel() + as.mu.Unlock() +} + +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. 
+ // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// serverStream implements a server side Stream. +type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + binlog *binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := ss.t.WriteHeader(ss.s, md) + if ss.binlog != nil && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.binlog != nil { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + ss.binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + ss.serverHeaderBinlogged = true + } + ss.binlog.Log(&binarylog.ServerMessage{ + Message: data, + }) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. 
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. 
+ // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/test/vendor/google.golang.org/grpc/tap/tap.go b/test/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 0000000000..584360f681 --- /dev/null +++ b/test/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". 
+// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/test/vendor/google.golang.org/grpc/trace.go b/test/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 0000000000..07a2d26b3e --- /dev/null +++ b/test/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". 
+func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) 
+} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/test/vendor/google.golang.org/grpc/version.go b/test/vendor/google.golang.org/grpc/version.go new file mode 100644 index 0000000000..1a831b159a --- /dev/null +++ b/test/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.27.1" diff --git a/test/vendor/istio.io/api/LICENSE b/test/vendor/istio.io/api/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/common/config/license-lint.yml b/test/vendor/istio.io/api/common/config/license-lint.yml new file mode 100644 index 0000000000..fbdf259624 --- /dev/null +++ b/test/vendor/istio.io/api/common/config/license-lint.yml @@ -0,0 +1,141 @@ +unrestricted_licenses: + - Apache-2.0 + - ISC + - AFL-2.1 + - AFL-3.0 + - Artistic-1.0 + - Artistic-2.0 + - Apache-1.1 + - BSD-1-Clause + - BSD-2-Clause + - BSD-3-Clause + - FTL + - LPL-1.02 + - MS-PL + - MIT + - NCSA + - OpenSSL + - PHP-3.0 + - TCP-wrappers + - W3C + - Xnet + - Zlib + +reciprocal_licenses: + - CC0-1.0 + - APSL-2.0 + - CDDL-1.0 + - CDDL-1.1 + - CPL-1.0 + - EPL-1.0 + - IPL-1.0 + - MPL-1.0 + - MPL-1.1 + - MPL-2.0 + - Ruby + +restricted_licenses: + - GPL-1.0-only + - GPL-1.0-or-later + - GPL-2.0-only + - GPL-2.0-or-later + - GPL-3.0-only + - GPL-3.0-or-later + - LGPL-2.0-only + - LGPL-2.0-or-later + - LGPL-2.1-only + - LGPL-2.1-or-later + - LGPL-3.0-only + - LGPL-3.0-or-later + - NPL-1.0 + - NPL-1.1 + - OSL-1.0 + - OSL-1.1 + - OSL-2.0 + - OSL-2.1 + - OSL-3.0 + - QPL-1.0 + - Sleepycat + +whitelisted_modules: + - bitbucket.org/ww/goautoneg + - git.apache.org/thrift.git + - github.com/alicebob/gopher-json + - github.com/antlr/antlr4 + - github.com/apache/thrift + - github.com/bazelbuild/buildtools + - github.com/bgentry/speakeasy + - github.com/bmizerany/assert + - github.com/BurntSushi/xgb + - 
github.com/DATA-DOG/go-sqlmock + - github.com/daviddengcn/go-colortext + - github.com/dchest/siphash + - github.com/dnaeon/go-vcr + - github.com/docker/docker + - github.com/duosecurity/duo_api_golang + - github.com/dustin/go-humanize + - github.com/facebookgo/stack + - github.com/facebookgo/stackerr + - github.com/ghodss/yaml + - github.com/globalsign/mgo + - github.com/gogo/protobuf + - github.com/google/cadvisor + - github.com/google/pprof + - github.com/gophercloud/gophercloud + - github.com/gotestyourself/gotestyourself + - github.com/hashicorp/consul + - github.com/hashicorp/serf + - github.com/hashicorp/vault + - github.com/heketi/heketi + - github.com/heketi/utils + - github.com/inconshreveable/mousetrap + - github.com/JeffAshton/win_pdh + - github.com/jmespath/go-jmespath + - github.com/jteeuwen/go-bindata + - github.com/juju/errors + - github.com/juju/loggo + - github.com/juju/testing + - github.com/julienschmidt/httprouter + - github.com/koneu/natend + - github.com/kr/logfmt + - github.com/libopenstorage/openstorage + - github.com/logrusorgru/aurora + - github.com/magiconair/properties + - github.com/Masterminds/semver + - github.com/Masterminds/sprig + - github.com/mesos/mesos-go + - github.com/miekg/dns + - github.com/munnerz/goautoneg + - github.com/Nvveen/Gotty + - github.com/NYTimes/gziphandler + - github.com/opencontainers/runc + - github.com/openshift/origin + - github.com/pascaldekloe/goe + - github.com/pmezard/go-difflib + - github.com/projectcalico/go-yaml + - github.com/projectcalico/go-yaml-wrapper + - github.com/rcrowley/go-metrics + - github.com/russross/blackfriday + - github.com/russross/blackfriday/v2 + - github.com/sean-/seed + - github.com/signalfx/com_signalfx_metrics_protobuf + - github.com/smartystreets/assertions + - github.com/smartystreets/goconvey + - github.com/storageos/go-api + - github.com/technosophos/moniker + - github.com/ulikunitz/xz + - github.com/xeipuuv/gojsonpointer + - github.com/xeipuuv/gojsonreference + - 
github.com/xi2/xz + - github.com/ziutek/mymysql + - gopkg.in/check.v1 + - gopkg.in/mgo.v2 + - gopkg.in/tomb.v1 + - gopkg.in/yaml.v1 + - gopkg.in/yaml.v3 + - gotest.tools + - istio.io/tools + - k8s.io/helm + - k8s.io/kubernetes + - modernc.org/cc + - sigs.k8s.io/yaml diff --git a/test/vendor/istio.io/api/licenses/cloud.google.com/go/LICENSE b/test/vendor/istio.io/api/licenses/cloud.google.com/go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/COPYING b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/client9/misspell/LICENSE b/test/vendor/istio.io/api/licenses/github.com/client9/misspell/LICENSE new file mode 100644 index 0000000000..423e1f9e0f --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/client9/misspell/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/test/vendor/istio.io/api/licenses/github.com/gogo/protobuf/LICENSE b/test/vendor/istio.io/api/licenses/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..f57de90da8 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/api/licenses/github.com/golang/glog/LICENSE b/test/vendor/istio.io/api/licenses/github.com/golang/glog/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/licenses/github.com/golang/mock/LICENSE b/test/vendor/istio.io/api/licenses/github.com/golang/mock/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/golang/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/licenses/github.com/golang/protobuf/LICENSE b/test/vendor/istio.io/api/licenses/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000000..0f646931a4 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/api/licenses/github.com/google/go-cmp/LICENSE b/test/vendor/istio.io/api/licenses/github.com/google/go-cmp/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/kisielk/errcheck/LICENSE b/test/vendor/istio.io/api/licenses/github.com/kisielk/errcheck/LICENSE new file mode 100644 index 0000000000..a2b16b5bd9 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/kisielk/errcheck/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/api/licenses/github.com/kisielk/gotool/LICENSE b/test/vendor/istio.io/api/licenses/github.com/kisielk/gotool/LICENSE new file mode 100644 index 0000000000..1cbf651e2f --- /dev/null +++ b/test/vendor/istio.io/api/licenses/github.com/kisielk/gotool/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/crypto/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/lint/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/lint/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/net/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/net/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/oauth2/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/sync/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/sys/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/sys/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/text/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/text/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/tools/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/golang.org/x/tools/cmd/getgo/LICENSE b/test/vendor/istio.io/api/licenses/golang.org/x/tools/cmd/getgo/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/golang.org/x/tools/cmd/getgo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/google.golang.org/appengine/LICENSE b/test/vendor/istio.io/api/licenses/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/licenses/google.golang.org/genproto/LICENSE b/test/vendor/istio.io/api/licenses/google.golang.org/genproto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/licenses/google.golang.org/grpc/LICENSE b/test/vendor/istio.io/api/licenses/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/api/licenses/honnef.co/go/tools/LICENSE b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/LICENSE new file mode 100644 index 0000000000..dfd0314546 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/istio.io/api/licenses/honnef.co/go/tools/gcsizes/LICENSE b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/gcsizes/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/gcsizes/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/honnef.co/go/tools/lint/LICENSE b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/lint/LICENSE new file mode 100644 index 0000000000..796130a123 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/lint/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/honnef.co/go/tools/ssa/LICENSE b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/ssa/LICENSE new file mode 100644 index 0000000000..aee48041e1 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/honnef.co/go/tools/ssa/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/api/licenses/istio.io/gogo-genproto/LICENSE b/test/vendor/istio.io/api/licenses/istio.io/gogo-genproto/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/api/licenses/istio.io/gogo-genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/api/networking/v1alpha3/destination_rule.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule.pb.go new file mode 100644 index 0000000000..16c0e94be6 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule.pb.go @@ -0,0 +1,5436 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/destination_rule.proto + +// `DestinationRule` defines policies that apply to traffic intended for a +// service after routing has occurred. These rules specify configuration +// for load balancing, connection pool size from the sidecar, and outlier +// detection settings to detect and evict unhealthy hosts from the load +// balancing pool. 
For example, a simple load balancing policy for the +// ratings service would look as follows: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// ``` +// +// Version specific policies can be specified by defining a named +// `subset` and overriding the settings specified at the service level. The +// following rule uses a round robin load balancing policy for all traffic +// going to a subset named testversion that is composed of endpoints (e.g., +// pods) with labels (version:v3). +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// ``` +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +// +// Traffic policies can be customized to specific ports as well. The +// following rule uses the least connection load balancing policy for all +// traffic to port 80, while uses a round robin load balancing setting for +// traffic to the port 9080. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings-port +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: # Apply to all ports +// portLevelSettings: +// - port: +// number: 80 +// loadBalancer: +// simple: LEAST_CONN +// - port: +// number: 9080 +// loadBalancer: +// simple: ROUND_ROBIN +// ``` + +package v1alpha3 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/gogo/protobuf/types" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Standard load balancing algorithms that require no tuning. +type LoadBalancerSettings_SimpleLB int32 + +const ( + // Round Robin policy. Default + LoadBalancerSettings_ROUND_ROBIN LoadBalancerSettings_SimpleLB = 0 + // The least request load balancer uses an O(1) algorithm which selects + // two random healthy hosts and picks the host which has fewer active + // requests. + LoadBalancerSettings_LEAST_CONN LoadBalancerSettings_SimpleLB = 1 + // The random load balancer selects a random healthy host. The random + // load balancer generally performs better than round robin if no health + // checking policy is configured. 
+ LoadBalancerSettings_RANDOM LoadBalancerSettings_SimpleLB = 2 + // This option will forward the connection to the original IP address + // requested by the caller without doing any form of load + // balancing. This option must be used with care. It is meant for + // advanced use cases. Refer to Original Destination load balancer in + // Envoy for further details. + LoadBalancerSettings_PASSTHROUGH LoadBalancerSettings_SimpleLB = 3 +) + +var LoadBalancerSettings_SimpleLB_name = map[int32]string{ + 0: "ROUND_ROBIN", + 1: "LEAST_CONN", + 2: "RANDOM", + 3: "PASSTHROUGH", +} + +var LoadBalancerSettings_SimpleLB_value = map[string]int32{ + "ROUND_ROBIN": 0, + "LEAST_CONN": 1, + "RANDOM": 2, + "PASSTHROUGH": 3, +} + +func (x LoadBalancerSettings_SimpleLB) String() string { + return proto.EnumName(LoadBalancerSettings_SimpleLB_name, int32(x)) +} + +func (LoadBalancerSettings_SimpleLB) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{3, 0} +} + +// Policy for upgrading http1.1 connections to http2. +type ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy int32 + +const ( + // Use the global default. + ConnectionPoolSettings_HTTPSettings_DEFAULT ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy = 0 + // Do not upgrade the connection to http2. + // This opt-out option overrides the default. + ConnectionPoolSettings_HTTPSettings_DO_NOT_UPGRADE ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy = 1 + // Upgrade the connection to http2. + // This opt-in option overrides the default. 
+ ConnectionPoolSettings_HTTPSettings_UPGRADE ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy = 2 +) + +var ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy_name = map[int32]string{ + 0: "DEFAULT", + 1: "DO_NOT_UPGRADE", + 2: "UPGRADE", +} + +var ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy_value = map[string]int32{ + "DEFAULT": 0, + "DO_NOT_UPGRADE": 1, + "UPGRADE": 2, +} + +func (x ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy) String() string { + return proto.EnumName(ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy_name, int32(x)) +} + +func (ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{4, 1, 0} +} + +// TLS connection mode +type TLSSettings_TLSmode int32 + +const ( + // Do not setup a TLS connection to the upstream endpoint. + TLSSettings_DISABLE TLSSettings_TLSmode = 0 + // Originate a TLS connection to the upstream endpoint. + TLSSettings_SIMPLE TLSSettings_TLSmode = 1 + // Secure connections to the upstream using mutual TLS by presenting + // client certificates for authentication. + TLSSettings_MUTUAL TLSSettings_TLSmode = 2 + // Secure connections to the upstream using mutual TLS by presenting + // client certificates for authentication. + // Compared to Mutual mode, this mode uses certificates generated + // automatically by Istio for mTLS authentication. When this mode is + // used, all other fields in `TLSSettings` should be empty. 
+ TLSSettings_ISTIO_MUTUAL TLSSettings_TLSmode = 3 +) + +var TLSSettings_TLSmode_name = map[int32]string{ + 0: "DISABLE", + 1: "SIMPLE", + 2: "MUTUAL", + 3: "ISTIO_MUTUAL", +} + +var TLSSettings_TLSmode_value = map[string]int32{ + "DISABLE": 0, + "SIMPLE": 1, + "MUTUAL": 2, + "ISTIO_MUTUAL": 3, +} + +func (x TLSSettings_TLSmode) String() string { + return proto.EnumName(TLSSettings_TLSmode_name, int32(x)) +} + +func (TLSSettings_TLSmode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{6, 0} +} + +// DestinationRule defines policies that apply to traffic intended for a service +// after routing has occurred. +// +// +type DestinationRule struct { + // The name of a service from the service registry. Service + // names are looked up from the platform's service registry (e.g., + // Kubernetes services, Consul services, etc.) and from the hosts + // declared by [ServiceEntries](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). Rules defined for + // services that do not exist in the service registry will be ignored. + // + // *Note for Kubernetes users*: When short names are used (e.g. "reviews" + // instead of "reviews.default.svc.cluster.local"), Istio will interpret + // the short name based on the namespace of the rule, not the service. A + // rule in the "default" namespace containing a host "reviews" will be + // interpreted as "reviews.default.svc.cluster.local", irrespective of + // the actual namespace associated with the reviews service. _To avoid + // potential misconfigurations, it is recommended to always use fully + // qualified domain names over short names._ + // + // Note that the host field applies to both HTTP and TCP services. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // Traffic policies to apply (load balancing policy, connection pool + // sizes, outlier detection). 
+ TrafficPolicy *TrafficPolicy `protobuf:"bytes,2,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"` + // One or more named sets that represent individual versions of a + // service. Traffic policies can be overridden at subset level. + Subsets []*Subset `protobuf:"bytes,3,rep,name=subsets,proto3" json:"subsets,omitempty"` + // A list of namespaces to which this destination rule is exported. + // The resolution of a destination rule to apply to a service occurs in the + // context of a hierarchy of namespaces. Exporting a destination rule allows + // it to be included in the resolution hierarchy for services in + // other namespaces. This feature provides a mechanism for service owners + // and mesh administrators to control the visibility of destination rules + // across namespace boundaries. + // + // If no namespaces are specified then the destination rule is exported to all + // namespaces by default. + // + // The value "." is reserved and defines an export to the same namespace that + // the destination rule is declared in. Similarly, the value "*" is reserved and + // defines an export to all namespaces. + // + // NOTE: in the current release, the `exportTo` value is restricted to + // "." or "*" (i.e., the current namespace or all namespaces). 
+ ExportTo []string `protobuf:"bytes,4,rep,name=export_to,json=exportTo,proto3" json:"export_to,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DestinationRule) Reset() { *m = DestinationRule{} } +func (m *DestinationRule) String() string { return proto.CompactTextString(m) } +func (*DestinationRule) ProtoMessage() {} +func (*DestinationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{0} +} +func (m *DestinationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DestinationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DestinationRule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DestinationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_DestinationRule.Merge(m, src) +} +func (m *DestinationRule) XXX_Size() int { + return m.Size() +} +func (m *DestinationRule) XXX_DiscardUnknown() { + xxx_messageInfo_DestinationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_DestinationRule proto.InternalMessageInfo + +func (m *DestinationRule) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *DestinationRule) GetTrafficPolicy() *TrafficPolicy { + if m != nil { + return m.TrafficPolicy + } + return nil +} + +func (m *DestinationRule) GetSubsets() []*Subset { + if m != nil { + return m.Subsets + } + return nil +} + +func (m *DestinationRule) GetExportTo() []string { + if m != nil { + return m.ExportTo + } + return nil +} + +// Traffic policies to apply for a specific destination, across all +// destination ports. See DestinationRule for examples. +type TrafficPolicy struct { + // Settings controlling the load balancer algorithms. 
+ LoadBalancer *LoadBalancerSettings `protobuf:"bytes,1,opt,name=load_balancer,json=loadBalancer,proto3" json:"load_balancer,omitempty"` + // Settings controlling the volume of connections to an upstream service + ConnectionPool *ConnectionPoolSettings `protobuf:"bytes,2,opt,name=connection_pool,json=connectionPool,proto3" json:"connection_pool,omitempty"` + // Settings controlling eviction of unhealthy hosts from the load balancing pool + OutlierDetection *OutlierDetection `protobuf:"bytes,3,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // TLS related settings for connections to the upstream service. + Tls *TLSSettings `protobuf:"bytes,4,opt,name=tls,proto3" json:"tls,omitempty"` + // Traffic policies specific to individual ports. Note that port level + // settings will override the destination-level settings. Traffic + // settings specified at the destination-level will not be inherited when + // overridden by port-level settings, i.e. default values will be applied + // to fields omitted in port-level traffic policies. 
+ PortLevelSettings []*TrafficPolicy_PortTrafficPolicy `protobuf:"bytes,5,rep,name=port_level_settings,json=portLevelSettings,proto3" json:"port_level_settings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrafficPolicy) Reset() { *m = TrafficPolicy{} } +func (m *TrafficPolicy) String() string { return proto.CompactTextString(m) } +func (*TrafficPolicy) ProtoMessage() {} +func (*TrafficPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{1} +} +func (m *TrafficPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TrafficPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TrafficPolicy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TrafficPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrafficPolicy.Merge(m, src) +} +func (m *TrafficPolicy) XXX_Size() int { + return m.Size() +} +func (m *TrafficPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TrafficPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TrafficPolicy proto.InternalMessageInfo + +func (m *TrafficPolicy) GetLoadBalancer() *LoadBalancerSettings { + if m != nil { + return m.LoadBalancer + } + return nil +} + +func (m *TrafficPolicy) GetConnectionPool() *ConnectionPoolSettings { + if m != nil { + return m.ConnectionPool + } + return nil +} + +func (m *TrafficPolicy) GetOutlierDetection() *OutlierDetection { + if m != nil { + return m.OutlierDetection + } + return nil +} + +func (m *TrafficPolicy) GetTls() *TLSSettings { + if m != nil { + return m.Tls + } + return nil +} + +func (m *TrafficPolicy) GetPortLevelSettings() []*TrafficPolicy_PortTrafficPolicy { + if m != nil { + return m.PortLevelSettings + } + return nil +} + +// Traffic policies that apply to specific 
ports of the service +type TrafficPolicy_PortTrafficPolicy struct { + // Specifies the number of a port on the destination service + // on which this policy is being applied. + // + Port *PortSelector `protobuf:"bytes,1,opt,name=port,proto3" json:"port,omitempty"` + // Settings controlling the load balancer algorithms. + LoadBalancer *LoadBalancerSettings `protobuf:"bytes,2,opt,name=load_balancer,json=loadBalancer,proto3" json:"load_balancer,omitempty"` + // Settings controlling the volume of connections to an upstream service + ConnectionPool *ConnectionPoolSettings `protobuf:"bytes,3,opt,name=connection_pool,json=connectionPool,proto3" json:"connection_pool,omitempty"` + // Settings controlling eviction of unhealthy hosts from the load balancing pool + OutlierDetection *OutlierDetection `protobuf:"bytes,4,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"` + // TLS related settings for connections to the upstream service. + Tls *TLSSettings `protobuf:"bytes,5,opt,name=tls,proto3" json:"tls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrafficPolicy_PortTrafficPolicy) Reset() { *m = TrafficPolicy_PortTrafficPolicy{} } +func (m *TrafficPolicy_PortTrafficPolicy) String() string { return proto.CompactTextString(m) } +func (*TrafficPolicy_PortTrafficPolicy) ProtoMessage() {} +func (*TrafficPolicy_PortTrafficPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{1, 0} +} +func (m *TrafficPolicy_PortTrafficPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TrafficPolicy_PortTrafficPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TrafficPolicy_PortTrafficPolicy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } 
+} +func (m *TrafficPolicy_PortTrafficPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrafficPolicy_PortTrafficPolicy.Merge(m, src) +} +func (m *TrafficPolicy_PortTrafficPolicy) XXX_Size() int { + return m.Size() +} +func (m *TrafficPolicy_PortTrafficPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TrafficPolicy_PortTrafficPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TrafficPolicy_PortTrafficPolicy proto.InternalMessageInfo + +func (m *TrafficPolicy_PortTrafficPolicy) GetPort() *PortSelector { + if m != nil { + return m.Port + } + return nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) GetLoadBalancer() *LoadBalancerSettings { + if m != nil { + return m.LoadBalancer + } + return nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) GetConnectionPool() *ConnectionPoolSettings { + if m != nil { + return m.ConnectionPool + } + return nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) GetOutlierDetection() *OutlierDetection { + if m != nil { + return m.OutlierDetection + } + return nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) GetTls() *TLSSettings { + if m != nil { + return m.Tls + } + return nil +} + +// A subset of endpoints of a service. Subsets can be used for scenarios +// like A/B testing, or routing to a specific version of a service. Refer +// to [VirtualService](https://istio.io/docs/reference/config/networking/virtual-service/#VirtualService) documentation for examples of using +// subsets in these scenarios. In addition, traffic policies defined at the +// service-level can be overridden at a subset-level. The following rule +// uses a round robin load balancing policy for all traffic going to a +// subset named testversion that is composed of endpoints (e.g., pods) with +// labels (version:v3). 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// ``` +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +// +// One or more labels are typically required to identify the subset destination, +// however, when the corresponding DestinationRule represents a host that +// supports multiple SNI hosts (e.g., an egress gateway), a subset without labels +// may be meaningful. In this case a traffic policy with [TLSSettings](#TLSSettings) +// can be used to identify a specific SNI host corresponding to the named subset. +type Subset struct { + // Name of the subset. The service name and the subset name can + // be used for traffic splitting in a route rule. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Labels apply a filter over the endpoints of a service in the + // service registry. See route rules for examples of usage. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Traffic policies that apply to this subset. Subsets inherit the + // traffic policies specified at the DestinationRule level. Settings + // specified at the subset level will override the corresponding settings + // specified at the DestinationRule level. 
+ TrafficPolicy *TrafficPolicy `protobuf:"bytes,3,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subset) Reset() { *m = Subset{} } +func (m *Subset) String() string { return proto.CompactTextString(m) } +func (*Subset) ProtoMessage() {} +func (*Subset) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{2} +} +func (m *Subset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Subset.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Subset) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subset.Merge(m, src) +} +func (m *Subset) XXX_Size() int { + return m.Size() +} +func (m *Subset) XXX_DiscardUnknown() { + xxx_messageInfo_Subset.DiscardUnknown(m) +} + +var xxx_messageInfo_Subset proto.InternalMessageInfo + +func (m *Subset) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Subset) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Subset) GetTrafficPolicy() *TrafficPolicy { + if m != nil { + return m.TrafficPolicy + } + return nil +} + +// Load balancing policies to apply for a specific destination. See Envoy's +// load balancing +// [documentation](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/load_balancing) +// for more details. +// +// For example, the following rule uses a round robin load balancing policy +// for all traffic going to the ratings service. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// ``` +// +// The following example sets up sticky sessions for the ratings service +// hashing-based load balancer for the same ratings service using the +// the User cookie as the hash key. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// consistentHash: +// httpCookie: +// name: user +// ttl: 0s +// ``` +// +type LoadBalancerSettings struct { + // Upstream load balancing policy. + // + // Types that are valid to be assigned to LbPolicy: + // *LoadBalancerSettings_Simple + // *LoadBalancerSettings_ConsistentHash + LbPolicy isLoadBalancerSettings_LbPolicy `protobuf_oneof:"lb_policy"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalancerSettings) Reset() { *m = LoadBalancerSettings{} } +func (m *LoadBalancerSettings) String() string { return proto.CompactTextString(m) } +func (*LoadBalancerSettings) ProtoMessage() {} +func (*LoadBalancerSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{3} +} +func (m *LoadBalancerSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LoadBalancerSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LoadBalancerSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerSettings.Merge(m, src) +} +func (m 
*LoadBalancerSettings) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerSettings) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalancerSettings proto.InternalMessageInfo + +type isLoadBalancerSettings_LbPolicy interface { + isLoadBalancerSettings_LbPolicy() + MarshalTo([]byte) (int, error) + Size() int +} + +type LoadBalancerSettings_Simple struct { + Simple LoadBalancerSettings_SimpleLB `protobuf:"varint,1,opt,name=simple,proto3,enum=istio.networking.v1alpha3.LoadBalancerSettings_SimpleLB,oneof"` +} +type LoadBalancerSettings_ConsistentHash struct { + ConsistentHash *LoadBalancerSettings_ConsistentHashLB `protobuf:"bytes,2,opt,name=consistent_hash,json=consistentHash,proto3,oneof"` +} + +func (*LoadBalancerSettings_Simple) isLoadBalancerSettings_LbPolicy() {} +func (*LoadBalancerSettings_ConsistentHash) isLoadBalancerSettings_LbPolicy() {} + +func (m *LoadBalancerSettings) GetLbPolicy() isLoadBalancerSettings_LbPolicy { + if m != nil { + return m.LbPolicy + } + return nil +} + +func (m *LoadBalancerSettings) GetSimple() LoadBalancerSettings_SimpleLB { + if x, ok := m.GetLbPolicy().(*LoadBalancerSettings_Simple); ok { + return x.Simple + } + return LoadBalancerSettings_ROUND_ROBIN +} + +func (m *LoadBalancerSettings) GetConsistentHash() *LoadBalancerSettings_ConsistentHashLB { + if x, ok := m.GetLbPolicy().(*LoadBalancerSettings_ConsistentHash); ok { + return x.ConsistentHash + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*LoadBalancerSettings) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalancerSettings_Simple)(nil), + (*LoadBalancerSettings_ConsistentHash)(nil), + } +} + +// Consistent Hash-based load balancing can be used to provide soft +// session affinity based on HTTP headers, cookies or other +// properties. This load balancing policy is applicable only for HTTP +// connections. 
The affinity to a particular destination host will be +// lost when one or more hosts are added/removed from the destination +// service. +type LoadBalancerSettings_ConsistentHashLB struct { + // The hash key to use. + // + // Types that are valid to be assigned to HashKey: + // *LoadBalancerSettings_ConsistentHashLB_HttpHeaderName + // *LoadBalancerSettings_ConsistentHashLB_HttpCookie + // *LoadBalancerSettings_ConsistentHashLB_UseSourceIp + HashKey isLoadBalancerSettings_ConsistentHashLB_HashKey `protobuf_oneof:"hash_key"` + // The minimum number of virtual nodes to use for the hash + // ring. Defaults to 1024. Larger ring sizes result in more granular + // load distributions. If the number of hosts in the load balancing + // pool is larger than the ring size, each host will be assigned a + // single virtual node. + MinimumRingSize uint64 `protobuf:"varint,4,opt,name=minimum_ring_size,json=minimumRingSize,proto3" json:"minimum_ring_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalancerSettings_ConsistentHashLB) Reset() { *m = LoadBalancerSettings_ConsistentHashLB{} } +func (m *LoadBalancerSettings_ConsistentHashLB) String() string { return proto.CompactTextString(m) } +func (*LoadBalancerSettings_ConsistentHashLB) ProtoMessage() {} +func (*LoadBalancerSettings_ConsistentHashLB) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{3, 0} +} +func (m *LoadBalancerSettings_ConsistentHashLB) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerSettings_ConsistentHashLB) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*LoadBalancerSettings_ConsistentHashLB) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB.Merge(m, src) +} +func (m *LoadBalancerSettings_ConsistentHashLB) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerSettings_ConsistentHashLB) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB proto.InternalMessageInfo + +type isLoadBalancerSettings_ConsistentHashLB_HashKey interface { + isLoadBalancerSettings_ConsistentHashLB_HashKey() + MarshalTo([]byte) (int, error) + Size() int +} + +type LoadBalancerSettings_ConsistentHashLB_HttpHeaderName struct { + HttpHeaderName string `protobuf:"bytes,1,opt,name=http_header_name,json=httpHeaderName,proto3,oneof"` +} +type LoadBalancerSettings_ConsistentHashLB_HttpCookie struct { + HttpCookie *LoadBalancerSettings_ConsistentHashLB_HTTPCookie `protobuf:"bytes,2,opt,name=http_cookie,json=httpCookie,proto3,oneof"` +} +type LoadBalancerSettings_ConsistentHashLB_UseSourceIp struct { + UseSourceIp bool `protobuf:"varint,3,opt,name=use_source_ip,json=useSourceIp,proto3,oneof"` +} + +func (*LoadBalancerSettings_ConsistentHashLB_HttpHeaderName) isLoadBalancerSettings_ConsistentHashLB_HashKey() { +} +func (*LoadBalancerSettings_ConsistentHashLB_HttpCookie) isLoadBalancerSettings_ConsistentHashLB_HashKey() { +} +func (*LoadBalancerSettings_ConsistentHashLB_UseSourceIp) isLoadBalancerSettings_ConsistentHashLB_HashKey() { +} + +func (m *LoadBalancerSettings_ConsistentHashLB) GetHashKey() isLoadBalancerSettings_ConsistentHashLB_HashKey { + if m != nil { + return m.HashKey + } + return nil +} + +func (m *LoadBalancerSettings_ConsistentHashLB) GetHttpHeaderName() string { + if x, ok := m.GetHashKey().(*LoadBalancerSettings_ConsistentHashLB_HttpHeaderName); ok { + return x.HttpHeaderName + } + return "" +} + +func (m *LoadBalancerSettings_ConsistentHashLB) GetHttpCookie() 
*LoadBalancerSettings_ConsistentHashLB_HTTPCookie { + if x, ok := m.GetHashKey().(*LoadBalancerSettings_ConsistentHashLB_HttpCookie); ok { + return x.HttpCookie + } + return nil +} + +func (m *LoadBalancerSettings_ConsistentHashLB) GetUseSourceIp() bool { + if x, ok := m.GetHashKey().(*LoadBalancerSettings_ConsistentHashLB_UseSourceIp); ok { + return x.UseSourceIp + } + return false +} + +func (m *LoadBalancerSettings_ConsistentHashLB) GetMinimumRingSize() uint64 { + if m != nil { + return m.MinimumRingSize + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*LoadBalancerSettings_ConsistentHashLB) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalancerSettings_ConsistentHashLB_HttpHeaderName)(nil), + (*LoadBalancerSettings_ConsistentHashLB_HttpCookie)(nil), + (*LoadBalancerSettings_ConsistentHashLB_UseSourceIp)(nil), + } +} + +// Describes a HTTP cookie that will be used as the hash key for the +// Consistent Hash load balancer. If the cookie is not present, it will +// be generated. +type LoadBalancerSettings_ConsistentHashLB_HTTPCookie struct { + // Name of the cookie. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Path to set for the cookie. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Lifetime of the cookie. 
+ Ttl *time.Duration `protobuf:"bytes,3,opt,name=ttl,proto3,stdduration" json:"ttl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) Reset() { + *m = LoadBalancerSettings_ConsistentHashLB_HTTPCookie{} +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) String() string { + return proto.CompactTextString(m) +} +func (*LoadBalancerSettings_ConsistentHashLB_HTTPCookie) ProtoMessage() {} +func (*LoadBalancerSettings_ConsistentHashLB_HTTPCookie) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{3, 0, 0} +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB_HTTPCookie.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB_HTTPCookie.Merge(m, src) +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB_HTTPCookie.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalancerSettings_ConsistentHashLB_HTTPCookie proto.InternalMessageInfo + +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m 
*LoadBalancerSettings_ConsistentHashLB_HTTPCookie) GetTtl() *time.Duration { + if m != nil { + return m.Ttl + } + return nil +} + +// Connection pool settings for an upstream host. The settings apply to +// each individual host in the upstream service. See Envoy's [circuit +// breaker](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking) +// for more details. Connection pool settings can be applied at the TCP +// level as well as at HTTP level. +// +// For example, the following rule sets a limit of 100 connections to redis +// service called myredissrv with a connect timeout of 30ms +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-redis +// spec: +// host: myredissrv.prod.svc.cluster.local +// trafficPolicy: +// connectionPool: +// tcp: +// maxConnections: 100 +// connectTimeout: 30ms +// tcpKeepalive: +// time: 7200s +// interval: 75s +// ``` +type ConnectionPoolSettings struct { + // Settings common to both HTTP and TCP upstream connections. + Tcp *ConnectionPoolSettings_TCPSettings `protobuf:"bytes,1,opt,name=tcp,proto3" json:"tcp,omitempty"` + // HTTP connection pool settings. 
+ Http *ConnectionPoolSettings_HTTPSettings `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionPoolSettings) Reset() { *m = ConnectionPoolSettings{} } +func (m *ConnectionPoolSettings) String() string { return proto.CompactTextString(m) } +func (*ConnectionPoolSettings) ProtoMessage() {} +func (*ConnectionPoolSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{4} +} +func (m *ConnectionPoolSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionPoolSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionPoolSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionPoolSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPoolSettings.Merge(m, src) +} +func (m *ConnectionPoolSettings) XXX_Size() int { + return m.Size() +} +func (m *ConnectionPoolSettings) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPoolSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPoolSettings proto.InternalMessageInfo + +func (m *ConnectionPoolSettings) GetTcp() *ConnectionPoolSettings_TCPSettings { + if m != nil { + return m.Tcp + } + return nil +} + +func (m *ConnectionPoolSettings) GetHttp() *ConnectionPoolSettings_HTTPSettings { + if m != nil { + return m.Http + } + return nil +} + +// Settings common to both HTTP and TCP upstream connections. +type ConnectionPoolSettings_TCPSettings struct { + // Maximum number of HTTP1 /TCP connections to a destination host. Default 2^32-1. + MaxConnections int32 `protobuf:"varint,1,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"` + // TCP connection timeout. 
+ ConnectTimeout *types.Duration `protobuf:"bytes,2,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"` + // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + TcpKeepalive *ConnectionPoolSettings_TCPSettings_TcpKeepalive `protobuf:"bytes,3,opt,name=tcp_keepalive,json=tcpKeepalive,proto3" json:"tcp_keepalive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionPoolSettings_TCPSettings) Reset() { *m = ConnectionPoolSettings_TCPSettings{} } +func (m *ConnectionPoolSettings_TCPSettings) String() string { return proto.CompactTextString(m) } +func (*ConnectionPoolSettings_TCPSettings) ProtoMessage() {} +func (*ConnectionPoolSettings_TCPSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{4, 0} +} +func (m *ConnectionPoolSettings_TCPSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionPoolSettings_TCPSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionPoolSettings_TCPSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionPoolSettings_TCPSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPoolSettings_TCPSettings.Merge(m, src) +} +func (m *ConnectionPoolSettings_TCPSettings) XXX_Size() int { + return m.Size() +} +func (m *ConnectionPoolSettings_TCPSettings) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPoolSettings_TCPSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPoolSettings_TCPSettings proto.InternalMessageInfo + +func (m *ConnectionPoolSettings_TCPSettings) GetMaxConnections() int32 { + if m != nil { + return m.MaxConnections + } + return 0 +} + +func (m *ConnectionPoolSettings_TCPSettings) 
GetConnectTimeout() *types.Duration { + if m != nil { + return m.ConnectTimeout + } + return nil +} + +func (m *ConnectionPoolSettings_TCPSettings) GetTcpKeepalive() *ConnectionPoolSettings_TCPSettings_TcpKeepalive { + if m != nil { + return m.TcpKeepalive + } + return nil +} + +// TCP keepalive. +type ConnectionPoolSettings_TCPSettings_TcpKeepalive struct { + // Maximum number of keepalive probes to send without response before + // deciding the connection is dead. Default is to use the OS level configuration + // (unless overridden, Linux defaults to 9.) + Probes uint32 `protobuf:"varint,1,opt,name=probes,proto3" json:"probes,omitempty"` + // The time duration a connection needs to be idle before keep-alive + // probes start being sent. Default is to use the OS level configuration + // (unless overridden, Linux defaults to 7200s (ie 2 hours.) + Time *types.Duration `protobuf:"bytes,2,opt,name=time,proto3" json:"time,omitempty"` + // The time duration between keep-alive probes. + // Default is to use the OS level configuration + // (unless overridden, Linux defaults to 75s.) 
+ Interval *types.Duration `protobuf:"bytes,3,opt,name=interval,proto3" json:"interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) Reset() { + *m = ConnectionPoolSettings_TCPSettings_TcpKeepalive{} +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) String() string { + return proto.CompactTextString(m) +} +func (*ConnectionPoolSettings_TCPSettings_TcpKeepalive) ProtoMessage() {} +func (*ConnectionPoolSettings_TCPSettings_TcpKeepalive) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{4, 0, 0} +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionPoolSettings_TCPSettings_TcpKeepalive.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPoolSettings_TCPSettings_TcpKeepalive.Merge(m, src) +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) XXX_Size() int { + return m.Size() +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPoolSettings_TCPSettings_TcpKeepalive.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPoolSettings_TCPSettings_TcpKeepalive proto.InternalMessageInfo + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) GetProbes() uint32 { + if m != nil { + return m.Probes + } + return 0 +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) GetTime() *types.Duration { + if m != nil { + return m.Time + } + return nil +} + +func (m 
*ConnectionPoolSettings_TCPSettings_TcpKeepalive) GetInterval() *types.Duration { + if m != nil { + return m.Interval + } + return nil +} + +// Settings applicable to HTTP1.1/HTTP2/GRPC connections. +type ConnectionPoolSettings_HTTPSettings struct { + // Maximum number of pending HTTP requests to a destination. Default 2^32-1. + Http1MaxPendingRequests int32 `protobuf:"varint,1,opt,name=http1_max_pending_requests,json=http1MaxPendingRequests,proto3" json:"http1_max_pending_requests,omitempty"` + // Maximum number of requests to a backend. Default 2^32-1. + Http2MaxRequests int32 `protobuf:"varint,2,opt,name=http2_max_requests,json=http2MaxRequests,proto3" json:"http2_max_requests,omitempty"` + // Maximum number of requests per connection to a backend. Setting this + // parameter to 1 disables keep alive. Default 0, meaning "unlimited", + // up to 2^29. + MaxRequestsPerConnection int32 `protobuf:"varint,3,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"` + // Maximum number of retries that can be outstanding to all hosts in a + // cluster at a given time. Defaults to 2^32-1. + MaxRetries int32 `protobuf:"varint,4,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` + // The idle timeout for upstream connection pool connections. The idle timeout is defined as the period in which there are no active requests. + // If not set, there is no idle timeout. When the idle timeout is reached the connection will be closed. + // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. Applies to both HTTP1.1 and HTTP2 connections. + IdleTimeout *types.Duration `protobuf:"bytes,5,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"` + // Specify if http1.1 connection should be upgraded to http2 for the associated destination. 
+ H2UpgradePolicy ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy `protobuf:"varint,6,opt,name=h2_upgrade_policy,json=h2UpgradePolicy,proto3,enum=istio.networking.v1alpha3.ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy" json:"h2_upgrade_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionPoolSettings_HTTPSettings) Reset() { *m = ConnectionPoolSettings_HTTPSettings{} } +func (m *ConnectionPoolSettings_HTTPSettings) String() string { return proto.CompactTextString(m) } +func (*ConnectionPoolSettings_HTTPSettings) ProtoMessage() {} +func (*ConnectionPoolSettings_HTTPSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{4, 1} +} +func (m *ConnectionPoolSettings_HTTPSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionPoolSettings_HTTPSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionPoolSettings_HTTPSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionPoolSettings_HTTPSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPoolSettings_HTTPSettings.Merge(m, src) +} +func (m *ConnectionPoolSettings_HTTPSettings) XXX_Size() int { + return m.Size() +} +func (m *ConnectionPoolSettings_HTTPSettings) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPoolSettings_HTTPSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPoolSettings_HTTPSettings proto.InternalMessageInfo + +func (m *ConnectionPoolSettings_HTTPSettings) GetHttp1MaxPendingRequests() int32 { + if m != nil { + return m.Http1MaxPendingRequests + } + return 0 +} + +func (m *ConnectionPoolSettings_HTTPSettings) GetHttp2MaxRequests() int32 { + if m != nil { + return m.Http2MaxRequests + } + return 0 +} 
+ +func (m *ConnectionPoolSettings_HTTPSettings) GetMaxRequestsPerConnection() int32 { + if m != nil { + return m.MaxRequestsPerConnection + } + return 0 +} + +func (m *ConnectionPoolSettings_HTTPSettings) GetMaxRetries() int32 { + if m != nil { + return m.MaxRetries + } + return 0 +} + +func (m *ConnectionPoolSettings_HTTPSettings) GetIdleTimeout() *types.Duration { + if m != nil { + return m.IdleTimeout + } + return nil +} + +func (m *ConnectionPoolSettings_HTTPSettings) GetH2UpgradePolicy() ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy { + if m != nil { + return m.H2UpgradePolicy + } + return ConnectionPoolSettings_HTTPSettings_DEFAULT +} + +// A Circuit breaker implementation that tracks the status of each +// individual host in the upstream service. Applicable to both HTTP and +// TCP services. For HTTP services, hosts that continually return 5xx +// errors for API calls are ejected from the pool for a pre-defined period +// of time. For TCP services, connection timeouts or connection +// failures to a given host counts as an error when measuring the +// consecutive errors metric. See Envoy's [outlier +// detection](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/outlier) +// for more details. +// +// The following rule sets a connection pool size of 100 HTTP1 connections +// with no more than 10 req/connection to the "reviews" service. In addition, +// it sets a limit of 1000 concurrent HTTP2 requests and configures upstream +// hosts to be scanned every 5 mins so that any host that fails 7 consecutive +// times with a 502, 503, or 504 error code will be ejected for 15 minutes. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-cb-policy +// spec: +// host: reviews.prod.svc.cluster.local +// trafficPolicy: +// connectionPool: +// tcp: +// maxConnections: 100 +// http: +// http2MaxRequests: 1000 +// maxRequestsPerConnection: 10 +// outlierDetection: +// consecutiveErrors: 7 +// interval: 5m +// baseEjectionTime: 15m +// ``` +type OutlierDetection struct { + // Number of errors before a host is ejected from the connection + // pool. Defaults to 5. When the upstream host is accessed over HTTP, a + // 502, 503, or 504 return code qualifies as an error. When the upstream host + // is accessed over an opaque TCP connection, connect timeouts and + // connection error/failure events qualify as an error. + ConsecutiveErrors int32 `protobuf:"varint,1,opt,name=consecutive_errors,json=consecutiveErrors,proto3" json:"consecutive_errors,omitempty"` + // Time interval between ejection sweep analysis. format: + // 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s. + Interval *types.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"` + // Minimum ejection duration. A host will remain ejected for a period + // equal to the product of minimum ejection duration and the number of + // times the host has been ejected. This technique allows the system to + // automatically increase the ejection period for unhealthy upstream + // servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s. + BaseEjectionTime *types.Duration `protobuf:"bytes,3,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"` + // Maximum % of hosts in the load balancing pool for the upstream + // service that can be ejected. Defaults to 10%. 
+ MaxEjectionPercent int32 `protobuf:"varint,4,opt,name=max_ejection_percent,json=maxEjectionPercent,proto3" json:"max_ejection_percent,omitempty"` + // Outlier detection will be enabled as long as the associated load balancing + // pool has at least min_health_percent hosts in healthy mode. When the + // percentage of healthy hosts in the load balancing pool drops below this + // threshold, outlier detection will be disabled and the proxy will load balance + // across all hosts in the pool (healthy and unhealthy). The threshold can be + // disabled by setting it to 0%. The default is 0% as it's not typically + // applicable in k8s environments with few pods per service. + MinHealthPercent int32 `protobuf:"varint,5,opt,name=min_health_percent,json=minHealthPercent,proto3" json:"min_health_percent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OutlierDetection) Reset() { *m = OutlierDetection{} } +func (m *OutlierDetection) String() string { return proto.CompactTextString(m) } +func (*OutlierDetection) ProtoMessage() {} +func (*OutlierDetection) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{5} +} +func (m *OutlierDetection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OutlierDetection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OutlierDetection.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OutlierDetection) XXX_Merge(src proto.Message) { + xxx_messageInfo_OutlierDetection.Merge(m, src) +} +func (m *OutlierDetection) XXX_Size() int { + return m.Size() +} +func (m *OutlierDetection) XXX_DiscardUnknown() { + xxx_messageInfo_OutlierDetection.DiscardUnknown(m) +} + +var xxx_messageInfo_OutlierDetection proto.InternalMessageInfo + +func (m 
*OutlierDetection) GetConsecutiveErrors() int32 { + if m != nil { + return m.ConsecutiveErrors + } + return 0 +} + +func (m *OutlierDetection) GetInterval() *types.Duration { + if m != nil { + return m.Interval + } + return nil +} + +func (m *OutlierDetection) GetBaseEjectionTime() *types.Duration { + if m != nil { + return m.BaseEjectionTime + } + return nil +} + +func (m *OutlierDetection) GetMaxEjectionPercent() int32 { + if m != nil { + return m.MaxEjectionPercent + } + return 0 +} + +func (m *OutlierDetection) GetMinHealthPercent() int32 { + if m != nil { + return m.MinHealthPercent + } + return 0 +} + +// SSL/TLS related settings for upstream connections. See Envoy's [TLS +// context](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/cert.proto.html) +// for more details. These settings are common to both HTTP and TCP upstreams. +// +// For example, the following rule configures a client to use mutual TLS +// for connections to upstream database cluster. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: db-mtls +// spec: +// host: mydbserver.prod.svc.cluster.local +// trafficPolicy: +// tls: +// mode: MUTUAL +// clientCertificate: /etc/certs/myclientcert.pem +// privateKey: /etc/certs/client_private_key.pem +// caCertificates: /etc/certs/rootcacerts.pem +// ``` +// +// The following rule configures a client to use TLS when talking to a +// foreign service whose domain matches *.foo.com. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: tls-foo +// spec: +// host: "*.foo.com" +// trafficPolicy: +// tls: +// mode: SIMPLE +// ``` +// +// The following rule configures a client to use Istio mutual TLS when talking +// to rating services. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: ratings-istio-mtls +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// tls: +// mode: ISTIO_MUTUAL +// ``` +type TLSSettings struct { + // Indicates whether connections to this port should be secured + // using TLS. The value of this field determines how TLS is enforced. + Mode TLSSettings_TLSmode `protobuf:"varint,1,opt,name=mode,proto3,enum=istio.networking.v1alpha3.TLSSettings_TLSmode" json:"mode,omitempty"` + // REQUIRED if mode is `MUTUAL`. The path to the file holding the + // client-side TLS certificate to use. + // Should be empty if mode is `ISTIO_MUTUAL`. + ClientCertificate string `protobuf:"bytes,2,opt,name=client_certificate,json=clientCertificate,proto3" json:"client_certificate,omitempty"` + // REQUIRED if mode is `MUTUAL`. The path to the file holding the + // client's private key. + // Should be empty if mode is `ISTIO_MUTUAL`. + PrivateKey string `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // OPTIONAL: The path to the file containing certificate authority + // certificates to use in verifying a presented server certificate. If + // omitted, the proxy will not verify the server's certificate. + // Should be empty if mode is `ISTIO_MUTUAL`. + CaCertificates string `protobuf:"bytes,4,opt,name=ca_certificates,json=caCertificates,proto3" json:"ca_certificates,omitempty"` + // A list of alternate names to verify the subject identity in the + // certificate. If specified, the proxy will verify that the server + // certificate's subject alt name matches one of the specified values. + // If specified, this list overrides the value of subject_alt_names + // from the ServiceEntry. 
+ SubjectAltNames []string `protobuf:"bytes,5,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"` + // SNI string to present to the server during TLS handshake. + Sni string `protobuf:"bytes,6,opt,name=sni,proto3" json:"sni,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TLSSettings) Reset() { *m = TLSSettings{} } +func (m *TLSSettings) String() string { return proto.CompactTextString(m) } +func (*TLSSettings) ProtoMessage() {} +func (*TLSSettings) Descriptor() ([]byte, []int) { + return fileDescriptor_12899beb695152c8, []int{6} +} +func (m *TLSSettings) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSSettings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TLSSettings.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TLSSettings) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSSettings.Merge(m, src) +} +func (m *TLSSettings) XXX_Size() int { + return m.Size() +} +func (m *TLSSettings) XXX_DiscardUnknown() { + xxx_messageInfo_TLSSettings.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSSettings proto.InternalMessageInfo + +func (m *TLSSettings) GetMode() TLSSettings_TLSmode { + if m != nil { + return m.Mode + } + return TLSSettings_DISABLE +} + +func (m *TLSSettings) GetClientCertificate() string { + if m != nil { + return m.ClientCertificate + } + return "" +} + +func (m *TLSSettings) GetPrivateKey() string { + if m != nil { + return m.PrivateKey + } + return "" +} + +func (m *TLSSettings) GetCaCertificates() string { + if m != nil { + return m.CaCertificates + } + return "" +} + +func (m *TLSSettings) GetSubjectAltNames() []string { + if m != nil { + return m.SubjectAltNames + } + return nil +} + +func (m *TLSSettings) GetSni() 
string { + if m != nil { + return m.Sni + } + return "" +} + +func init() { + proto.RegisterEnum("istio.networking.v1alpha3.LoadBalancerSettings_SimpleLB", LoadBalancerSettings_SimpleLB_name, LoadBalancerSettings_SimpleLB_value) + proto.RegisterEnum("istio.networking.v1alpha3.ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy", ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy_name, ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy_value) + proto.RegisterEnum("istio.networking.v1alpha3.TLSSettings_TLSmode", TLSSettings_TLSmode_name, TLSSettings_TLSmode_value) + proto.RegisterType((*DestinationRule)(nil), "istio.networking.v1alpha3.DestinationRule") + proto.RegisterType((*TrafficPolicy)(nil), "istio.networking.v1alpha3.TrafficPolicy") + proto.RegisterType((*TrafficPolicy_PortTrafficPolicy)(nil), "istio.networking.v1alpha3.TrafficPolicy.PortTrafficPolicy") + proto.RegisterType((*Subset)(nil), "istio.networking.v1alpha3.Subset") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.Subset.LabelsEntry") + proto.RegisterType((*LoadBalancerSettings)(nil), "istio.networking.v1alpha3.LoadBalancerSettings") + proto.RegisterType((*LoadBalancerSettings_ConsistentHashLB)(nil), "istio.networking.v1alpha3.LoadBalancerSettings.ConsistentHashLB") + proto.RegisterType((*LoadBalancerSettings_ConsistentHashLB_HTTPCookie)(nil), "istio.networking.v1alpha3.LoadBalancerSettings.ConsistentHashLB.HTTPCookie") + proto.RegisterType((*ConnectionPoolSettings)(nil), "istio.networking.v1alpha3.ConnectionPoolSettings") + proto.RegisterType((*ConnectionPoolSettings_TCPSettings)(nil), "istio.networking.v1alpha3.ConnectionPoolSettings.TCPSettings") + proto.RegisterType((*ConnectionPoolSettings_TCPSettings_TcpKeepalive)(nil), "istio.networking.v1alpha3.ConnectionPoolSettings.TCPSettings.TcpKeepalive") + proto.RegisterType((*ConnectionPoolSettings_HTTPSettings)(nil), "istio.networking.v1alpha3.ConnectionPoolSettings.HTTPSettings") + 
proto.RegisterType((*OutlierDetection)(nil), "istio.networking.v1alpha3.OutlierDetection") + proto.RegisterType((*TLSSettings)(nil), "istio.networking.v1alpha3.TLSSettings") +} + +func init() { + proto.RegisterFile("networking/v1alpha3/destination_rule.proto", fileDescriptor_12899beb695152c8) +} + +var fileDescriptor_12899beb695152c8 = []byte{ + // 1514 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6e, 0xdb, 0xc6, + 0x16, 0xb6, 0x7e, 0x6d, 0x1d, 0xd9, 0x12, 0x3d, 0xd7, 0x48, 0x14, 0x05, 0x70, 0x7c, 0x85, 0x8b, + 0x1b, 0xdf, 0xdc, 0x86, 0x6e, 0x94, 0x16, 0x48, 0x93, 0xa6, 0x8d, 0x64, 0xa9, 0x96, 0x1b, 0x59, + 0x12, 0x46, 0x32, 0x50, 0x64, 0x43, 0x8c, 0xa8, 0xb1, 0x34, 0x31, 0xc5, 0x61, 0xc9, 0xa1, 0x6a, + 0xe7, 0x19, 0xba, 0xe9, 0xa6, 0xe8, 0xb6, 0x2f, 0xd0, 0xe7, 0xe8, 0xae, 0x5d, 0xb4, 0x9b, 0x6e, + 0x1a, 0xe4, 0x19, 0xba, 0x2a, 0x50, 0xa0, 0x98, 0x21, 0x29, 0xc9, 0x8e, 0x63, 0xc7, 0x70, 0xb3, + 0x9b, 0x39, 0xe7, 0x3b, 0xdf, 0xfc, 0x9c, 0x8f, 0xe7, 0x0c, 0xe1, 0x8e, 0x4d, 0xc5, 0x57, 0xdc, + 0x3d, 0x64, 0xf6, 0x70, 0x6b, 0x72, 0x8f, 0x58, 0xce, 0x88, 0xdc, 0xdf, 0x1a, 0x50, 0x4f, 0x30, + 0x9b, 0x08, 0xc6, 0x6d, 0xc3, 0xf5, 0x2d, 0xaa, 0x3b, 0x2e, 0x17, 0x1c, 0xdd, 0x60, 0x9e, 0x60, + 0x5c, 0x9f, 0x45, 0xe8, 0x51, 0x44, 0xf1, 0xd6, 0x90, 0xf3, 0xa1, 0x45, 0xb7, 0x88, 0xc3, 0xb6, + 0x0e, 0x18, 0xb5, 0x06, 0x46, 0x9f, 0x8e, 0xc8, 0x84, 0x71, 0x37, 0x88, 0x2d, 0xae, 0x87, 0x00, + 0x35, 0xeb, 0xfb, 0x07, 0x5b, 0x03, 0xdf, 0x55, 0x0b, 0x84, 0xfe, 0xff, 0x9d, 0xb5, 0x8f, 0x09, + 0x73, 0x85, 0x4f, 0x2c, 0xc3, 0xa3, 0xee, 0x84, 0x99, 0xe1, 0x36, 0x8a, 0x6b, 0x43, 0x3e, 0xe4, + 0x6a, 0xb8, 0x25, 0x47, 0x81, 0xb5, 0xf4, 0x6b, 0x0c, 0xf2, 0xb5, 0xd9, 0xbe, 0xb1, 0x6f, 0x51, + 0x74, 0x1d, 0x92, 0x23, 0xee, 0x89, 0x42, 0x6c, 0x23, 0xb6, 0x99, 0xa9, 0x26, 0x5e, 0x56, 0xe2, + 0x58, 0x19, 0x50, 0x1b, 0x72, 0xc2, 0x25, 0x07, 0x07, 0xcc, 0x34, 0x1c, 0x6e, 0x31, 0xf3, 0xb8, + 0x10, 0xdf, 0x88, 0x6d, 0x66, 
0xcb, 0x9b, 0xfa, 0x1b, 0x8f, 0xa8, 0xf7, 0x82, 0x80, 0x8e, 0xc2, + 0xe3, 0x15, 0x31, 0x3f, 0x45, 0x8f, 0x60, 0xd1, 0xf3, 0xfb, 0x1e, 0x15, 0x5e, 0x21, 0xb1, 0x91, + 0xd8, 0xcc, 0x96, 0xff, 0x7d, 0x0e, 0x53, 0x57, 0x21, 0x71, 0x14, 0x81, 0x6e, 0x42, 0x86, 0x1e, + 0x39, 0xdc, 0x15, 0x86, 0xe0, 0x85, 0xe4, 0x46, 0x62, 0x33, 0x83, 0x97, 0x02, 0x43, 0x8f, 0x97, + 0x7e, 0x4b, 0xc3, 0xca, 0x89, 0xa5, 0x51, 0x0f, 0x56, 0x2c, 0x4e, 0x06, 0x46, 0x9f, 0x58, 0xc4, + 0x36, 0xa9, 0xab, 0x8e, 0x97, 0x2d, 0x6f, 0x9d, 0xb3, 0x62, 0x93, 0x93, 0x41, 0x35, 0x84, 0x77, + 0xa9, 0x10, 0xcc, 0x1e, 0x7a, 0x78, 0xd9, 0x9a, 0xb3, 0xa2, 0x67, 0x90, 0x37, 0xb9, 0x6d, 0x53, + 0x53, 0x65, 0xdd, 0xe1, 0xdc, 0x0a, 0xef, 0xe4, 0xde, 0x39, 0xbc, 0xdb, 0xd3, 0x88, 0x0e, 0xe7, + 0xd6, 0x94, 0x39, 0x67, 0x9e, 0xb0, 0xa3, 0x2f, 0x60, 0x95, 0xfb, 0xc2, 0x62, 0xd4, 0x35, 0x06, + 0x54, 0x04, 0x8e, 0x42, 0x42, 0xb1, 0xff, 0xff, 0x1c, 0xf6, 0x76, 0x10, 0x53, 0x8b, 0x42, 0xb0, + 0xc6, 0x4f, 0x59, 0xd0, 0x03, 0x48, 0x08, 0xcb, 0x2b, 0x24, 0x15, 0xd7, 0x7f, 0xcf, 0xcb, 0x5e, + 0xb3, 0x3b, 0xdd, 0x9e, 0x0c, 0x41, 0xcf, 0xe1, 0x5f, 0xea, 0xca, 0x2d, 0x3a, 0xa1, 0x52, 0x61, + 0x81, 0xaf, 0x90, 0x52, 0xd9, 0x7b, 0xf8, 0xb6, 0x3a, 0xd0, 0x3b, 0x32, 0x4b, 0x27, 0x94, 0xb1, + 0x2a, 0x69, 0x9b, 0x92, 0x35, 0x5a, 0xb0, 0xf8, 0x6d, 0x02, 0x56, 0x5f, 0x03, 0xa2, 0x47, 0x90, + 0x94, 0xd0, 0x30, 0x7d, 0xb7, 0xcf, 0x59, 0x52, 0xc6, 0x76, 0xa9, 0x45, 0x4d, 0xc1, 0x5d, 0xac, + 0x82, 0x5e, 0x17, 0x41, 0xfc, 0x1d, 0x89, 0x20, 0xf1, 0x4e, 0x45, 0x90, 0xfc, 0x07, 0x45, 0x90, + 0xba, 0xb4, 0x08, 0x4a, 0x7f, 0xc4, 0x20, 0x1d, 0x7c, 0x8d, 0xb2, 0x56, 0xd8, 0x64, 0x4c, 0x4f, + 0xd4, 0x0a, 0x69, 0x40, 0x75, 0x48, 0x5b, 0xa4, 0x4f, 0x2d, 0xaf, 0x10, 0x57, 0xda, 0xb8, 0x7b, + 0xe1, 0x97, 0xad, 0x37, 0x15, 0xbe, 0x6e, 0x0b, 0xf7, 0x18, 0x87, 0xc1, 0x67, 0x94, 0x9c, 0xc4, + 0x95, 0x4a, 0x4e, 0xf1, 0x23, 0xc8, 0xce, 0xad, 0x83, 0x34, 0x48, 0x1c, 0xd2, 0xe3, 0x60, 0xfb, + 0x58, 0x0e, 0xd1, 0x1a, 0xa4, 0x26, 0xc4, 0xf2, 0xa9, 0x92, 0x46, 
0x06, 0x07, 0x93, 0x87, 0xf1, + 0x07, 0xb1, 0xd2, 0x0f, 0x29, 0x58, 0x3b, 0x4b, 0x0d, 0x08, 0x43, 0xda, 0x63, 0x63, 0xc7, 0x0a, + 0xae, 0x21, 0x57, 0x7e, 0x70, 0x49, 0x39, 0xe9, 0x5d, 0x15, 0xdd, 0xac, 0x36, 0x16, 0x70, 0xc8, + 0x84, 0x0e, 0x95, 0xa6, 0x3c, 0xe6, 0x09, 0x6a, 0x0b, 0x63, 0x44, 0xbc, 0x51, 0xa8, 0xd5, 0x27, + 0x97, 0x25, 0xdf, 0x9e, 0xd2, 0x34, 0x88, 0x37, 0x52, 0x8b, 0xe4, 0xcc, 0x13, 0xb6, 0xe2, 0x5f, + 0x71, 0xd0, 0x4e, 0xc3, 0xd0, 0x1d, 0xd0, 0x46, 0x42, 0x38, 0xc6, 0x88, 0x92, 0x01, 0x75, 0x8d, + 0x59, 0x9a, 0x25, 0x81, 0xf4, 0x34, 0x94, 0xa3, 0x25, 0xb3, 0x6d, 0x43, 0x56, 0x61, 0x4d, 0xce, + 0x0f, 0x19, 0x0d, 0x77, 0xfa, 0xf4, 0xaa, 0x3b, 0xd5, 0x1b, 0xbd, 0x5e, 0x67, 0x5b, 0x51, 0x36, + 0x16, 0x30, 0xc8, 0x15, 0x82, 0x19, 0xfa, 0x0f, 0xac, 0xf8, 0x1e, 0x35, 0x3c, 0xee, 0xbb, 0x26, + 0x35, 0x98, 0xa3, 0x54, 0xb1, 0xd4, 0x58, 0xc0, 0x59, 0xdf, 0xa3, 0x5d, 0x65, 0xdd, 0x75, 0xd0, + 0x1d, 0x58, 0x1d, 0x33, 0x9b, 0x8d, 0xfd, 0xb1, 0xe1, 0x32, 0x7b, 0x68, 0x78, 0xec, 0x05, 0x55, + 0xdf, 0x4e, 0x12, 0xe7, 0x43, 0x07, 0x66, 0xf6, 0xb0, 0xcb, 0x5e, 0xd0, 0x22, 0x07, 0x98, 0xad, + 0xf6, 0x66, 0x59, 0x23, 0x48, 0x3a, 0x44, 0x8c, 0x42, 0x71, 0xa8, 0x31, 0xfa, 0x00, 0x12, 0x42, + 0x44, 0x9f, 0xfc, 0x0d, 0x3d, 0x68, 0xd9, 0x7a, 0xd4, 0xb2, 0xf5, 0x5a, 0xd8, 0xb2, 0xab, 0x8b, + 0x2f, 0x2b, 0xf1, 0xef, 0x7e, 0xbf, 0x15, 0xc3, 0x12, 0x5e, 0x05, 0x58, 0x92, 0x59, 0x35, 0x0e, + 0xe9, 0x71, 0xa9, 0x01, 0x4b, 0x91, 0x04, 0x50, 0x1e, 0xb2, 0xb8, 0xbd, 0xdf, 0xaa, 0x19, 0xb8, + 0x5d, 0xdd, 0x6d, 0x69, 0x0b, 0x28, 0x07, 0xd0, 0xac, 0x57, 0xba, 0x3d, 0x63, 0xbb, 0xdd, 0x6a, + 0x69, 0x31, 0x04, 0x90, 0xc6, 0x95, 0x56, 0xad, 0xbd, 0xa7, 0xc5, 0x25, 0xb8, 0x53, 0xe9, 0x76, + 0x7b, 0x0d, 0xdc, 0xde, 0xdf, 0x69, 0x68, 0x89, 0x6a, 0x16, 0x32, 0x56, 0x3f, 0xfc, 0x54, 0x4a, + 0xdf, 0x2c, 0xc1, 0xb5, 0xb3, 0xcb, 0x0c, 0x6a, 0x43, 0x42, 0x98, 0x4e, 0x58, 0x44, 0x1f, 0x5f, + 0xba, 0x4c, 0xe9, 0xbd, 0xed, 0xce, 0x5c, 0x4d, 0x30, 0x1d, 0x84, 0x21, 0x29, 0xf3, 0x13, 0xa6, + 0xfe, 
0x93, 0xcb, 0x33, 0xca, 0xdb, 0x9f, 0x52, 0x2a, 0xae, 0xe2, 0x9f, 0x71, 0xc8, 0xce, 0x2d, + 0x84, 0x6e, 0x43, 0x7e, 0x4c, 0x8e, 0x8c, 0x59, 0x85, 0xf4, 0xd4, 0x01, 0x52, 0x38, 0x37, 0x26, + 0x47, 0x33, 0x5a, 0x0f, 0x55, 0xa7, 0x05, 0xd9, 0x10, 0x6c, 0x4c, 0xb9, 0x2f, 0xc2, 0x7d, 0xbd, + 0x39, 0x3b, 0xd3, 0xc2, 0xdb, 0x0b, 0x02, 0x10, 0x87, 0x15, 0x61, 0x3a, 0xc6, 0x21, 0xa5, 0x0e, + 0xb1, 0xd8, 0x84, 0x86, 0xf9, 0xfd, 0xfc, 0x4a, 0x77, 0xa5, 0xf7, 0x4c, 0xe7, 0x69, 0xc4, 0x88, + 0x97, 0xc5, 0xdc, 0xac, 0xf8, 0x75, 0x0c, 0x96, 0xe7, 0xdd, 0xe8, 0x1a, 0xa4, 0x1d, 0x97, 0xf7, + 0x69, 0x70, 0xca, 0x15, 0x1c, 0xce, 0xd0, 0x5d, 0x48, 0xca, 0x53, 0x5d, 0x7c, 0x24, 0x05, 0x43, + 0x1f, 0xc2, 0x12, 0xb3, 0x05, 0x75, 0x27, 0xe4, 0x62, 0x8d, 0xe2, 0x29, 0xb4, 0xf8, 0x4b, 0x02, + 0x96, 0xe7, 0x73, 0x82, 0x1e, 0x41, 0x51, 0x66, 0xe5, 0x9e, 0x21, 0x73, 0xe0, 0x50, 0x7b, 0x20, + 0x3f, 0x29, 0x97, 0x7e, 0xe9, 0x53, 0x4f, 0x44, 0x89, 0xb8, 0xae, 0x10, 0x7b, 0xe4, 0xa8, 0x13, + 0xf8, 0x71, 0xe8, 0x46, 0xef, 0x01, 0x92, 0xae, 0xb2, 0x0a, 0x9e, 0x06, 0xc5, 0x55, 0x90, 0x2a, + 0x33, 0xe5, 0x3d, 0x72, 0x34, 0x45, 0x3f, 0x86, 0x9b, 0xf3, 0x38, 0xc3, 0xa1, 0xee, 0x5c, 0xd6, + 0xd5, 0x29, 0x52, 0xb8, 0x30, 0x9e, 0x45, 0x74, 0xa8, 0x3b, 0xbb, 0x7c, 0x74, 0x0b, 0xb2, 0x41, + 0xb8, 0x70, 0x19, 0x0d, 0x9e, 0x39, 0x29, 0x0c, 0x0a, 0xae, 0x2c, 0xe8, 0x63, 0x58, 0x66, 0x03, + 0x8b, 0x4e, 0xc5, 0x91, 0xba, 0xe8, 0x5a, 0xb2, 0x12, 0x1e, 0x29, 0xe3, 0x05, 0xac, 0x8e, 0xca, + 0x86, 0xef, 0x0c, 0x5d, 0x32, 0xa0, 0x51, 0x5b, 0x4a, 0xab, 0xca, 0xdf, 0xba, 0x9a, 0xee, 0xf5, + 0x46, 0x79, 0x3f, 0xa0, 0x0d, 0x9b, 0x57, 0x7e, 0x74, 0xd2, 0x50, 0xfa, 0x14, 0xf2, 0xa7, 0x30, + 0x28, 0x0b, 0x8b, 0xb5, 0xfa, 0x67, 0x95, 0xfd, 0x66, 0x4f, 0x5b, 0x40, 0x08, 0x72, 0xb5, 0xb6, + 0xd1, 0x6a, 0xf7, 0x8c, 0xfd, 0xce, 0x0e, 0xae, 0xd4, 0xea, 0x5a, 0x4c, 0x02, 0xa2, 0x49, 0xbc, + 0xf4, 0x7d, 0x1c, 0xb4, 0xd3, 0x8f, 0x03, 0x74, 0x17, 0x90, 0xec, 0x08, 0xd4, 0xf4, 0x05, 0x9b, + 0x50, 0x83, 0xba, 0x2e, 0x77, 0xa3, 0x94, 
0xae, 0xce, 0x79, 0xea, 0xca, 0x71, 0x42, 0x51, 0xf1, + 0xb7, 0x56, 0x14, 0xda, 0x01, 0xd4, 0x27, 0x1e, 0x35, 0xe8, 0xf3, 0xf0, 0xa5, 0xa4, 0x54, 0x7c, + 0xa1, 0x24, 0x35, 0x19, 0x54, 0x0f, 0x63, 0x64, 0x0e, 0xd0, 0xfb, 0xb0, 0x26, 0xf3, 0x3b, 0xe5, + 0x71, 0xa8, 0x6b, 0x52, 0x5b, 0x84, 0x89, 0x46, 0x63, 0x72, 0x14, 0xc1, 0x3b, 0x81, 0x47, 0xca, + 0x6f, 0xcc, 0x6c, 0xd9, 0xca, 0x2c, 0x31, 0x9a, 0xe2, 0x53, 0x81, 0xfc, 0xc6, 0xcc, 0x6e, 0x28, + 0x47, 0x88, 0x2e, 0xfd, 0x24, 0xeb, 0xce, 0xec, 0xd1, 0x83, 0x76, 0x20, 0x39, 0xe6, 0x83, 0xa8, + 0xbb, 0xeb, 0x6f, 0xf7, 0x54, 0x92, 0x63, 0x19, 0x15, 0x76, 0x0f, 0x39, 0x54, 0xf7, 0x6c, 0x31, + 0xd9, 0xd0, 0x4d, 0xea, 0x0a, 0x76, 0xc0, 0x4c, 0x22, 0xa2, 0x87, 0xc6, 0x6a, 0xe0, 0xd9, 0x9e, + 0x39, 0xa4, 0x8e, 0x1d, 0x97, 0x4d, 0x88, 0xa0, 0xb2, 0x4b, 0xa8, 0x9b, 0xca, 0x60, 0x08, 0x4d, + 0x4f, 0xe9, 0xb1, 0x2c, 0x88, 0x26, 0x99, 0xe7, 0x0a, 0xc4, 0x9e, 0xc1, 0x39, 0x93, 0xcc, 0x11, + 0x79, 0xb2, 0x13, 0x7a, 0x7e, 0x5f, 0x5e, 0x8a, 0x41, 0x2c, 0xa1, 0x7a, 0x79, 0xf0, 0x68, 0xcf, + 0xe0, 0x7c, 0xe8, 0xa8, 0x58, 0x42, 0xb6, 0x72, 0x4f, 0x3e, 0x89, 0x3c, 0x9b, 0x29, 0x41, 0x67, + 0xb0, 0x1c, 0x96, 0x9e, 0xc0, 0x62, 0x78, 0x18, 0x25, 0xb6, 0xdd, 0x6e, 0xa5, 0xda, 0xac, 0x6b, + 0x0b, 0xb2, 0x13, 0x75, 0x77, 0xf7, 0x3a, 0xcd, 0x7a, 0xd0, 0x95, 0xf6, 0xf6, 0x7b, 0xfb, 0x95, + 0xa6, 0x16, 0x47, 0x1a, 0x2c, 0xef, 0x76, 0x7b, 0xbb, 0x6d, 0x23, 0xb4, 0x24, 0xaa, 0xfa, 0x8f, + 0xaf, 0xd6, 0x63, 0x3f, 0xbf, 0x5a, 0x8f, 0xbd, 0x7c, 0xb5, 0x1e, 0x7b, 0xb6, 0x11, 0x5c, 0x20, + 0xe3, 0xea, 0xc7, 0xf7, 0x8c, 0x5f, 0xd8, 0x7e, 0x5a, 0xc9, 0xe0, 0xfe, 0xdf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x5d, 0x93, 0x66, 0x68, 0x0f, 0x00, 0x00, +} + +func (m *DestinationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DestinationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DestinationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExportTo) > 0 { + for iNdEx := len(m.ExportTo) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExportTo[iNdEx]) + copy(dAtA[i:], m.ExportTo[iNdEx]) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.ExportTo[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TrafficPolicy != nil { + { + size, err := m.TrafficPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TrafficPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrafficPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TrafficPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PortLevelSettings) > 0 { + for iNdEx := len(m.PortLevelSettings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.PortLevelSettings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Tls != nil { + { + size, err := m.Tls.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.OutlierDetection != nil { + { + size, err := m.OutlierDetection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ConnectionPool != nil { + { + size, err := m.ConnectionPool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.LoadBalancer != nil { + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrafficPolicy_PortTrafficPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TrafficPolicy_PortTrafficPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Tls != nil { + { + size, err := m.Tls.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a 
+ } + if m.OutlierDetection != nil { + { + size, err := m.OutlierDetection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConnectionPool != nil { + { + size, err := m.ConnectionPool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.LoadBalancer != nil { + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Subset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TrafficPolicy != nil { + { + size, err := m.TrafficPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarintDestinationRule(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintDestinationRule(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancerSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.LbPolicy != nil { + { + size := m.LbPolicy.Size() + i -= size + if _, err := m.LbPolicy.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancerSettings_Simple) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *LoadBalancerSettings_Simple) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintDestinationRule(dAtA, i, uint64(m.Simple)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *LoadBalancerSettings_ConsistentHash) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *LoadBalancerSettings_ConsistentHash) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ConsistentHash != nil { + { + size, err := m.ConsistentHash.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return 
len(dAtA) - i, nil +} +func (m *LoadBalancerSettings_ConsistentHashLB) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerSettings_ConsistentHashLB) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerSettings_ConsistentHashLB) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MinimumRingSize != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.MinimumRingSize)) + i-- + dAtA[i] = 0x20 + } + if m.HashKey != nil { + { + size := m.HashKey.Size() + i -= size + if _, err := m.HashKey.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HttpHeaderName) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HttpHeaderName) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.HttpHeaderName) + copy(dAtA[i:], m.HttpHeaderName) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.HttpHeaderName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *LoadBalancerSettings_ConsistentHashLB_HttpCookie) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HttpCookie) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HttpCookie != nil { + { + size, err := m.HttpCookie.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil 
+} +func (m *LoadBalancerSettings_ConsistentHashLB_UseSourceIp) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *LoadBalancerSettings_ConsistentHashLB_UseSourceIp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.UseSourceIp { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Ttl != nil { + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Ttl, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Ttl):]) + if err14 != nil { + return 0, err14 + } + i -= n14 + i = encodeVarintDestinationRule(dAtA, i, uint64(n14)) + i-- + dAtA[i] = 0x1a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionPoolSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ConnectionPoolSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionPoolSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Http != nil { + { + size, err := m.Http.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Tcp != nil { + { + size, err := m.Tcp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionPoolSettings_TCPSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionPoolSettings_TCPSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionPoolSettings_TCPSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TcpKeepalive != nil { + { + size, err := m.TcpKeepalive.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ConnectTimeout != nil { + { + size, err := m.ConnectTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxConnections != 0 { + i = 
encodeVarintDestinationRule(dAtA, i, uint64(m.MaxConnections)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Interval != nil { + { + size, err := m.Interval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Probes != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.Probes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConnectionPoolSettings_HTTPSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionPoolSettings_HTTPSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionPoolSettings_HTTPSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + 
copy(dAtA[i:], m.XXX_unrecognized) + } + if m.H2UpgradePolicy != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.H2UpgradePolicy)) + i-- + dAtA[i] = 0x30 + } + if m.IdleTimeout != nil { + { + size, err := m.IdleTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.MaxRetries != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.MaxRetries)) + i-- + dAtA[i] = 0x20 + } + if m.MaxRequestsPerConnection != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.MaxRequestsPerConnection)) + i-- + dAtA[i] = 0x18 + } + if m.Http2MaxRequests != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.Http2MaxRequests)) + i-- + dAtA[i] = 0x10 + } + if m.Http1MaxPendingRequests != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.Http1MaxPendingRequests)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutlierDetection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutlierDetection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OutlierDetection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MinHealthPercent != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.MinHealthPercent)) + i-- + dAtA[i] = 0x28 + } + if m.MaxEjectionPercent != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.MaxEjectionPercent)) + i-- + dAtA[i] = 0x20 + } + if m.BaseEjectionTime != nil { + { + size, err := m.BaseEjectionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Interval != nil { + { + size, err := m.Interval.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDestinationRule(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ConsecutiveErrors != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.ConsecutiveErrors)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TLSSettings) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSSettings) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSSettings) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sni) > 0 { + i -= len(m.Sni) + copy(dAtA[i:], m.Sni) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.Sni))) + i-- + dAtA[i] = 0x32 + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SubjectAltNames[iNdEx]) + copy(dAtA[i:], m.SubjectAltNames[iNdEx]) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.SubjectAltNames[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.CaCertificates) > 0 { + i -= len(m.CaCertificates) + copy(dAtA[i:], m.CaCertificates) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.CaCertificates))) + i-- + dAtA[i] = 0x22 + } + if len(m.PrivateKey) > 0 { + i -= len(m.PrivateKey) + copy(dAtA[i:], m.PrivateKey) + i = encodeVarintDestinationRule(dAtA, i, uint64(len(m.PrivateKey))) + i-- + dAtA[i] = 0x1a + } + if len(m.ClientCertificate) > 0 { + i -= len(m.ClientCertificate) + copy(dAtA[i:], m.ClientCertificate) + i = 
encodeVarintDestinationRule(dAtA, i, uint64(len(m.ClientCertificate))) + i-- + dAtA[i] = 0x12 + } + if m.Mode != 0 { + i = encodeVarintDestinationRule(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDestinationRule(dAtA []byte, offset int, v uint64) int { + offset -= sovDestinationRule(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DestinationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.TrafficPolicy != nil { + l = m.TrafficPolicy.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + } + if len(m.ExportTo) > 0 { + for _, s := range m.ExportTo { + l = len(s) + n += 1 + l + sovDestinationRule(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TrafficPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadBalancer != nil { + l = m.LoadBalancer.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.ConnectionPool != nil { + l = m.ConnectionPool.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.Tls != nil { + l = m.Tls.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if len(m.PortLevelSettings) > 0 { + for _, e := range m.PortLevelSettings { + l = e.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TrafficPolicy_PortTrafficPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != nil { + l = m.Port.Size() + 
n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.LoadBalancer != nil { + l = m.LoadBalancer.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.ConnectionPool != nil { + l = m.ConnectionPool.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.OutlierDetection != nil { + l = m.OutlierDetection.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.Tls != nil { + l = m.Tls.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Subset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovDestinationRule(uint64(len(k))) + 1 + len(v) + sovDestinationRule(uint64(len(v))) + n += mapEntrySize + 1 + sovDestinationRule(uint64(mapEntrySize)) + } + } + if m.TrafficPolicy != nil { + l = m.TrafficPolicy.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LoadBalancerSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LbPolicy != nil { + n += m.LbPolicy.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LoadBalancerSettings_Simple) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovDestinationRule(uint64(m.Simple)) + return n +} +func (m *LoadBalancerSettings_ConsistentHash) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsistentHash != nil { + l = m.ConsistentHash.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + return n +} +func (m *LoadBalancerSettings_ConsistentHashLB) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HashKey != nil { + n += m.HashKey.Size() + } + if 
m.MinimumRingSize != 0 { + n += 1 + sovDestinationRule(uint64(m.MinimumRingSize)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LoadBalancerSettings_ConsistentHashLB_HttpHeaderName) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HttpHeaderName) + n += 1 + l + sovDestinationRule(uint64(l)) + return n +} +func (m *LoadBalancerSettings_ConsistentHashLB_HttpCookie) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpCookie != nil { + l = m.HttpCookie.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + return n +} +func (m *LoadBalancerSettings_ConsistentHashLB_UseSourceIp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.Ttl != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Ttl) + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConnectionPoolSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tcp != nil { + l = m.Tcp.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.Http != nil { + l = m.Http.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConnectionPoolSettings_TCPSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxConnections != 0 { + n += 1 + sovDestinationRule(uint64(m.MaxConnections)) + } + if m.ConnectTimeout != nil { + l = m.ConnectTimeout.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.TcpKeepalive != nil { + l 
= m.TcpKeepalive.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Probes != 0 { + n += 1 + sovDestinationRule(uint64(m.Probes)) + } + if m.Time != nil { + l = m.Time.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConnectionPoolSettings_HTTPSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Http1MaxPendingRequests != 0 { + n += 1 + sovDestinationRule(uint64(m.Http1MaxPendingRequests)) + } + if m.Http2MaxRequests != 0 { + n += 1 + sovDestinationRule(uint64(m.Http2MaxRequests)) + } + if m.MaxRequestsPerConnection != 0 { + n += 1 + sovDestinationRule(uint64(m.MaxRequestsPerConnection)) + } + if m.MaxRetries != 0 { + n += 1 + sovDestinationRule(uint64(m.MaxRetries)) + } + if m.IdleTimeout != nil { + l = m.IdleTimeout.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.H2UpgradePolicy != 0 { + n += 1 + sovDestinationRule(uint64(m.H2UpgradePolicy)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OutlierDetection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsecutiveErrors != 0 { + n += 1 + sovDestinationRule(uint64(m.ConsecutiveErrors)) + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.BaseEjectionTime != nil { + l = m.BaseEjectionTime.Size() + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.MaxEjectionPercent != 0 { + n += 1 + sovDestinationRule(uint64(m.MaxEjectionPercent)) + } + if m.MinHealthPercent != 0 { + n += 1 + 
sovDestinationRule(uint64(m.MinHealthPercent)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSSettings) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mode != 0 { + n += 1 + sovDestinationRule(uint64(m.Mode)) + } + l = len(m.ClientCertificate) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + l = len(m.PrivateKey) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + l = len(m.CaCertificates) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + if len(m.SubjectAltNames) > 0 { + for _, s := range m.SubjectAltNames { + l = len(s) + n += 1 + l + sovDestinationRule(uint64(l)) + } + } + l = len(m.Sni) + if l > 0 { + n += 1 + l + sovDestinationRule(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDestinationRule(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDestinationRule(x uint64) (n int) { + return sovDestinationRule(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DestinationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DestinationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DestinationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TrafficPolicy == nil { + m.TrafficPolicy = &TrafficPolicy{} + } + if err := m.TrafficPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, &Subset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportTo = append(m.ExportTo, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrafficPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrafficPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrafficPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LoadBalancer == nil { + m.LoadBalancer = &LoadBalancerSettings{} + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionPool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionPool == nil { + m.ConnectionPool = &ConnectionPoolSettings{} + } + if err := m.ConnectionPool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutlierDetection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OutlierDetection == nil { + m.OutlierDetection = &OutlierDetection{} + } + if err := m.OutlierDetection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tls == nil { + m.Tls = &TLSSettings{} + } + if err := m.Tls.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PortLevelSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortLevelSettings = append(m.PortLevelSettings, &TrafficPolicy_PortTrafficPolicy{}) + if err := m.PortLevelSettings[len(m.PortLevelSettings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrafficPolicy_PortTrafficPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortTrafficPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortTrafficPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &PortSelector{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LoadBalancer == nil { + m.LoadBalancer = &LoadBalancerSettings{} + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionPool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionPool == nil { + m.ConnectionPool = &ConnectionPoolSettings{} + } + if err := m.ConnectionPool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutlierDetection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OutlierDetection == nil { + m.OutlierDetection = &OutlierDetection{} + } + if err := m.OutlierDetection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Tls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tls == nil { + m.Tls = &TLSSettings{} + } + if err := m.Tls.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDestinationRule + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDestinationRule + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthDestinationRule + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthDestinationRule + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + 
skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TrafficPolicy == nil { + m.TrafficPolicy = &TrafficPolicy{} + } + if err := m.TrafficPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Simple", wireType) + } + var v LoadBalancerSettings_SimpleLB + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= LoadBalancerSettings_SimpleLB(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LbPolicy = &LoadBalancerSettings_Simple{v} + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsistentHash", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LoadBalancerSettings_ConsistentHashLB{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
m.LbPolicy = &LoadBalancerSettings_ConsistentHash{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerSettings_ConsistentHashLB) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsistentHashLB: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsistentHashLB: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpHeaderName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HashKey = 
&LoadBalancerSettings_ConsistentHashLB_HttpHeaderName{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpCookie", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LoadBalancerSettings_ConsistentHashLB_HTTPCookie{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.HashKey = &LoadBalancerSettings_ConsistentHashLB_HttpCookie{v} + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseSourceIp", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HashKey = &LoadBalancerSettings_ConsistentHashLB_UseSourceIp{b} + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinimumRingSize", wireType) + } + m.MinimumRingSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinimumRingSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPCookie: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPCookie: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ttl", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ttl == nil { + m.Ttl = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Ttl, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionPoolSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionPoolSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionPoolSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tcp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tcp == nil { + m.Tcp = &ConnectionPoolSettings_TCPSettings{} + } + if err := m.Tcp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Http", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Http == nil { + m.Http = &ConnectionPoolSettings_HTTPSettings{} + } + if err := m.Http.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionPoolSettings_TCPSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxConnections", wireType) + } + m.MaxConnections = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxConnections |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ConnectTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectTimeout == nil { + m.ConnectTimeout = &types.Duration{} + } + if err := m.ConnectTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TcpKeepalive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TcpKeepalive == nil { + m.TcpKeepalive = &ConnectionPoolSettings_TCPSettings_TcpKeepalive{} + } + if err := m.TcpKeepalive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionPoolSettings_TCPSettings_TcpKeepalive) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TcpKeepalive: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TcpKeepalive: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Probes", wireType) + } + m.Probes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Probes |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Time == nil { + m.Time = &types.Duration{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &types.Duration{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionPoolSettings_HTTPSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Http1MaxPendingRequests", wireType) + } + m.Http1MaxPendingRequests = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Http1MaxPendingRequests |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Http2MaxRequests", wireType) + } + m.Http2MaxRequests = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Http2MaxRequests |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRequestsPerConnection", wireType) + } + m.MaxRequestsPerConnection = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.MaxRequestsPerConnection |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRetries", wireType) + } + m.MaxRetries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRetries |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdleTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IdleTimeout == nil { + m.IdleTimeout = &types.Duration{} + } + if err := m.IdleTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field H2UpgradePolicy", wireType) + } + m.H2UpgradePolicy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.H2UpgradePolicy |= ConnectionPoolSettings_HTTPSettings_H2UpgradePolicy(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OutlierDetection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OutlierDetection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OutlierDetection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsecutiveErrors", wireType) + } + m.ConsecutiveErrors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ConsecutiveErrors |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &types.Duration{} + } + if err := 
m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseEjectionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BaseEjectionTime == nil { + m.BaseEjectionTime = &types.Duration{} + } + if err := m.BaseEjectionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxEjectionPercent", wireType) + } + m.MaxEjectionPercent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxEjectionPercent |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinHealthPercent", wireType) + } + m.MinHealthPercent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinHealthPercent |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSSettings) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= TLSSettings_TLSmode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrivateKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrivateKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CaCertificates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CaCertificates = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubjectAltNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubjectAltNames = append(m.SubjectAltNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sni", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDestinationRule + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDestinationRule + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sni = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDestinationRule(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDestinationRule + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDestinationRule(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDestinationRule + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthDestinationRule + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDestinationRule + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDestinationRule(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthDestinationRule + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType 
%d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDestinationRule = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDestinationRule = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_deepcopy.gen.go new file mode 100644 index 0000000000..72456da067 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_deepcopy.gen.go @@ -0,0 +1,95 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/destination_rule.proto + +// `DestinationRule` defines policies that apply to traffic intended for a +// service after routing has occurred. These rules specify configuration +// for load balancing, connection pool size from the sidecar, and outlier +// detection settings to detect and evict unhealthy hosts from the load +// balancing pool. For example, a simple load balancing policy for the +// ratings service would look as follows: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// ``` +// +// Version specific policies can be specified by defining a named +// `subset` and overriding the settings specified at the service level. The +// following rule uses a round robin load balancing policy for all traffic +// going to a subset named testversion that is composed of endpoints (e.g., +// pods) with labels (version:v3). 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// ``` +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +// +// Traffic policies can be customized to specific ports as well. The +// following rule uses the least connection load balancing policy for all +// traffic to port 80, while uses a round robin load balancing setting for +// traffic to the port 9080. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings-port +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: # Apply to all ports +// portLevelSettings: +// - port: +// number: 80 +// loadBalancer: +// simple: LEAST_CONN +// - port: +// number: 9080 +// loadBalancer: +// simple: ROUND_ROBIN +// ``` + +package v1alpha3 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using DestinationRule within kubernetes types, where deepcopy-gen is used. 
+func (in *DestinationRule) DeepCopyInto(out *DestinationRule) { + p := proto.Clone(in).(*DestinationRule) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_json.gen.go new file mode 100644 index 0000000000..11b9807be2 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/destination_rule_json.gen.go @@ -0,0 +1,239 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/destination_rule.proto + +// `DestinationRule` defines policies that apply to traffic intended for a +// service after routing has occurred. These rules specify configuration +// for load balancing, connection pool size from the sidecar, and outlier +// detection settings to detect and evict unhealthy hosts from the load +// balancing pool. For example, a simple load balancing policy for the +// ratings service would look as follows: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// ``` +// +// Version specific policies can be specified by defining a named +// `subset` and overriding the settings specified at the service level. The +// following rule uses a round robin load balancing policy for all traffic +// going to a subset named testversion that is composed of endpoints (e.g., +// pods) with labels (version:v3). 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// ``` +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +// +// Traffic policies can be customized to specific ports as well. The +// following rule uses the least connection load balancing policy for all +// traffic to port 80, while uses a round robin load balancing setting for +// traffic to the port 9080. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings-port +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: # Apply to all ports +// portLevelSettings: +// - port: +// number: 80 +// loadBalancer: +// simple: LEAST_CONN +// - port: +// number: 9080 +// loadBalancer: +// simple: ROUND_ROBIN +// ``` + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for DestinationRule +func (this *DestinationRule) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for DestinationRule +func (this *DestinationRule) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TrafficPolicy +func (this *TrafficPolicy) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TrafficPolicy +func (this *TrafficPolicy) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TrafficPolicy_PortTrafficPolicy +func (this *TrafficPolicy_PortTrafficPolicy) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TrafficPolicy_PortTrafficPolicy +func (this *TrafficPolicy_PortTrafficPolicy) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Subset +func (this *Subset) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Subset +func (this *Subset) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for LoadBalancerSettings +func (this *LoadBalancerSettings) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler 
for LoadBalancerSettings +func (this *LoadBalancerSettings) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for LoadBalancerSettings_ConsistentHashLB +func (this *LoadBalancerSettings_ConsistentHashLB) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for LoadBalancerSettings_ConsistentHashLB +func (this *LoadBalancerSettings_ConsistentHashLB) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for LoadBalancerSettings_ConsistentHashLB_HTTPCookie +func (this *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for LoadBalancerSettings_ConsistentHashLB_HTTPCookie +func (this *LoadBalancerSettings_ConsistentHashLB_HTTPCookie) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for ConnectionPoolSettings +func (this *ConnectionPoolSettings) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ConnectionPoolSettings +func (this *ConnectionPoolSettings) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for ConnectionPoolSettings_TCPSettings +func (this *ConnectionPoolSettings_TCPSettings) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ConnectionPoolSettings_TCPSettings +func (this 
*ConnectionPoolSettings_TCPSettings) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for ConnectionPoolSettings_TCPSettings_TcpKeepalive +func (this *ConnectionPoolSettings_TCPSettings_TcpKeepalive) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ConnectionPoolSettings_TCPSettings_TcpKeepalive +func (this *ConnectionPoolSettings_TCPSettings_TcpKeepalive) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for ConnectionPoolSettings_HTTPSettings +func (this *ConnectionPoolSettings_HTTPSettings) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ConnectionPoolSettings_HTTPSettings +func (this *ConnectionPoolSettings_HTTPSettings) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for OutlierDetection +func (this *OutlierDetection) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for OutlierDetection +func (this *OutlierDetection) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TLSSettings +func (this *TLSSettings) MarshalJSON() ([]byte, error) { + str, err := DestinationRuleMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TLSSettings +func (this *TLSSettings) UnmarshalJSON(b []byte) error { + return DestinationRuleUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var 
( + DestinationRuleMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + DestinationRuleUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter.pb.go new file mode 100644 index 0000000000..f33f6da3a8 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter.pb.go @@ -0,0 +1,6132 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/envoy_filter.proto + +// `EnvoyFilter` provides a mechanism to customize the Envoy +// configuration generated by Istio Pilot. Use EnvoyFilter to modify +// values for certain fields, add specific filters, or even add +// entirely new listeners, clusters, etc. This feature must be used +// with care, as incorrect configurations could potentially +// destabilize the entire mesh. Unlike other Istio networking objects, +// EnvoyFilters are additively applied. Any number of EnvoyFilters can +// exist for a given workload in a specific namespace. The order of +// application of these EnvoyFilters is as follows: all EnvoyFilters +// in the config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// followed by all matching EnvoyFilters in the workload's namespace. +// +// **NOTE 1**: Since this is break glass configuration, there will not +// be any backward compatibility across different Istio releases. In +// other words, this configuration is subject to change based on +// internal implementation of Istio networking subsystem. +// +// **NOTE 2**: The envoy configuration provided through this mechanism +// should be carefully monitored across Istio proxy version upgrades, +// to ensure that deprecated fields are removed and replaced +// appropriately. 
+// +// **NOTE 3**: When multiple EnvoyFilters are bound to the same +// workload in a given namespace, all patches will be processed +// sequentially in order of creation time. The behavior is undefined +// if multiple EnvoyFilter configurations conflict with each other. +// +// **NOTE 4**: *_To apply an EnvoyFilter resource to all workloads +// (sidecars and gateways) in the system, define the resource in the +// config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// without a workloadSelector. +// +// The example below declares a global default EnvoyFilter resource in +// the root namespace called `istio-config`, that adds a custom +// protocol filter on all sidecars in the system, for outbound port +// 9307. The filter should be added before the terminating tcp_proxy +// filter to take effect. In addition, it sets a 30s idle timeout for +// all HTTP connections in both gateays and sidecars. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: custom-protocol +// namespace: istio-config # as defined in meshConfig resource. +// spec: +// configPatches: +// - applyTo: NETWORK_FILTER +// match: +// context: SIDECAR_OUTBOUND # will match outbound listeners in all sidecars +// listener: +// portNumber: 9307 +// filterChain: +// filter: +// name: "envoy.tcp_proxy" +// patch: +// operation: INSERT_BEFORE +// value: +// name: "envoy.config.filter.network.custom_protocol" +// config: +// ... 
+// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// # context omitted so that this applies to both sidecars and gateways +// listener: +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// typed_config: +// "@type": "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" +// idle_timeout: 30s +//``` +// +// The following example enables Envoy's Lua filter for all inbound +// HTTP calls arriving at service port 8080 of the reviews service pod +// with labels "app: reviews", in the bookinfo namespace. The lua +// filter calls out to an external service internal.org.net:8888 that +// requires a special cluster definition in envoy. The cluster is also +// added to the sidecar as part of this configuration. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: reviews-lua +// namespace: bookinfo +// spec: +// workloadSelector: +// labels: +// app: reviews +// configPatches: +// # The first patch adds the lua filter to the listener/http connection manager +// - applyTo: HTTP_FILTER +// match: +// context: SIDECAR_INBOUND +// listener: +// portNumber: 8080 +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// subFilter: +// name: "envoy.router" +// patch: +// operation: INSERT_BEFORE +// value: # lua filter specification +// name: envoy.lua +// config: +// inlineCode: | +// function envoy_on_request(request_handle) +// -- Make an HTTP call to an upstream host with the following headers, body, and timeout. 
+// local headers, body = request_handle:httpCall( +// "lua_cluster", +// { +// [":method"] = "POST", +// [":path"] = "/acl", +// [":authority"] = "internal.org.net" +// }, +// "authorize call", +// 5000) +// end +// # The second patch adds the cluster that is referenced by the lua code +// # cds match is omitted as a new cluster is being added +// - applyTo: CLUSTER +// match: +// context: SIDECAR_OUTBOUND +// patch: +// operation: ADD +// value: # cluster specification +// name: "lua_cluster" +// type: STRICT_DNS +// connect_timeout: 0.5s +// lb_policy: ROUND_ROBIN +// hosts: +// - socket_address: +// protocol: TCP +// address: "internal.org.net" +// port_value: 8888 +// +// ``` +// +// The following example overwrites certain fields (HTTP idle timeout +// and X-Forward-For trusted hops) in the HTTP connection manager in a +// listener on the ingress gateway in istio-system namespace for the +// SNI host app.example.com: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: hcm-tweaks +// namespace: istio-system +// spec: +// workloadSelector: +// labels: +// istio: ingress-gateway +// configPatches: +// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// context: GATEWAY +// listener: +// filterChain: +// sni: app.example.com +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// idle_timeout: 30s +// xff_num_trusted_hops: 5 +//``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ApplyTo specifies where in the Envoy configuration, the given patch should be applied. +type EnvoyFilter_ApplyTo int32 + +const ( + EnvoyFilter_INVALID EnvoyFilter_ApplyTo = 0 + // Applies the patch to the listener. + EnvoyFilter_LISTENER EnvoyFilter_ApplyTo = 1 + // Applies the patch to the filter chain. + EnvoyFilter_FILTER_CHAIN EnvoyFilter_ApplyTo = 2 + // Applies the patch to the network filter chain, to modify an + // existing filter or add a new filter. + EnvoyFilter_NETWORK_FILTER EnvoyFilter_ApplyTo = 3 + // Applies the patch to the HTTP filter chain in the http + // connection manager, to modify an existing filter or add a new + // filter. + EnvoyFilter_HTTP_FILTER EnvoyFilter_ApplyTo = 4 + // Applies the patch to the Route configuration (rds output) + // inside a HTTP connection manager. This does not apply to the + // virtual host. Currently, only MERGE operation is allowed on the + // route configuration objects. + EnvoyFilter_ROUTE_CONFIGURATION EnvoyFilter_ApplyTo = 5 + // Applies the patch to a virtual host inside a route configuration. + EnvoyFilter_VIRTUAL_HOST EnvoyFilter_ApplyTo = 6 + // Applies the patch to a route object inside the matched virtual + // host in a route configuration. Currently, only MERGE operation + // is allowed on the route objects. + EnvoyFilter_HTTP_ROUTE EnvoyFilter_ApplyTo = 7 + // Applies the patch to a cluster in a CDS output. Also used to add new clusters. 
+ EnvoyFilter_CLUSTER EnvoyFilter_ApplyTo = 8 +) + +var EnvoyFilter_ApplyTo_name = map[int32]string{ + 0: "INVALID", + 1: "LISTENER", + 2: "FILTER_CHAIN", + 3: "NETWORK_FILTER", + 4: "HTTP_FILTER", + 5: "ROUTE_CONFIGURATION", + 6: "VIRTUAL_HOST", + 7: "HTTP_ROUTE", + 8: "CLUSTER", +} + +var EnvoyFilter_ApplyTo_value = map[string]int32{ + "INVALID": 0, + "LISTENER": 1, + "FILTER_CHAIN": 2, + "NETWORK_FILTER": 3, + "HTTP_FILTER": 4, + "ROUTE_CONFIGURATION": 5, + "VIRTUAL_HOST": 6, + "HTTP_ROUTE": 7, + "CLUSTER": 8, +} + +func (x EnvoyFilter_ApplyTo) String() string { + return proto.EnumName(EnvoyFilter_ApplyTo_name, int32(x)) +} + +func (EnvoyFilter_ApplyTo) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 0} +} + +// PatchContext selects a class of configurations based on the +// traffic flow direction and workload type. +type EnvoyFilter_PatchContext int32 + +const ( + // All listeners/routes/clusters in both sidecars and gateways. + EnvoyFilter_ANY EnvoyFilter_PatchContext = 0 + // Inbound listener/route/cluster in sidecar. + EnvoyFilter_SIDECAR_INBOUND EnvoyFilter_PatchContext = 1 + // Outbound listener/route/cluster in sidecar. + EnvoyFilter_SIDECAR_OUTBOUND EnvoyFilter_PatchContext = 2 + // Gateway listener/route/cluster. 
+ EnvoyFilter_GATEWAY EnvoyFilter_PatchContext = 3 +) + +var EnvoyFilter_PatchContext_name = map[int32]string{ + 0: "ANY", + 1: "SIDECAR_INBOUND", + 2: "SIDECAR_OUTBOUND", + 3: "GATEWAY", +} + +var EnvoyFilter_PatchContext_value = map[string]int32{ + "ANY": 0, + "SIDECAR_INBOUND": 1, + "SIDECAR_OUTBOUND": 2, + "GATEWAY": 3, +} + +func (x EnvoyFilter_PatchContext) String() string { + return proto.EnumName(EnvoyFilter_PatchContext_name, int32(x)) +} + +func (EnvoyFilter_PatchContext) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 1} +} + +type EnvoyFilter_DeprecatedListenerMatch_ListenerType int32 + +const ( + // All listeners + EnvoyFilter_DeprecatedListenerMatch_ANY EnvoyFilter_DeprecatedListenerMatch_ListenerType = 0 + // Inbound listener in sidecar + EnvoyFilter_DeprecatedListenerMatch_SIDECAR_INBOUND EnvoyFilter_DeprecatedListenerMatch_ListenerType = 1 + // Outbound listener in sidecar + EnvoyFilter_DeprecatedListenerMatch_SIDECAR_OUTBOUND EnvoyFilter_DeprecatedListenerMatch_ListenerType = 2 + // Gateway listener + EnvoyFilter_DeprecatedListenerMatch_GATEWAY EnvoyFilter_DeprecatedListenerMatch_ListenerType = 3 +) + +var EnvoyFilter_DeprecatedListenerMatch_ListenerType_name = map[int32]string{ + 0: "ANY", + 1: "SIDECAR_INBOUND", + 2: "SIDECAR_OUTBOUND", + 3: "GATEWAY", +} + +var EnvoyFilter_DeprecatedListenerMatch_ListenerType_value = map[string]int32{ + "ANY": 0, + "SIDECAR_INBOUND": 1, + "SIDECAR_OUTBOUND": 2, + "GATEWAY": 3, +} + +func (x EnvoyFilter_DeprecatedListenerMatch_ListenerType) String() string { + return proto.EnumName(EnvoyFilter_DeprecatedListenerMatch_ListenerType_name, int32(x)) +} + +func (EnvoyFilter_DeprecatedListenerMatch_ListenerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 0, 0} +} + +type EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol int32 + +const ( + // All protocols + EnvoyFilter_DeprecatedListenerMatch_ALL 
EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol = 0 + // HTTP or HTTPS (with termination) / HTTP2/gRPC + EnvoyFilter_DeprecatedListenerMatch_HTTP EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol = 1 + // Any non-HTTP listener + EnvoyFilter_DeprecatedListenerMatch_TCP EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol = 2 +) + +var EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol_name = map[int32]string{ + 0: "ALL", + 1: "HTTP", + 2: "TCP", +} + +var EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol_value = map[string]int32{ + "ALL": 0, + "HTTP": 1, + "TCP": 2, +} + +func (x EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol) String() string { + return proto.EnumName(EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol_name, int32(x)) +} + +func (EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 0, 1} +} + +// Index/position in the filter chain. +type EnvoyFilter_InsertPosition_Index int32 + +const ( + // Insert first + EnvoyFilter_InsertPosition_FIRST EnvoyFilter_InsertPosition_Index = 0 + // Insert last + EnvoyFilter_InsertPosition_LAST EnvoyFilter_InsertPosition_Index = 1 + // Insert before the named filter. + EnvoyFilter_InsertPosition_BEFORE EnvoyFilter_InsertPosition_Index = 2 + // Insert after the named filter. 
+ EnvoyFilter_InsertPosition_AFTER EnvoyFilter_InsertPosition_Index = 3 +) + +var EnvoyFilter_InsertPosition_Index_name = map[int32]string{ + 0: "FIRST", + 1: "LAST", + 2: "BEFORE", + 3: "AFTER", +} + +var EnvoyFilter_InsertPosition_Index_value = map[string]int32{ + "FIRST": 0, + "LAST": 1, + "BEFORE": 2, + "AFTER": 3, +} + +func (x EnvoyFilter_InsertPosition_Index) String() string { + return proto.EnumName(EnvoyFilter_InsertPosition_Index_name, int32(x)) +} + +func (EnvoyFilter_InsertPosition_Index) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 1, 0} +} + +type EnvoyFilter_Filter_FilterType int32 + +const ( + // placeholder + EnvoyFilter_Filter_INVALID EnvoyFilter_Filter_FilterType = 0 + // Http filter + EnvoyFilter_Filter_HTTP EnvoyFilter_Filter_FilterType = 1 + // Network filter + EnvoyFilter_Filter_NETWORK EnvoyFilter_Filter_FilterType = 2 +) + +var EnvoyFilter_Filter_FilterType_name = map[int32]string{ + 0: "INVALID", + 1: "HTTP", + 2: "NETWORK", +} + +var EnvoyFilter_Filter_FilterType_value = map[string]int32{ + "INVALID": 0, + "HTTP": 1, + "NETWORK": 2, +} + +func (x EnvoyFilter_Filter_FilterType) String() string { + return proto.EnumName(EnvoyFilter_Filter_FilterType_name, int32(x)) +} + +func (EnvoyFilter_Filter_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 2, 0} +} + +// Action refers to the route action taken by Envoy when a http route matches. +type EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action int32 + +const ( + // All three route actions + EnvoyFilter_RouteConfigurationMatch_RouteMatch_ANY EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action = 0 + // Route traffic to a cluster / weighted clusters. + EnvoyFilter_RouteConfigurationMatch_RouteMatch_ROUTE EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action = 1 + // Redirect request. 
+ EnvoyFilter_RouteConfigurationMatch_RouteMatch_REDIRECT EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action = 2 + // directly respond to a request with specific payload. + EnvoyFilter_RouteConfigurationMatch_RouteMatch_DIRECT_RESPONSE EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action = 3 +) + +var EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action_name = map[int32]string{ + 0: "ANY", + 1: "ROUTE", + 2: "REDIRECT", + 3: "DIRECT_RESPONSE", +} + +var EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action_value = map[string]int32{ + "ANY": 0, + "ROUTE": 1, + "REDIRECT": 2, + "DIRECT_RESPONSE": 3, +} + +func (x EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action) String() string { + return proto.EnumName(EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action_name, int32(x)) +} + +func (EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 5, 0, 0} +} + +// Operation denotes how the patch should be applied to the selected +// configuration. +type EnvoyFilter_Patch_Operation int32 + +const ( + EnvoyFilter_Patch_INVALID EnvoyFilter_Patch_Operation = 0 + // Merge the provided config with the generated config using + // json merge semantics. + EnvoyFilter_Patch_MERGE EnvoyFilter_Patch_Operation = 1 + // Add the provided config to an existing list (of listeners, + // clusters, virtual hosts, network filters, or http + // filters). This operation will be ignored when applyTo is set + // to ROUTE_CONFIGURATION, or HTTP_ROUTE. + EnvoyFilter_Patch_ADD EnvoyFilter_Patch_Operation = 2 + // Remove the selected object from the list (of listeners, + // clusters, virtual hosts, network filters, or http + // filters). Does not require a value to be specified. This + // operation will be ignored when applyTo is set to + // ROUTE_CONFIGURATION, or HTTP_ROUTE. + EnvoyFilter_Patch_REMOVE EnvoyFilter_Patch_Operation = 3 + // Insert operation on an array of named objects. 
This operation + // is typically useful only in the context of filters, where the + // order of filters matter. For clusters and virtual hosts, + // order of the element in the array does not matter. Insert + // before the selected filter or sub filter. If no filter is + // selected, the specified filter will be inserted at the front + // of the list. + EnvoyFilter_Patch_INSERT_BEFORE EnvoyFilter_Patch_Operation = 4 + // Insert operation on an array of named objects. This operation + // is typically useful only in the context of filters, where the + // order of filters matter. For clusters and virtual hosts, + // order of the element in the array does not matter. Insert + // after the selected filter or sub filter. If no filter is + // selected, the specified filter will be inserted at the end + // of the list. + EnvoyFilter_Patch_INSERT_AFTER EnvoyFilter_Patch_Operation = 5 +) + +var EnvoyFilter_Patch_Operation_name = map[int32]string{ + 0: "INVALID", + 1: "MERGE", + 2: "ADD", + 3: "REMOVE", + 4: "INSERT_BEFORE", + 5: "INSERT_AFTER", +} + +var EnvoyFilter_Patch_Operation_value = map[string]int32{ + "INVALID": 0, + "MERGE": 1, + "ADD": 2, + "REMOVE": 3, + "INSERT_BEFORE": 4, + "INSERT_AFTER": 5, +} + +func (x EnvoyFilter_Patch_Operation) String() string { + return proto.EnumName(EnvoyFilter_Patch_Operation_name, int32(x)) +} + +func (EnvoyFilter_Patch_Operation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 7, 0} +} + +// EnvoyFilter provides a mechanism to customize the Envoy configuration +// generated by Istio Pilot. +// +// +type EnvoyFilter struct { + // Deprecated. Use workload_selector instead. + // $hide_from_docs + WorkloadLabels map[string]string `protobuf:"bytes,1,rep,name=workload_labels,json=workloadLabels,proto3" json:"workload_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. 
+ // $hide_from_docs + Filters []*EnvoyFilter_Filter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` // Deprecated: Do not use. + // Criteria used to select the specific set of pods/VMs on which + // this patch configuration should be applied. If omitted, the set + // of patches in this configuration will be applied to all workload + // instances in the same namespace. If omitted, the EnvoyFilter + // patches will be applied to all workloads in the same + // namespace. If the EnvoyFilter is present in the config root + // namespace, it will be applied to all applicable workloads in any + // namespace. + WorkloadSelector *WorkloadSelector `protobuf:"bytes,3,opt,name=workload_selector,json=workloadSelector,proto3" json:"workload_selector,omitempty"` + // One or more patches with match conditions. + ConfigPatches []*EnvoyFilter_EnvoyConfigObjectPatch `protobuf:"bytes,4,rep,name=config_patches,json=configPatches,proto3" json:"config_patches,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter) Reset() { *m = EnvoyFilter{} } +func (m *EnvoyFilter) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter) ProtoMessage() {} +func (*EnvoyFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0} +} +func (m *EnvoyFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter.Merge(m, src) +} +func (m *EnvoyFilter) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter) XXX_DiscardUnknown() { + 
xxx_messageInfo_EnvoyFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter proto.InternalMessageInfo + +// Deprecated: Do not use. +func (m *EnvoyFilter) GetWorkloadLabels() map[string]string { + if m != nil { + return m.WorkloadLabels + } + return nil +} + +// Deprecated: Do not use. +func (m *EnvoyFilter) GetFilters() []*EnvoyFilter_Filter { + if m != nil { + return m.Filters + } + return nil +} + +func (m *EnvoyFilter) GetWorkloadSelector() *WorkloadSelector { + if m != nil { + return m.WorkloadSelector + } + return nil +} + +func (m *EnvoyFilter) GetConfigPatches() []*EnvoyFilter_EnvoyConfigObjectPatch { + if m != nil { + return m.ConfigPatches + } + return nil +} + +// Deprecated. +// Select a listener to add the filter to based on the match conditions. +// All conditions specified in the ListenerMatch must be met for the filter +// to be applied to a listener. +// $hide_from_docs +type EnvoyFilter_DeprecatedListenerMatch struct { + // The service port/gateway port to which traffic is being + // sent/received. If not specified, matches all listeners. Even though + // inbound listeners are generated for the instance/pod ports, only + // service ports should be used to match listeners. + PortNumber uint32 `protobuf:"varint,1,opt,name=port_number,json=portNumber,proto3" json:"port_number,omitempty"` + // Instead of using specific port numbers, a set of ports matching a + // given port name prefix can be selected. E.g., "mongo" selects ports + // named mongo-port, mongo, mongoDB, MONGO, etc. Matching is case + // insensitive. + PortNamePrefix string `protobuf:"bytes,2,opt,name=port_name_prefix,json=portNamePrefix,proto3" json:"port_name_prefix,omitempty"` + // Inbound vs outbound sidecar listener or gateway listener. If not specified, + // matches all listeners. 
+ ListenerType EnvoyFilter_DeprecatedListenerMatch_ListenerType `protobuf:"varint,3,opt,name=listener_type,json=listenerType,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_DeprecatedListenerMatch_ListenerType" json:"listener_type,omitempty"` + // Selects a class of listeners for the same protocol. Use the protocol + // selection to select all HTTP listeners (includes HTTP2/gRPC/HTTPS + // where Envoy terminates TLS) or all TCP listeners (includes HTTPS + // passthrough using SNI). When adding a HTTP filter, the listenerProtocol + // should be set to HTTP. + ListenerProtocol EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol `protobuf:"varint,4,opt,name=listener_protocol,json=listenerProtocol,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol" json:"listener_protocol,omitempty"` + // One or more IP addresses to which the listener is bound. If + // specified, should match at least one address in the list. + Address []string `protobuf:"bytes,5,rep,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) Reset() { *m = EnvoyFilter_DeprecatedListenerMatch{} } +func (m *EnvoyFilter_DeprecatedListenerMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_DeprecatedListenerMatch) ProtoMessage() {} +func (*EnvoyFilter_DeprecatedListenerMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 0} +} +func (m *EnvoyFilter_DeprecatedListenerMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_DeprecatedListenerMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_DeprecatedListenerMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil + } +} +func (m *EnvoyFilter_DeprecatedListenerMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_DeprecatedListenerMatch.Merge(m, src) +} +func (m *EnvoyFilter_DeprecatedListenerMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_DeprecatedListenerMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_DeprecatedListenerMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_DeprecatedListenerMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_DeprecatedListenerMatch) GetPortNumber() uint32 { + if m != nil { + return m.PortNumber + } + return 0 +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) GetPortNamePrefix() string { + if m != nil { + return m.PortNamePrefix + } + return "" +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) GetListenerType() EnvoyFilter_DeprecatedListenerMatch_ListenerType { + if m != nil { + return m.ListenerType + } + return EnvoyFilter_DeprecatedListenerMatch_ANY +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) GetListenerProtocol() EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol { + if m != nil { + return m.ListenerProtocol + } + return EnvoyFilter_DeprecatedListenerMatch_ALL +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) GetAddress() []string { + if m != nil { + return m.Address + } + return nil +} + +// Deprecated. +// Indicates the relative index in the filter chain where the filter should be inserted. +// $hide_from_docs +type EnvoyFilter_InsertPosition struct { + // Position of this filter in the filter chain. + Index EnvoyFilter_InsertPosition_Index `protobuf:"varint,1,opt,name=index,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_InsertPosition_Index" json:"index,omitempty"` + // If BEFORE or AFTER position is specified, specify the name of the + // filter relative to which this filter should be inserted. 
+ RelativeTo string `protobuf:"bytes,2,opt,name=relative_to,json=relativeTo,proto3" json:"relative_to,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_InsertPosition) Reset() { *m = EnvoyFilter_InsertPosition{} } +func (m *EnvoyFilter_InsertPosition) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_InsertPosition) ProtoMessage() {} +func (*EnvoyFilter_InsertPosition) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 1} +} +func (m *EnvoyFilter_InsertPosition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_InsertPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_InsertPosition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_InsertPosition) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_InsertPosition.Merge(m, src) +} +func (m *EnvoyFilter_InsertPosition) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_InsertPosition) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_InsertPosition.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_InsertPosition proto.InternalMessageInfo + +func (m *EnvoyFilter_InsertPosition) GetIndex() EnvoyFilter_InsertPosition_Index { + if m != nil { + return m.Index + } + return EnvoyFilter_InsertPosition_FIRST +} + +func (m *EnvoyFilter_InsertPosition) GetRelativeTo() string { + if m != nil { + return m.RelativeTo + } + return "" +} + +// Deprecated. +// Envoy filters to be added to a network or http filter chain. +// $hide_from_docs +type EnvoyFilter_Filter struct { + // Filter will be added to the listener only if the match + // conditions are true. 
If not specified, the filters will be + // applied to all listeners where possible, potentially resulting + // in invalid configurations. It is recommended to specify the + // listener match criteria for all filter insertions. + ListenerMatch *EnvoyFilter_DeprecatedListenerMatch `protobuf:"bytes,1,opt,name=listener_match,json=listenerMatch,proto3" json:"listener_match,omitempty"` + // Insert position in the filter chain. Defaults to FIRST + InsertPosition *EnvoyFilter_InsertPosition `protobuf:"bytes,2,opt,name=insert_position,json=insertPosition,proto3" json:"insert_position,omitempty"` + // The type of filter to instantiate. + FilterType EnvoyFilter_Filter_FilterType `protobuf:"varint,3,opt,name=filter_type,json=filterType,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_Filter_FilterType" json:"filter_type,omitempty"` + // The name of the filter to instantiate. The name must match a supported + // filter _compiled into_ Envoy. + FilterName string `protobuf:"bytes,4,opt,name=filter_name,json=filterName,proto3" json:"filter_name,omitempty"` + // Filter specific configuration which depends on the filter being + // instantiated. 
+ FilterConfig *types.Struct `protobuf:"bytes,5,opt,name=filter_config,json=filterConfig,proto3" json:"filter_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_Filter) Reset() { *m = EnvoyFilter_Filter{} } +func (m *EnvoyFilter_Filter) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_Filter) ProtoMessage() {} +func (*EnvoyFilter_Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 2} +} +func (m *EnvoyFilter_Filter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_Filter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_Filter.Merge(m, src) +} +func (m *EnvoyFilter_Filter) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_Filter) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_Filter proto.InternalMessageInfo + +func (m *EnvoyFilter_Filter) GetListenerMatch() *EnvoyFilter_DeprecatedListenerMatch { + if m != nil { + return m.ListenerMatch + } + return nil +} + +func (m *EnvoyFilter_Filter) GetInsertPosition() *EnvoyFilter_InsertPosition { + if m != nil { + return m.InsertPosition + } + return nil +} + +func (m *EnvoyFilter_Filter) GetFilterType() EnvoyFilter_Filter_FilterType { + if m != nil { + return m.FilterType + } + return EnvoyFilter_Filter_INVALID +} + +func (m *EnvoyFilter_Filter) GetFilterName() string { + if m != nil { + return m.FilterName + } + return "" +} + +func (m *EnvoyFilter_Filter) GetFilterConfig() *types.Struct { + if m != nil { + return 
m.FilterConfig + } + return nil +} + +// One or more properties of the proxy to match on. +type EnvoyFilter_ProxyMatch struct { + // A regular expression in golang regex format (RE2) that can be + // used to select proxies using a specific version of istio + // proxy. The Istio version for a given proxy is obtained from the + // node metadata field ISTIO_VERSION supplied by the proxy when + // connecting to Pilot. This value is embedded as an environment + // variable (ISTIO_META_ISTIO_VERSION) in the Istio proxy docker + // image. Custom proxy implementations should provide this metadata + // variable to take advantage of the Istio version check option. + ProxyVersion string `protobuf:"bytes,1,opt,name=proxy_version,json=proxyVersion,proto3" json:"proxy_version,omitempty"` + // Match on the node metadata supplied by a proxy when connecting + // to Istio Pilot. Note that while Envoy's node metadata is of + // type Struct, only string key-value pairs are processed by + // Pilot. All keys specified in the metadata must match with exact + // values. The match will fail if any of the specified keys are + // absent or the values fail to match. 
+ Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ProxyMatch) Reset() { *m = EnvoyFilter_ProxyMatch{} } +func (m *EnvoyFilter_ProxyMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_ProxyMatch) ProtoMessage() {} +func (*EnvoyFilter_ProxyMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 3} +} +func (m *EnvoyFilter_ProxyMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ProxyMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ProxyMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ProxyMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_ProxyMatch.Merge(m, src) +} +func (m *EnvoyFilter_ProxyMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_ProxyMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ProxyMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ProxyMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ProxyMatch) GetProxyVersion() string { + if m != nil { + return m.ProxyVersion + } + return "" +} + +func (m *EnvoyFilter_ProxyMatch) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +// Conditions specified in ClusterMatch must be met for the patch +// to be applied to a cluster. +type EnvoyFilter_ClusterMatch struct { + // The service port for which this cluster was generated. If + // omitted, applies to clusters for any port. 
+ PortNumber uint32 `protobuf:"varint,1,opt,name=port_number,json=portNumber,proto3" json:"port_number,omitempty"` + // The fully qualified service name for this cluster. If omitted, + // applies to clusters for any service. For services defined + // through service entries, the service name is same as the hosts + // defined in the service entry. + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + // The subset associated with the service. If omitted, applies to + // clusters for any subset of a service. + Subset string `protobuf:"bytes,3,opt,name=subset,proto3" json:"subset,omitempty"` + // The exact name of the cluster to match. To match a specific + // cluster by name, such as the internally generated "Passthrough" + // cluster, leave all fields in clusterMatch empty, except the + // name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ClusterMatch) Reset() { *m = EnvoyFilter_ClusterMatch{} } +func (m *EnvoyFilter_ClusterMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_ClusterMatch) ProtoMessage() {} +func (*EnvoyFilter_ClusterMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 4} +} +func (m *EnvoyFilter_ClusterMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ClusterMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ClusterMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ClusterMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_ClusterMatch.Merge(m, src) +} +func (m *EnvoyFilter_ClusterMatch) XXX_Size() int { + return 
m.Size() +} +func (m *EnvoyFilter_ClusterMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ClusterMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ClusterMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ClusterMatch) GetPortNumber() uint32 { + if m != nil { + return m.PortNumber + } + return 0 +} + +func (m *EnvoyFilter_ClusterMatch) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func (m *EnvoyFilter_ClusterMatch) GetSubset() string { + if m != nil { + return m.Subset + } + return "" +} + +func (m *EnvoyFilter_ClusterMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Conditions specified in RouteConfigurationMatch must be met for +// the patch to be applied to a route configuration object or a +// specific virtual host within the route configuration. +type EnvoyFilter_RouteConfigurationMatch struct { + // The service port number or gateway server port number for which + // this route configuration was generated. If omitted, applies to + // route configurations for all ports. + PortNumber uint32 `protobuf:"varint,1,opt,name=port_number,json=portNumber,proto3" json:"port_number,omitempty"` + // Applicable only for GATEWAY context. The gateway server port + // name for which this route configuration was generated. + PortName string `protobuf:"bytes,2,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"` + // The Istio gateway config's namespace/name for which this route + // configuration was generated. Applies only if the context is + // GATEWAY. Should be in the namespace/name format. Use this field + // in conjunction with the portNumber and portName to accurately + // select the Envoy route configuration for a specific HTTPS + // server within a gateway config object. + Gateway string `protobuf:"bytes,3,opt,name=gateway,proto3" json:"gateway,omitempty"` + // Match a specific virtual host in a route configuration and + // apply the patch to the virtual host. 
+ Vhost *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch `protobuf:"bytes,4,opt,name=vhost,proto3" json:"vhost,omitempty"` + // Route configuration name to match on. Can be used to match a + // specific route configuration by name, such as the internally + // generated "http_proxy" route configuration for all sidecars. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_RouteConfigurationMatch) Reset() { *m = EnvoyFilter_RouteConfigurationMatch{} } +func (m *EnvoyFilter_RouteConfigurationMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_RouteConfigurationMatch) ProtoMessage() {} +func (*EnvoyFilter_RouteConfigurationMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 5} +} +func (m *EnvoyFilter_RouteConfigurationMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_RouteConfigurationMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_RouteConfigurationMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch.Merge(m, src) +} +func (m *EnvoyFilter_RouteConfigurationMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_RouteConfigurationMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_RouteConfigurationMatch) GetPortNumber() uint32 { + if m != nil { + return m.PortNumber + } + return 0 +} + +func (m 
*EnvoyFilter_RouteConfigurationMatch) GetPortName() string { + if m != nil { + return m.PortName + } + return "" +} + +func (m *EnvoyFilter_RouteConfigurationMatch) GetGateway() string { + if m != nil { + return m.Gateway + } + return "" +} + +func (m *EnvoyFilter_RouteConfigurationMatch) GetVhost() *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch { + if m != nil { + return m.Vhost + } + return nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Match a specific route inside a virtual host in a route configuration. +type EnvoyFilter_RouteConfigurationMatch_RouteMatch struct { + // The Route objects generated by default are named as + // "default". Route objects generated using a virtual service + // will carry the name used in the virtual service's HTTP + // routes. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Match a route with specific action type. + Action EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action `protobuf:"varint,2,opt,name=action,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) Reset() { + *m = EnvoyFilter_RouteConfigurationMatch_RouteMatch{} +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) String() string { + return proto.CompactTextString(m) +} +func (*EnvoyFilter_RouteConfigurationMatch_RouteMatch) ProtoMessage() {} +func (*EnvoyFilter_RouteConfigurationMatch_RouteMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 5, 0} +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_RouteMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_RouteMatch.Merge(m, src) +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_RouteMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_RouteMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) GetAction() EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action { + if m != nil { + return m.Action + } + return EnvoyFilter_RouteConfigurationMatch_RouteMatch_ANY +} + +// Match a specific virtual host inside a route configuration. +type EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch struct { + // The VirtualHosts objects generated by Istio are named as + // host:port, where the host typically corresponds to the + // VirtualService's host field or the hostname of a service in the + // registry. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Match a specific route within the virtual host. 
+ Route *EnvoyFilter_RouteConfigurationMatch_RouteMatch `protobuf:"bytes,2,opt,name=route,proto3" json:"route,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) Reset() { + *m = EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch{} +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) String() string { + return proto.CompactTextString(m) +} +func (*EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) ProtoMessage() {} +func (*EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 5, 1} +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch.Merge(m, src) +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m 
*EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) GetRoute() *EnvoyFilter_RouteConfigurationMatch_RouteMatch { + if m != nil { + return m.Route + } + return nil +} + +// Conditions specified in a listener match must be met for the +// patch to be applied to a specific listener across all filter +// chains, or a specific filter chain inside the listener. +type EnvoyFilter_ListenerMatch struct { + // The service port/gateway port to which traffic is being + // sent/received. If not specified, matches all listeners. Even though + // inbound listeners are generated for the instance/pod ports, only + // service ports should be used to match listeners. + PortNumber uint32 `protobuf:"varint,1,opt,name=port_number,json=portNumber,proto3" json:"port_number,omitempty"` + // Instead of using specific port numbers, a set of ports matching + // a given service's port name can be selected. Matching is case + // insensitive. + // Not implemented. + // $hide_from_docs + PortName string `protobuf:"bytes,2,opt,name=port_name,json=portName,proto3" json:"port_name,omitempty"` + // Match a specific filter chain in a listener. If specified, the + // patch will be applied to the filter chain (and a specific + // filter if specified) and not to other filter chains in the + // listener. + FilterChain *EnvoyFilter_ListenerMatch_FilterChainMatch `protobuf:"bytes,3,opt,name=filter_chain,json=filterChain,proto3" json:"filter_chain,omitempty"` + // Match a specific listener by its name. The listeners generated + // by Pilot are typically named as IP:Port. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ListenerMatch) Reset() { *m = EnvoyFilter_ListenerMatch{} } +func (m *EnvoyFilter_ListenerMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_ListenerMatch) ProtoMessage() {} +func (*EnvoyFilter_ListenerMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 6} +} +func (m *EnvoyFilter_ListenerMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ListenerMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ListenerMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ListenerMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_ListenerMatch.Merge(m, src) +} +func (m *EnvoyFilter_ListenerMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_ListenerMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ListenerMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ListenerMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ListenerMatch) GetPortNumber() uint32 { + if m != nil { + return m.PortNumber + } + return 0 +} + +func (m *EnvoyFilter_ListenerMatch) GetPortName() string { + if m != nil { + return m.PortName + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch) GetFilterChain() *EnvoyFilter_ListenerMatch_FilterChainMatch { + if m != nil { + return m.FilterChain + } + return nil +} + +func (m *EnvoyFilter_ListenerMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// For listeners with multiple filter chains (e.g., inbound +// listeners on sidecars with permissive mTLS, 
gateway listeners +// with multiple SNI matches), the filter chain match can be used +// to select a specific filter chain to patch. +type EnvoyFilter_ListenerMatch_FilterChainMatch struct { + // The name assigned to the filter chain. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The SNI value used by a filter chain's match condition. This + // condition will evaluate to false if the filter chain has no + // sni match. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` + // Applies only to SIDECAR_INBOUND context. If non-empty, a + // transport protocol to consider when determining a filter + // chain match. This value will be compared against the + // transport protocol of a new connection, when it's detected by + // the tls_inspector listener filter. + // + // Accepted values include: + // + // * `raw_buffer` - default, used when no transport protocol is detected. + // * `tls` - set when TLS protocol is detected by the TLS inspector. + TransportProtocol string `protobuf:"bytes,3,opt,name=transport_protocol,json=transportProtocol,proto3" json:"transport_protocol,omitempty"` + // Applies only to sidecars. If non-empty, a comma separated set + // of application protocols to consider when determining a + // filter chain match. This value will be compared against the + // application protocols of a new connection, when it's detected + // by one of the listener filters such as the http_inspector. + // + // Accepted values include: h2,http/1.1,http/1.0 + ApplicationProtocols string `protobuf:"bytes,4,opt,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The name of a specific filter to apply the patch to. Set this + // to envoy.http_connection_manager to add a filter or apply a + // patch to the HTTP connection manager. 
+ Filter *EnvoyFilter_ListenerMatch_FilterMatch `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) Reset() { + *m = EnvoyFilter_ListenerMatch_FilterChainMatch{} +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) String() string { + return proto.CompactTextString(m) +} +func (*EnvoyFilter_ListenerMatch_FilterChainMatch) ProtoMessage() {} +func (*EnvoyFilter_ListenerMatch_FilterChainMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 6, 0} +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterChainMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterChainMatch.Merge(m, src) +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterChainMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterChainMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) GetSni() string { + if m != nil { + return m.Sni + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) GetTransportProtocol() string { 
+ if m != nil { + return m.TransportProtocol + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) GetApplicationProtocols() string { + if m != nil { + return m.ApplicationProtocols + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) GetFilter() *EnvoyFilter_ListenerMatch_FilterMatch { + if m != nil { + return m.Filter + } + return nil +} + +// Conditions to match a specific filter within a filter chain. +type EnvoyFilter_ListenerMatch_FilterMatch struct { + // The filter name to match on. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The next level filter within this filter to match + // upon. Typically used for HTTP Connection Manager filters and + // Thrift filters. + SubFilter *EnvoyFilter_ListenerMatch_SubFilterMatch `protobuf:"bytes,2,opt,name=sub_filter,json=subFilter,proto3" json:"sub_filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) Reset() { *m = EnvoyFilter_ListenerMatch_FilterMatch{} } +func (m *EnvoyFilter_ListenerMatch_FilterMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_ListenerMatch_FilterMatch) ProtoMessage() {} +func (*EnvoyFilter_ListenerMatch_FilterMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 6, 1} +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterMatch.Merge(m, src) +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ListenerMatch_FilterMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) GetSubFilter() *EnvoyFilter_ListenerMatch_SubFilterMatch { + if m != nil { + return m.SubFilter + } + return nil +} + +// Conditions to match a specific filter within another +// filter. This field is typically useful to match a HTTP filter +// inside the envoy.http_connection_manager network filter. This +// could also be applicable for thrift filters. +type EnvoyFilter_ListenerMatch_SubFilterMatch struct { + // The filter name to match on. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) Reset() { + *m = EnvoyFilter_ListenerMatch_SubFilterMatch{} +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_ListenerMatch_SubFilterMatch) ProtoMessage() {} +func (*EnvoyFilter_ListenerMatch_SubFilterMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 6, 2} +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_ListenerMatch_SubFilterMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_ListenerMatch_SubFilterMatch.Merge(m, src) +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_ListenerMatch_SubFilterMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_ListenerMatch_SubFilterMatch proto.InternalMessageInfo + +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Patch specifies how the selected object should be modified. +type EnvoyFilter_Patch struct { + // Determines how the patch should be applied. 
+ Operation EnvoyFilter_Patch_Operation `protobuf:"varint,1,opt,name=operation,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_Patch_Operation" json:"operation,omitempty"` + // The JSON config of the object being patched. This will be merged using + // json merge semantics with the existing proto in the path. + Value *types.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_Patch) Reset() { *m = EnvoyFilter_Patch{} } +func (m *EnvoyFilter_Patch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_Patch) ProtoMessage() {} +func (*EnvoyFilter_Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 7} +} +func (m *EnvoyFilter_Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_Patch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_Patch.Merge(m, src) +} +func (m *EnvoyFilter_Patch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_Patch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_Patch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_Patch proto.InternalMessageInfo + +func (m *EnvoyFilter_Patch) GetOperation() EnvoyFilter_Patch_Operation { + if m != nil { + return m.Operation + } + return EnvoyFilter_Patch_INVALID +} + +func (m *EnvoyFilter_Patch) GetValue() *types.Struct { + if m != nil { + return m.Value + } + return nil +} + +// One or more match conditions to be met before a patch is applied +// to the generated configuration for a given proxy. 
+type EnvoyFilter_EnvoyConfigObjectMatch struct { + // The specific config generation context to match on. Istio Pilot + // generates envoy configuration in the context of a gateway, + // inbound traffic to sidecar and outbound traffic from sidecar. + Context EnvoyFilter_PatchContext `protobuf:"varint,1,opt,name=context,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_PatchContext" json:"context,omitempty"` + // Match on properties associated with a proxy. + Proxy *EnvoyFilter_ProxyMatch `protobuf:"bytes,2,opt,name=proxy,proto3" json:"proxy,omitempty"` + // Types that are valid to be assigned to ObjectTypes: + // *EnvoyFilter_EnvoyConfigObjectMatch_Listener + // *EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration + // *EnvoyFilter_EnvoyConfigObjectMatch_Cluster + ObjectTypes isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes `protobuf_oneof:"object_types"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) Reset() { *m = EnvoyFilter_EnvoyConfigObjectMatch{} } +func (m *EnvoyFilter_EnvoyConfigObjectMatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_EnvoyConfigObjectMatch) ProtoMessage() {} +func (*EnvoyFilter_EnvoyConfigObjectMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 8} +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectMatch.Merge(m, src) +} +func (m 
*EnvoyFilter_EnvoyConfigObjectMatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectMatch proto.InternalMessageInfo + +type isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes interface { + isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes() + MarshalTo([]byte) (int, error) + Size() int +} + +type EnvoyFilter_EnvoyConfigObjectMatch_Listener struct { + Listener *EnvoyFilter_ListenerMatch `protobuf:"bytes,3,opt,name=listener,proto3,oneof"` +} +type EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration struct { + RouteConfiguration *EnvoyFilter_RouteConfigurationMatch `protobuf:"bytes,4,opt,name=route_configuration,json=routeConfiguration,proto3,oneof"` +} +type EnvoyFilter_EnvoyConfigObjectMatch_Cluster struct { + Cluster *EnvoyFilter_ClusterMatch `protobuf:"bytes,5,opt,name=cluster,proto3,oneof"` +} + +func (*EnvoyFilter_EnvoyConfigObjectMatch_Listener) isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes() { +} +func (*EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration) isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes() { +} +func (*EnvoyFilter_EnvoyConfigObjectMatch_Cluster) isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes() { +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetObjectTypes() isEnvoyFilter_EnvoyConfigObjectMatch_ObjectTypes { + if m != nil { + return m.ObjectTypes + } + return nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetContext() EnvoyFilter_PatchContext { + if m != nil { + return m.Context + } + return EnvoyFilter_ANY +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetProxy() *EnvoyFilter_ProxyMatch { + if m != nil { + return m.Proxy + } + return nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetListener() *EnvoyFilter_ListenerMatch { + if x, ok := m.GetObjectTypes().(*EnvoyFilter_EnvoyConfigObjectMatch_Listener); ok { + return x.Listener + } 
+ return nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetRouteConfiguration() *EnvoyFilter_RouteConfigurationMatch { + if x, ok := m.GetObjectTypes().(*EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration); ok { + return x.RouteConfiguration + } + return nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) GetCluster() *EnvoyFilter_ClusterMatch { + if x, ok := m.GetObjectTypes().(*EnvoyFilter_EnvoyConfigObjectMatch_Cluster); ok { + return x.Cluster + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*EnvoyFilter_EnvoyConfigObjectMatch) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*EnvoyFilter_EnvoyConfigObjectMatch_Listener)(nil), + (*EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration)(nil), + (*EnvoyFilter_EnvoyConfigObjectMatch_Cluster)(nil), + } +} + +// Changes to be made to various envoy config objects. +type EnvoyFilter_EnvoyConfigObjectPatch struct { + // Specifies where in the Envoy configuration, the patch should be + // applied. The match is expected to select the appropriate + // object based on applyTo. For example, an applyTo with + // HTTP_FILTER is expected to have a match condition on the + // listeners, with a network filter selection on + // envoy.http_connection_manager and a sub filter selection on the + // HTTP filter relative to which the insertion should be + // performed. Similarly, an applyTo on CLUSTER should have a match + // (if provided) on the cluster and not on a listener. + ApplyTo EnvoyFilter_ApplyTo `protobuf:"varint,1,opt,name=apply_to,json=applyTo,proto3,enum=istio.networking.v1alpha3.EnvoyFilter_ApplyTo" json:"apply_to,omitempty"` + // Match on listener/route configuration/cluster. + Match *EnvoyFilter_EnvoyConfigObjectMatch `protobuf:"bytes,2,opt,name=match,proto3" json:"match,omitempty"` + // The patch to apply along with the operation. 
+ Patch *EnvoyFilter_Patch `protobuf:"bytes,3,opt,name=patch,proto3" json:"patch,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) Reset() { *m = EnvoyFilter_EnvoyConfigObjectPatch{} } +func (m *EnvoyFilter_EnvoyConfigObjectPatch) String() string { return proto.CompactTextString(m) } +func (*EnvoyFilter_EnvoyConfigObjectPatch) ProtoMessage() {} +func (*EnvoyFilter_EnvoyConfigObjectPatch) Descriptor() ([]byte, []int) { + return fileDescriptor_16d9b2922bd3e4a9, []int{0, 9} +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectPatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectPatch.Merge(m, src) +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) XXX_Size() int { + return m.Size() +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) XXX_DiscardUnknown() { + xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectPatch.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvoyFilter_EnvoyConfigObjectPatch proto.InternalMessageInfo + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) GetApplyTo() EnvoyFilter_ApplyTo { + if m != nil { + return m.ApplyTo + } + return EnvoyFilter_INVALID +} + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) GetMatch() *EnvoyFilter_EnvoyConfigObjectMatch { + if m != nil { + return m.Match + } + return nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) GetPatch() *EnvoyFilter_Patch { + if m != nil { + return m.Patch + } + return nil +} + +func init() { + 
proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_ApplyTo", EnvoyFilter_ApplyTo_name, EnvoyFilter_ApplyTo_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_PatchContext", EnvoyFilter_PatchContext_name, EnvoyFilter_PatchContext_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_DeprecatedListenerMatch_ListenerType", EnvoyFilter_DeprecatedListenerMatch_ListenerType_name, EnvoyFilter_DeprecatedListenerMatch_ListenerType_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol", EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol_name, EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_InsertPosition_Index", EnvoyFilter_InsertPosition_Index_name, EnvoyFilter_InsertPosition_Index_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_Filter_FilterType", EnvoyFilter_Filter_FilterType_name, EnvoyFilter_Filter_FilterType_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action", EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action_name, EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action_value) + proto.RegisterEnum("istio.networking.v1alpha3.EnvoyFilter_Patch_Operation", EnvoyFilter_Patch_Operation_name, EnvoyFilter_Patch_Operation_value) + proto.RegisterType((*EnvoyFilter)(nil), "istio.networking.v1alpha3.EnvoyFilter") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.EnvoyFilter.WorkloadLabelsEntry") + proto.RegisterType((*EnvoyFilter_DeprecatedListenerMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.DeprecatedListenerMatch") + proto.RegisterType((*EnvoyFilter_InsertPosition)(nil), "istio.networking.v1alpha3.EnvoyFilter.InsertPosition") + proto.RegisterType((*EnvoyFilter_Filter)(nil), "istio.networking.v1alpha3.EnvoyFilter.Filter") + proto.RegisterType((*EnvoyFilter_ProxyMatch)(nil), 
"istio.networking.v1alpha3.EnvoyFilter.ProxyMatch") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.EnvoyFilter.ProxyMatch.MetadataEntry") + proto.RegisterType((*EnvoyFilter_ClusterMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ClusterMatch") + proto.RegisterType((*EnvoyFilter_RouteConfigurationMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.RouteConfigurationMatch") + proto.RegisterType((*EnvoyFilter_RouteConfigurationMatch_RouteMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.RouteConfigurationMatch.RouteMatch") + proto.RegisterType((*EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.RouteConfigurationMatch.VirtualHostMatch") + proto.RegisterType((*EnvoyFilter_ListenerMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ListenerMatch") + proto.RegisterType((*EnvoyFilter_ListenerMatch_FilterChainMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ListenerMatch.FilterChainMatch") + proto.RegisterType((*EnvoyFilter_ListenerMatch_FilterMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ListenerMatch.FilterMatch") + proto.RegisterType((*EnvoyFilter_ListenerMatch_SubFilterMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.ListenerMatch.SubFilterMatch") + proto.RegisterType((*EnvoyFilter_Patch)(nil), "istio.networking.v1alpha3.EnvoyFilter.Patch") + proto.RegisterType((*EnvoyFilter_EnvoyConfigObjectMatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.EnvoyConfigObjectMatch") + proto.RegisterType((*EnvoyFilter_EnvoyConfigObjectPatch)(nil), "istio.networking.v1alpha3.EnvoyFilter.EnvoyConfigObjectPatch") +} + +func init() { + proto.RegisterFile("networking/v1alpha3/envoy_filter.proto", fileDescriptor_16d9b2922bd3e4a9) +} + +var fileDescriptor_16d9b2922bd3e4a9 = []byte{ + // 1618 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x73, 0xe3, 0x4a, + 0x11, 0x8f, 0x6c, 0xcb, 0x1f, 0xed, 0x8f, 0x28, 0x93, 0xe5, 0xc5, 0x08, 
0x6a, 0x5f, 0x30, 0xaf, + 0xa8, 0x54, 0xc1, 0x2a, 0xbc, 0x04, 0xa8, 0xad, 0xdd, 0x62, 0x17, 0xc5, 0x51, 0x12, 0xd5, 0x3a, + 0xb6, 0x19, 0x2b, 0x59, 0x16, 0x8a, 0x55, 0xc9, 0xf6, 0x24, 0x11, 0xab, 0x58, 0x46, 0x92, 0x9d, + 0xb8, 0x8a, 0x2b, 0xf0, 0x87, 0xf0, 0x57, 0x70, 0xe7, 0x40, 0xc1, 0x85, 0x23, 0xc7, 0xad, 0xbd, + 0x72, 0xa0, 0x8a, 0x03, 0x27, 0x0e, 0xd4, 0x7c, 0x48, 0xb6, 0xb3, 0xce, 0x96, 0x93, 0xec, 0xc9, + 0x9a, 0x9e, 0xe9, 0xdf, 0xfc, 0xba, 0xa7, 0xbb, 0xa7, 0xc7, 0xf0, 0xbd, 0x01, 0x89, 0xae, 0xfc, + 0xe0, 0x9d, 0x3b, 0x38, 0xdf, 0x1e, 0x7f, 0xed, 0x78, 0xc3, 0x0b, 0x67, 0x77, 0x9b, 0x0c, 0xc6, + 0xfe, 0xc4, 0x3e, 0x73, 0xbd, 0x88, 0x04, 0xda, 0x30, 0xf0, 0x23, 0x1f, 0x7d, 0xd3, 0x0d, 0x23, + 0xd7, 0xd7, 0xa6, 0xab, 0xb5, 0x78, 0xb5, 0xfa, 0xe5, 0xb9, 0xef, 0x9f, 0x7b, 0x64, 0xdb, 0x19, + 0xba, 0xdb, 0x67, 0x2e, 0xf1, 0xfa, 0x76, 0x97, 0x5c, 0x38, 0x63, 0xd7, 0x17, 0xba, 0xea, 0xb7, + 0xc5, 0x02, 0x36, 0xea, 0x8e, 0xce, 0xb6, 0xc3, 0x28, 0x18, 0xf5, 0x22, 0x31, 0xfb, 0x9d, 0x45, + 0x0c, 0x42, 0xb7, 0x4f, 0x7a, 0x8e, 0x00, 0xa8, 0xfd, 0xa1, 0x06, 0x45, 0x83, 0x72, 0x3a, 0x60, + 0x94, 0xd0, 0x39, 0xac, 0x52, 0x0d, 0xcf, 0x77, 0xfa, 0xb6, 0xe7, 0x74, 0x89, 0x17, 0x56, 0xa5, + 0xcd, 0xf4, 0x56, 0x71, 0xe7, 0x99, 0x76, 0x2b, 0x4d, 0x6d, 0x06, 0x40, 0x7b, 0x2d, 0xb4, 0x1b, + 0x4c, 0xd9, 0x18, 0x44, 0xc1, 0x64, 0x2f, 0x55, 0x95, 0x70, 0xe5, 0x6a, 0x6e, 0x02, 0xbd, 0x82, + 0x1c, 0xf7, 0x42, 0x58, 0x4d, 0xb1, 0x0d, 0x9e, 0x2c, 0xb9, 0x01, 0xff, 0x61, 0x98, 0x31, 0x02, + 0xfa, 0x05, 0xac, 0x25, 0xac, 0x43, 0xe2, 0x91, 0x5e, 0xe4, 0x07, 0xd5, 0xf4, 0xa6, 0xb4, 0x55, + 0xdc, 0xf9, 0xfe, 0x27, 0x60, 0x63, 0xae, 0x1d, 0xa1, 0x82, 0x95, 0xab, 0x1b, 0x12, 0xe4, 0x42, + 0xa5, 0xe7, 0x0f, 0xce, 0xdc, 0x73, 0x7b, 0xe8, 0x44, 0xbd, 0x0b, 0x12, 0x56, 0x33, 0x8c, 0xed, + 0x4f, 0x97, 0x64, 0xcb, 0xbe, 0xeb, 0x0c, 0xa1, 0xd5, 0xfd, 0x0d, 0xe9, 0x45, 0x6d, 0x0a, 0xb3, + 0x97, 0x7e, 0xaf, 0xa7, 0x70, 0x99, 0x23, 0xb7, 0x39, 0xb0, 0xfa, 0xef, 0x34, 0x6c, 0xec, 0x93, + 0x61, 0x40, 
0x7a, 0x4e, 0x44, 0xfa, 0x0d, 0x37, 0x8c, 0xc8, 0x80, 0x04, 0xc7, 0x74, 0x16, 0x7d, + 0x09, 0xc5, 0xa1, 0x1f, 0x44, 0xf6, 0x60, 0x74, 0xd9, 0x25, 0x41, 0x55, 0xda, 0x94, 0xb6, 0xca, + 0x18, 0xa8, 0xa8, 0xc9, 0x24, 0x68, 0x0b, 0x14, 0xbe, 0xc0, 0xb9, 0x24, 0xf6, 0x30, 0x20, 0x67, + 0xee, 0x75, 0x35, 0xb5, 0x29, 0x6d, 0x15, 0x70, 0x85, 0xad, 0x72, 0x2e, 0x49, 0x9b, 0x49, 0xd1, + 0x10, 0xca, 0x9e, 0xc0, 0xb6, 0xa3, 0xc9, 0x90, 0x30, 0x3f, 0x55, 0x76, 0x5e, 0x2d, 0x69, 0xd0, + 0x2d, 0x0c, 0xb5, 0x78, 0x64, 0x4d, 0x86, 0x04, 0x97, 0xbc, 0x99, 0x11, 0xfa, 0x1d, 0xac, 0x25, + 0x3b, 0xb2, 0xa8, 0xeb, 0xf9, 0x5e, 0x35, 0xc3, 0x76, 0x6d, 0x7d, 0xa6, 0x5d, 0xdb, 0x02, 0x16, + 0x2b, 0xde, 0x0d, 0x09, 0xaa, 0x42, 0xce, 0xe9, 0xf7, 0x03, 0x12, 0x86, 0x55, 0x79, 0x33, 0xbd, + 0x55, 0xc0, 0xf1, 0xb0, 0xd6, 0x82, 0xd2, 0x2c, 0x6b, 0x94, 0x83, 0xb4, 0xde, 0x7c, 0xa3, 0xac, + 0xa0, 0x75, 0x58, 0xed, 0x98, 0xfb, 0x46, 0x5d, 0xc7, 0xb6, 0xd9, 0xdc, 0x6b, 0x9d, 0x34, 0xf7, + 0x15, 0x09, 0x3d, 0x02, 0x25, 0x16, 0xb6, 0x4e, 0x2c, 0x2e, 0x4d, 0xa1, 0x22, 0xe4, 0x0e, 0x75, + 0xcb, 0x78, 0xad, 0xbf, 0x51, 0xd2, 0x35, 0x0d, 0x94, 0x9b, 0x84, 0x18, 0x68, 0xa3, 0xa1, 0xac, + 0xa0, 0x3c, 0x64, 0x8e, 0x2c, 0xab, 0xad, 0x48, 0x54, 0x64, 0xd5, 0xdb, 0x4a, 0x4a, 0xfd, 0xb3, + 0x04, 0x15, 0x73, 0x10, 0x92, 0x20, 0x6a, 0xfb, 0xa1, 0x1b, 0xb9, 0xfe, 0x00, 0xfd, 0x1c, 0x64, + 0x77, 0xd0, 0x27, 0xd7, 0xec, 0x88, 0x2b, 0x3b, 0xcf, 0x97, 0xf4, 0xcf, 0x3c, 0x8a, 0x66, 0x52, + 0x08, 0xcc, 0x91, 0x68, 0xec, 0x04, 0xc4, 0x73, 0x22, 0x77, 0x4c, 0xec, 0xc8, 0x17, 0x51, 0x01, + 0xb1, 0xc8, 0xf2, 0x6b, 0xbb, 0x20, 0x33, 0x05, 0x54, 0x00, 0xf9, 0xc0, 0xc4, 0x1d, 0x8b, 0xb3, + 0x6d, 0xe8, 0x1d, 0x4b, 0x91, 0x10, 0x40, 0x76, 0xcf, 0x38, 0x68, 0x61, 0x43, 0x49, 0xd1, 0x05, + 0xfa, 0x81, 0x65, 0x60, 0x25, 0xad, 0xfe, 0x2d, 0x0d, 0x59, 0x51, 0x33, 0x08, 0x54, 0x92, 0xf3, + 0xbd, 0xa4, 0xc7, 0xc2, 0xc8, 0x17, 0x77, 0x5e, 0x3c, 0xec, 0x70, 0x71, 0x12, 0xa7, 0x3c, 0x07, + 0xde, 0xc2, 0xaa, 0xcb, 0xcc, 0xb4, 0x87, 0xc2, 
0x4e, 0x66, 0x4b, 0x71, 0xe7, 0xc7, 0xf7, 0x72, + 0x12, 0xae, 0xb8, 0xf3, 0xae, 0x7f, 0x0b, 0x45, 0x5e, 0x4f, 0x66, 0xd3, 0xe2, 0xe9, 0x9d, 0xaa, + 0x92, 0xf8, 0xa1, 0xd1, 0xc4, 0x53, 0x1c, 0xce, 0x12, 0x01, 0xfa, 0x2a, 0xc1, 0xa7, 0x49, 0xca, + 0x12, 0xa0, 0x30, 0xb7, 0x8a, 0x26, 0x29, 0xd2, 0xa1, 0x2c, 0x56, 0xf1, 0xea, 0x50, 0x95, 0x99, + 0x8d, 0x1b, 0x1a, 0xaf, 0xf4, 0x5a, 0x5c, 0xe9, 0xb5, 0x0e, 0xab, 0xf4, 0x1c, 0xa0, 0xc4, 0x55, + 0x78, 0x9d, 0xa9, 0xfd, 0x10, 0x60, 0xca, 0x83, 0x46, 0xa8, 0xd9, 0x3c, 0xd5, 0x1b, 0xe6, 0xfe, + 0x5c, 0x10, 0x16, 0x21, 0xd7, 0x34, 0xac, 0xd7, 0x2d, 0xfc, 0x4a, 0x49, 0xa9, 0x7f, 0x97, 0x00, + 0xda, 0x81, 0x7f, 0x3d, 0xe1, 0x9e, 0xfe, 0x2e, 0x94, 0x87, 0x74, 0x64, 0x8f, 0x49, 0x10, 0x52, + 0x3f, 0x4b, 0x2c, 0x66, 0x4a, 0x4c, 0x78, 0xca, 0x65, 0xe8, 0x57, 0x90, 0xbf, 0x24, 0x91, 0xd3, + 0x77, 0x22, 0x47, 0x54, 0xf0, 0x97, 0x4b, 0xfa, 0x6a, 0xba, 0x93, 0x76, 0x2c, 0x10, 0xd8, 0x3d, + 0x81, 0x13, 0x40, 0xf5, 0x39, 0x94, 0xe7, 0xa6, 0x90, 0x02, 0xe9, 0x77, 0x64, 0x22, 0x88, 0xd0, + 0x4f, 0xf4, 0x08, 0xe4, 0xb1, 0xe3, 0x8d, 0x88, 0x08, 0x68, 0x3e, 0x78, 0x96, 0x7a, 0x2a, 0xa9, + 0x23, 0x28, 0xd5, 0xbd, 0x51, 0x18, 0x2d, 0x5d, 0x3c, 0xab, 0x90, 0x0b, 0x49, 0x30, 0x76, 0x7b, + 0x31, 0x58, 0x3c, 0x44, 0x5f, 0x40, 0x36, 0x1c, 0x75, 0x43, 0x12, 0xb1, 0x70, 0x28, 0x60, 0x31, + 0x42, 0x08, 0x32, 0xd3, 0x43, 0xc4, 0xec, 0x5b, 0xfd, 0x4b, 0x06, 0x36, 0xb0, 0x3f, 0x8a, 0x08, + 0x3f, 0x86, 0x51, 0xe0, 0xd0, 0xb0, 0x5a, 0x92, 0xc2, 0xb7, 0xa0, 0x90, 0xd4, 0x6f, 0x41, 0x22, + 0x1f, 0x17, 0x6e, 0xca, 0xef, 0xdc, 0x89, 0xc8, 0x95, 0x33, 0x11, 0x34, 0xe2, 0x21, 0x22, 0x20, + 0x8f, 0x2f, 0xfc, 0x30, 0x62, 0x44, 0x8a, 0x4b, 0x97, 0xd3, 0x5b, 0x68, 0x6a, 0xa7, 0x6e, 0x10, + 0x8d, 0x1c, 0xef, 0xc8, 0x0f, 0x23, 0x9e, 0x82, 0x1c, 0x3d, 0x31, 0x57, 0x9e, 0x31, 0x97, 0xc6, + 0x0c, 0xc3, 0xe1, 0x16, 0xc6, 0x4b, 0xa4, 0xe9, 0x12, 0x74, 0x01, 0x59, 0xa7, 0x97, 0x24, 0x6a, + 0x65, 0xa7, 0xfd, 0x40, 0x7a, 0xd3, 0xed, 0x34, 0x9d, 0xe1, 0x62, 0x81, 0x5f, 0x7b, 
0x09, 0x59, + 0x2e, 0x99, 0x16, 0xf1, 0x02, 0xc8, 0xb8, 0x75, 0x62, 0x19, 0x8a, 0x84, 0x4a, 0x90, 0xc7, 0xc6, + 0xbe, 0x89, 0x8d, 0xba, 0xa5, 0xa4, 0x68, 0x75, 0xe7, 0xdf, 0x36, 0x36, 0x3a, 0xed, 0x56, 0xb3, + 0x63, 0x28, 0x69, 0xf5, 0x8f, 0x12, 0x28, 0x37, 0xad, 0x5f, 0x68, 0x93, 0x0d, 0x72, 0x40, 0x69, + 0x88, 0xda, 0x63, 0x7e, 0x36, 0x93, 0x30, 0xc7, 0x55, 0xff, 0x93, 0x81, 0xf2, 0x1d, 0x2f, 0xff, + 0x4f, 0x06, 0xcf, 0x05, 0x94, 0xe2, 0x82, 0x72, 0xe1, 0xb8, 0x03, 0xd1, 0x16, 0x19, 0x4b, 0xf2, + 0x9e, 0xbf, 0x6e, 0xb9, 0xb0, 0x4e, 0x71, 0x38, 0x67, 0x51, 0xd1, 0x98, 0x64, 0x61, 0x52, 0xfc, + 0x57, 0x02, 0xe5, 0xa6, 0xd6, 0x42, 0xbf, 0x2a, 0x90, 0x0e, 0x07, 0xae, 0x60, 0x4f, 0x3f, 0xd1, + 0x13, 0x40, 0x51, 0xe0, 0x0c, 0x42, 0x66, 0x5a, 0xd2, 0x37, 0xf0, 0x04, 0x58, 0x4b, 0x66, 0x92, + 0x8b, 0x76, 0x17, 0xbe, 0xe1, 0x0c, 0x87, 0x9e, 0xdb, 0x63, 0xde, 0x4d, 0x14, 0x42, 0x41, 0xe7, + 0xd1, 0xcc, 0x64, 0xac, 0x43, 0x1b, 0xc7, 0x2c, 0xb7, 0x40, 0x94, 0xd9, 0x9f, 0x3d, 0xc0, 0x2d, + 0xdc, 0x23, 0x02, 0x4f, 0xfd, 0xbd, 0x04, 0xc5, 0x19, 0xf9, 0x42, 0x9b, 0xbb, 0x00, 0xe1, 0xa8, + 0x2b, 0x5e, 0x03, 0x22, 0xa0, 0xea, 0xf7, 0x62, 0xd0, 0x19, 0x75, 0x67, 0x49, 0x14, 0xc2, 0x78, + 0xac, 0x7e, 0x05, 0x95, 0xf9, 0xc9, 0x45, 0x4c, 0xd4, 0x7f, 0x49, 0x20, 0xb3, 0x3e, 0x14, 0x59, + 0x50, 0xf0, 0x87, 0x84, 0x87, 0xa8, 0x68, 0x42, 0x7e, 0xb2, 0x6c, 0x5d, 0x67, 0x54, 0x5a, 0xb1, + 0x36, 0x9e, 0x02, 0xa1, 0x27, 0xb3, 0xc5, 0xfa, 0xf6, 0xdb, 0x4c, 0x54, 0xf1, 0xda, 0xaf, 0xa1, + 0x90, 0xc0, 0xcc, 0x5f, 0x60, 0x05, 0x90, 0x8f, 0x0d, 0x7c, 0x68, 0xf0, 0x36, 0x4a, 0xdf, 0xa7, + 0x3d, 0x18, 0x40, 0x16, 0x1b, 0xc7, 0xad, 0x53, 0x43, 0x49, 0xa3, 0x35, 0x28, 0x9b, 0xcd, 0x8e, + 0x81, 0x2d, 0x5b, 0x34, 0x2d, 0x19, 0xa4, 0x40, 0x49, 0x88, 0x78, 0xef, 0x22, 0xab, 0xff, 0x4c, + 0xc3, 0x17, 0x1f, 0x35, 0xe6, 0xdc, 0x39, 0xc7, 0x90, 0xeb, 0xf9, 0x83, 0x88, 0x5c, 0x47, 0xc2, + 0xf8, 0xdd, 0xbb, 0x18, 0x5f, 0xe7, 0xaa, 0x38, 0xc6, 0x40, 0x87, 0x20, 0xb3, 0x4b, 0x53, 0xd8, + 0xfd, 0xf5, 0x9d, 0x6f, 
0x48, 0xcc, 0xf5, 0x11, 0x86, 0x7c, 0xdc, 0x0d, 0x89, 0x0c, 0xfe, 0xd1, + 0x7d, 0x02, 0xe5, 0x68, 0x05, 0x27, 0x38, 0xe8, 0xb7, 0xb0, 0xce, 0x4a, 0x8e, 0xe8, 0x34, 0x44, + 0x5d, 0x12, 0x57, 0xc9, 0x8b, 0x87, 0x15, 0xb6, 0xa3, 0x15, 0x8c, 0x82, 0x8f, 0xa6, 0x50, 0x0b, + 0x72, 0x3d, 0x7e, 0x35, 0x8b, 0x84, 0x5b, 0xd6, 0xbd, 0xb3, 0x17, 0xfa, 0xd1, 0x0a, 0x8e, 0x51, + 0xf6, 0x2a, 0x50, 0xf2, 0xd9, 0xf1, 0xb1, 0xa6, 0x2d, 0x54, 0xff, 0x27, 0x2d, 0x38, 0x5a, 0x1e, + 0xd9, 0x26, 0xe4, 0x69, 0x0d, 0x98, 0xd0, 0x26, 0x98, 0x9f, 0xad, 0xb6, 0xe4, 0xe6, 0x3a, 0x55, + 0xb3, 0x7c, 0x9c, 0x73, 0xf8, 0x07, 0xea, 0x80, 0xcc, 0x1b, 0x5d, 0x7e, 0xac, 0xf7, 0x7e, 0x0c, + 0x8a, 0x23, 0x66, 0x58, 0x68, 0x0f, 0x64, 0xf6, 0xc6, 0x14, 0xe7, 0xfb, 0x83, 0xbb, 0x04, 0x1e, + 0xe6, 0xaa, 0xaa, 0x0e, 0xeb, 0x0b, 0x1e, 0xe0, 0x77, 0xe9, 0x9e, 0x6a, 0x7f, 0x92, 0x20, 0x27, + 0x0c, 0x9e, 0x4f, 0xbd, 0x12, 0xe4, 0x1b, 0x66, 0xc7, 0x32, 0x9a, 0x06, 0x56, 0x24, 0x9a, 0x55, + 0x07, 0x66, 0xc3, 0x32, 0xb0, 0x5d, 0x3f, 0xd2, 0xcd, 0xa6, 0x92, 0x42, 0x08, 0x2a, 0xa2, 0xa3, + 0xb4, 0xf9, 0x8c, 0x92, 0x46, 0xab, 0x50, 0xa4, 0xfd, 0x66, 0x2c, 0xc8, 0xa0, 0x0d, 0x58, 0x67, + 0xb7, 0xb2, 0x5d, 0x6f, 0x35, 0x0f, 0xcc, 0xc3, 0x13, 0xac, 0x5b, 0x66, 0xab, 0xa9, 0xc8, 0x14, + 0xef, 0xd4, 0xc4, 0xd6, 0x89, 0xde, 0xb0, 0x8f, 0x5a, 0x1d, 0x4b, 0xc9, 0xa2, 0x0a, 0x00, 0xd3, + 0xe5, 0xb7, 0x78, 0x8e, 0x92, 0xa9, 0x37, 0x4e, 0x3a, 0x14, 0x27, 0x4f, 0xdf, 0x6e, 0xb3, 0x19, + 0xf7, 0xe0, 0xb7, 0xdb, 0x9e, 0xf6, 0xd7, 0x0f, 0x8f, 0xa5, 0x7f, 0x7c, 0x78, 0x2c, 0xbd, 0xff, + 0xf0, 0x58, 0xfa, 0xe5, 0x26, 0xf7, 0xbd, 0xeb, 0xb3, 0xbf, 0x5e, 0x16, 0xfc, 0x8d, 0xd2, 0xcd, + 0xb2, 0xd2, 0xb5, 0xfb, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x50, 0xdf, 0x9f, 0x24, 0xe6, 0x11, + 0x00, 0x00, +} + +func (m *EnvoyFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*EnvoyFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ConfigPatches) > 0 { + for iNdEx := len(m.ConfigPatches) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConfigPatches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.WorkloadSelector != nil { + { + size, err := m.WorkloadSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.WorkloadLabels) > 0 { + for k := range m.WorkloadLabels { + v := m.WorkloadLabels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintEnvoyFilter(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Address) > 0 { + for iNdEx := len(m.Address) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Address[iNdEx]) + copy(dAtA[i:], m.Address[iNdEx]) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Address[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.ListenerProtocol != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ListenerProtocol)) + i-- + dAtA[i] = 0x20 + } + if m.ListenerType != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ListenerType)) + i-- + dAtA[i] = 0x18 + } + if len(m.PortNamePrefix) > 0 { + i -= len(m.PortNamePrefix) + copy(dAtA[i:], m.PortNamePrefix) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.PortNamePrefix))) + i-- + dAtA[i] = 0x12 + } + if m.PortNumber != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.PortNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_InsertPosition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_InsertPosition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_InsertPosition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RelativeTo) > 0 { + i -= len(m.RelativeTo) + copy(dAtA[i:], m.RelativeTo) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.RelativeTo))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.Index)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_Filter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_Filter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_Filter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.FilterConfig != nil { + { + size, err := m.FilterConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.FilterName) > 0 { + i -= len(m.FilterName) + copy(dAtA[i:], m.FilterName) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.FilterName))) + i-- + dAtA[i] = 0x22 + } + if m.FilterType != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.FilterType)) + i-- + dAtA[i] = 0x18 + } + if m.InsertPosition != nil { + { + size, err := m.InsertPosition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ListenerMatch != nil { + { + size, err := m.ListenerMatch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ProxyMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_ProxyMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ProxyMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintEnvoyFilter(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ProxyVersion) > 0 { + i -= len(m.ProxyVersion) + copy(dAtA[i:], m.ProxyVersion) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.ProxyVersion))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ClusterMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_ClusterMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ClusterMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if len(m.Subset) > 0 { + i -= len(m.Subset) + copy(dAtA[i:], m.Subset) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Subset))) + i-- + dAtA[i] = 0x1a + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x12 + } + if 
m.PortNumber != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.PortNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_RouteConfigurationMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + } + if m.Vhost != nil { + { + size, err := m.Vhost.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Gateway) > 0 { + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0x1a + } + if len(m.PortName) > 0 { + i -= len(m.PortName) + copy(dAtA[i:], m.PortName) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.PortName))) + i-- + dAtA[i] = 0x12 + } + if m.PortNumber != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.PortNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Action != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Route != nil { + { + size, err := m.Route.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ListenerMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_ListenerMatch) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ListenerMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.FilterChain != nil { + { + size, err := m.FilterChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.PortName) > 0 { + i -= len(m.PortName) + copy(dAtA[i:], m.PortName) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.PortName))) + i-- + dAtA[i] = 0x12 + } + if m.PortNumber != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.PortNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ApplicationProtocols) > 0 { + i -= len(m.ApplicationProtocols) + copy(dAtA[i:], 
m.ApplicationProtocols) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.ApplicationProtocols))) + i-- + dAtA[i] = 0x22 + } + if len(m.TransportProtocol) > 0 { + i -= len(m.TransportProtocol) + copy(dAtA[i:], m.TransportProtocol) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.TransportProtocol))) + i-- + dAtA[i] = 0x1a + } + if len(m.Sni) > 0 { + i -= len(m.Sni) + copy(dAtA[i:], m.Sni) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Sni))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SubFilter != nil { + { + size, err := m.SubFilter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*EnvoyFilter_ListenerMatch_SubFilterMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEnvoyFilter(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_Patch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Operation != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.Operation)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ObjectTypes != nil { + { + size := m.ObjectTypes.Size() + i -= size + if _, err := m.ObjectTypes.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Proxy != nil { + { + size, err := m.Proxy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Context != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.Context)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Listener) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Listener) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Listener != nil { + { + size, err := m.Listener.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RouteConfiguration != nil { + { + size, err := m.RouteConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Cluster) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Cluster) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Cluster != nil { + { + size, err := m.Cluster.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvoyFilter_EnvoyConfigObjectPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Patch != nil { + { + size, err := m.Patch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Match != nil { + { + size, err := m.Match.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvoyFilter(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ApplyTo != 0 { + i = encodeVarintEnvoyFilter(dAtA, i, uint64(m.ApplyTo)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEnvoyFilter(dAtA []byte, offset int, v uint64) int { + offset -= sovEnvoyFilter(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EnvoyFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.WorkloadLabels) > 0 { + for k, v := range m.WorkloadLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + 
sovEnvoyFilter(uint64(len(k))) + 1 + len(v) + sovEnvoyFilter(uint64(len(v))) + n += mapEntrySize + 1 + sovEnvoyFilter(uint64(mapEntrySize)) + } + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + } + if m.WorkloadSelector != nil { + l = m.WorkloadSelector.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if len(m.ConfigPatches) > 0 { + for _, e := range m.ConfigPatches { + l = e.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_DeprecatedListenerMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortNumber != 0 { + n += 1 + sovEnvoyFilter(uint64(m.PortNumber)) + } + l = len(m.PortNamePrefix) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.ListenerType != 0 { + n += 1 + sovEnvoyFilter(uint64(m.ListenerType)) + } + if m.ListenerProtocol != 0 { + n += 1 + sovEnvoyFilter(uint64(m.ListenerProtocol)) + } + if len(m.Address) > 0 { + for _, s := range m.Address { + l = len(s) + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_InsertPosition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovEnvoyFilter(uint64(m.Index)) + } + l = len(m.RelativeTo) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_Filter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListenerMatch != nil { + l = m.ListenerMatch.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.InsertPosition != nil { + l = m.InsertPosition.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.FilterType != 0 { + n += 1 + sovEnvoyFilter(uint64(m.FilterType)) + } + l = len(m.FilterName) + 
if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.FilterConfig != nil { + l = m.FilterConfig.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ProxyMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProxyVersion) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovEnvoyFilter(uint64(len(k))) + 1 + len(v) + sovEnvoyFilter(uint64(len(v))) + n += mapEntrySize + 1 + sovEnvoyFilter(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ClusterMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortNumber != 0 { + n += 1 + sovEnvoyFilter(uint64(m.PortNumber)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Subset) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_RouteConfigurationMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortNumber != 0 { + n += 1 + sovEnvoyFilter(uint64(m.PortNumber)) + } + l = len(m.PortName) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Gateway) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.Vhost != nil { + l = m.Vhost.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovEnvoyFilter(uint64(m.Action)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.Route != nil { + l = m.Route.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ListenerMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PortNumber != 0 { + n += 1 + sovEnvoyFilter(uint64(m.PortNumber)) + } + l = len(m.PortName) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.FilterChain != nil { + l = m.FilterChain.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.Sni) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.TransportProtocol) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + l = len(m.ApplicationProtocols) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ListenerMatch_FilterMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.SubFilter != nil { + l = m.SubFilter.Size() + 
n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_Patch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Operation != 0 { + n += 1 + sovEnvoyFilter(uint64(m.Operation)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Context != 0 { + n += 1 + sovEnvoyFilter(uint64(m.Context)) + } + if m.Proxy != nil { + l = m.Proxy.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.ObjectTypes != nil { + n += m.ObjectTypes.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Listener) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Listener != nil { + l = m.Listener.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + return n +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RouteConfiguration != nil { + l = m.RouteConfiguration.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + return n +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch_Cluster) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + return n +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = 
l + if m.ApplyTo != 0 { + n += 1 + sovEnvoyFilter(uint64(m.ApplyTo)) + } + if m.Match != nil { + l = m.Match.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.Patch != nil { + l = m.Patch.Size() + n += 1 + l + sovEnvoyFilter(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEnvoyFilter(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEnvoyFilter(x uint64) (n int) { + return sovEnvoyFilter(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EnvoyFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvoyFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvoyFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkloadLabels == nil { + m.WorkloadLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEnvoyFilter + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEnvoyFilter + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.WorkloadLabels[mapkey] = mapvalue + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &EnvoyFilter_Filter{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkloadSelector == nil { + m.WorkloadSelector = &WorkloadSelector{} + } + if err := m.WorkloadSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigPatches", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigPatches = append(m.ConfigPatches, &EnvoyFilter_EnvoyConfigObjectPatch{}) + if err := m.ConfigPatches[len(m.ConfigPatches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_DeprecatedListenerMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeprecatedListenerMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeprecatedListenerMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PortNumber", wireType) + } + m.PortNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PortNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PortNamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortNamePrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ListenerType", wireType) + } + m.ListenerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ListenerType |= EnvoyFilter_DeprecatedListenerMatch_ListenerType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ListenerProtocol", wireType) + } + m.ListenerProtocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ListenerProtocol |= EnvoyFilter_DeprecatedListenerMatch_ListenerProtocol(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_InsertPosition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InsertPosition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InsertPosition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= EnvoyFilter_InsertPosition_Index(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RelativeTo", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RelativeTo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_Filter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListenerMatch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListenerMatch == nil { + m.ListenerMatch = &EnvoyFilter_DeprecatedListenerMatch{} + } + if err := m.ListenerMatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InsertPosition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InsertPosition == nil { + m.InsertPosition = &EnvoyFilter_InsertPosition{} + } + if err := m.InsertPosition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FilterType", wireType) + } + m.FilterType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FilterType |= EnvoyFilter_Filter_FilterType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilterConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FilterConfig == nil { + m.FilterConfig = &types.Struct{} + } + if err := m.FilterConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ProxyMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProxyVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEnvoyFilter + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEnvoyFilter + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ClusterMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PortNumber", wireType) + } + m.PortNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PortNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subset", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subset = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_RouteConfigurationMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteConfigurationMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteConfigurationMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PortNumber", wireType) + } + m.PortNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PortNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vhost", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vhost == nil { + m.Vhost = &EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch{} + } + if err := m.Vhost.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_RouteConfigurationMatch_RouteMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= EnvoyFilter_RouteConfigurationMatch_RouteMatch_Action(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualHostMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VirtualHostMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Route", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Route == nil { + m.Route = &EnvoyFilter_RouteConfigurationMatch_RouteMatch{} + } + if err := m.Route.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ListenerMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenerMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenerMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PortNumber", wireType) + } + m.PortNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PortNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortName", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilterChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FilterChain == nil { + m.FilterChain = &EnvoyFilter_ListenerMatch_FilterChainMatch{} + } + if err := m.FilterChain.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ListenerMatch_FilterChainMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilterChainMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilterChainMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Sni", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sni = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransportProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TransportProtocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationProtocols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ApplicationProtocols = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EnvoyFilter_ListenerMatch_FilterMatch{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ListenerMatch_FilterMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilterMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilterMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubFilter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SubFilter == nil { + m.SubFilter = &EnvoyFilter_ListenerMatch_SubFilterMatch{} + } + if err := m.SubFilter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_ListenerMatch_SubFilterMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubFilterMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubFilterMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + m.Operation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Operation |= EnvoyFilter_Patch_Operation(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &types.Struct{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_EnvoyConfigObjectMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvoyConfigObjectMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvoyConfigObjectMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + m.Context = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Context |= 
EnvoyFilter_PatchContext(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proxy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proxy == nil { + m.Proxy = &EnvoyFilter_ProxyMatch{} + } + if err := m.Proxy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Listener", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EnvoyFilter_ListenerMatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.ObjectTypes = &EnvoyFilter_EnvoyConfigObjectMatch_Listener{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouteConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EnvoyFilter_RouteConfigurationMatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.ObjectTypes = &EnvoyFilter_EnvoyConfigObjectMatch_RouteConfiguration{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EnvoyFilter_ClusterMatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.ObjectTypes = &EnvoyFilter_EnvoyConfigObjectMatch_Cluster{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvoyFilter_EnvoyConfigObjectPatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvoyConfigObjectPatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvoyConfigObjectPatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplyTo", wireType) + } + m.ApplyTo = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ApplyTo |= EnvoyFilter_ApplyTo(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Match == nil { + m.Match = &EnvoyFilter_EnvoyConfigObjectMatch{} + } + if err := m.Match.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvoyFilter + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvoyFilter + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Patch == nil { + m.Patch = &EnvoyFilter_Patch{} + } + if err := m.Patch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvoyFilter(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvoyFilter + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEnvoyFilter(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEnvoyFilter + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthEnvoyFilter + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvoyFilter + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEnvoyFilter(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthEnvoyFilter + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + 
panic("unreachable") +} + +var ( + ErrInvalidLengthEnvoyFilter = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEnvoyFilter = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_deepcopy.gen.go new file mode 100644 index 0000000000..d2d560ddb0 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_deepcopy.gen.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/envoy_filter.proto + +// `EnvoyFilter` provides a mechanism to customize the Envoy +// configuration generated by Istio Pilot. Use EnvoyFilter to modify +// values for certain fields, add specific filters, or even add +// entirely new listeners, clusters, etc. This feature must be used +// with care, as incorrect configurations could potentially +// destabilize the entire mesh. Unlike other Istio networking objects, +// EnvoyFilters are additively applied. Any number of EnvoyFilters can +// exist for a given workload in a specific namespace. The order of +// application of these EnvoyFilters is as follows: all EnvoyFilters +// in the config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// followed by all matching EnvoyFilters in the workload's namespace. +// +// **NOTE 1**: Since this is break glass configuration, there will not +// be any backward compatibility across different Istio releases. In +// other words, this configuration is subject to change based on +// internal implementation of Istio networking subsystem. +// +// **NOTE 2**: The envoy configuration provided through this mechanism +// should be carefully monitored across Istio proxy version upgrades, +// to ensure that deprecated fields are removed and replaced +// appropriately. 
+// +// **NOTE 3**: When multiple EnvoyFilters are bound to the same +// workload in a given namespace, all patches will be processed +// sequentially in order of creation time. The behavior is undefined +// if multiple EnvoyFilter configurations conflict with each other. +// +// **NOTE 4**: *_To apply an EnvoyFilter resource to all workloads +// (sidecars and gateways) in the system, define the resource in the +// config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// without a workloadSelector. +// +// The example below declares a global default EnvoyFilter resource in +// the root namespace called `istio-config`, that adds a custom +// protocol filter on all sidecars in the system, for outbound port +// 9307. The filter should be added before the terminating tcp_proxy +// filter to take effect. In addition, it sets a 30s idle timeout for +// all HTTP connections in both gateays and sidecars. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: custom-protocol +// namespace: istio-config # as defined in meshConfig resource. +// spec: +// configPatches: +// - applyTo: NETWORK_FILTER +// match: +// context: SIDECAR_OUTBOUND # will match outbound listeners in all sidecars +// listener: +// portNumber: 9307 +// filterChain: +// filter: +// name: "envoy.tcp_proxy" +// patch: +// operation: INSERT_BEFORE +// value: +// name: "envoy.config.filter.network.custom_protocol" +// config: +// ... 
+// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// # context omitted so that this applies to both sidecars and gateways +// listener: +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// typed_config: +// "@type": "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" +// idle_timeout: 30s +//``` +// +// The following example enables Envoy's Lua filter for all inbound +// HTTP calls arriving at service port 8080 of the reviews service pod +// with labels "app: reviews", in the bookinfo namespace. The lua +// filter calls out to an external service internal.org.net:8888 that +// requires a special cluster definition in envoy. The cluster is also +// added to the sidecar as part of this configuration. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: reviews-lua +// namespace: bookinfo +// spec: +// workloadSelector: +// labels: +// app: reviews +// configPatches: +// # The first patch adds the lua filter to the listener/http connection manager +// - applyTo: HTTP_FILTER +// match: +// context: SIDECAR_INBOUND +// listener: +// portNumber: 8080 +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// subFilter: +// name: "envoy.router" +// patch: +// operation: INSERT_BEFORE +// value: # lua filter specification +// name: envoy.lua +// config: +// inlineCode: | +// function envoy_on_request(request_handle) +// -- Make an HTTP call to an upstream host with the following headers, body, and timeout. 
+// local headers, body = request_handle:httpCall( +// "lua_cluster", +// { +// [":method"] = "POST", +// [":path"] = "/acl", +// [":authority"] = "internal.org.net" +// }, +// "authorize call", +// 5000) +// end +// # The second patch adds the cluster that is referenced by the lua code +// # cds match is omitted as a new cluster is being added +// - applyTo: CLUSTER +// match: +// context: SIDECAR_OUTBOUND +// patch: +// operation: ADD +// value: # cluster specification +// name: "lua_cluster" +// type: STRICT_DNS +// connect_timeout: 0.5s +// lb_policy: ROUND_ROBIN +// hosts: +// - socket_address: +// protocol: TCP +// address: "internal.org.net" +// port_value: 8888 +// +// ``` +// +// The following example overwrites certain fields (HTTP idle timeout +// and X-Forward-For trusted hops) in the HTTP connection manager in a +// listener on the ingress gateway in istio-system namespace for the +// SNI host app.example.com: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: hcm-tweaks +// namespace: istio-system +// spec: +// workloadSelector: +// labels: +// istio: ingress-gateway +// configPatches: +// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// context: GATEWAY +// listener: +// filterChain: +// sni: app.example.com +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// idle_timeout: 30s +// xff_num_trusted_hops: 5 +//``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using EnvoyFilter within kubernetes types, where deepcopy-gen is used. 
+func (in *EnvoyFilter) DeepCopyInto(out *EnvoyFilter) { + p := proto.Clone(in).(*EnvoyFilter) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_json.gen.go new file mode 100644 index 0000000000..0d9e075682 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/envoy_filter_json.gen.go @@ -0,0 +1,377 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/envoy_filter.proto + +// `EnvoyFilter` provides a mechanism to customize the Envoy +// configuration generated by Istio Pilot. Use EnvoyFilter to modify +// values for certain fields, add specific filters, or even add +// entirely new listeners, clusters, etc. This feature must be used +// with care, as incorrect configurations could potentially +// destabilize the entire mesh. Unlike other Istio networking objects, +// EnvoyFilters are additively applied. Any number of EnvoyFilters can +// exist for a given workload in a specific namespace. The order of +// application of these EnvoyFilters is as follows: all EnvoyFilters +// in the config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// followed by all matching EnvoyFilters in the workload's namespace. +// +// **NOTE 1**: Since this is break glass configuration, there will not +// be any backward compatibility across different Istio releases. In +// other words, this configuration is subject to change based on +// internal implementation of Istio networking subsystem. +// +// **NOTE 2**: The envoy configuration provided through this mechanism +// should be carefully monitored across Istio proxy version upgrades, +// to ensure that deprecated fields are removed and replaced +// appropriately. +// +// **NOTE 3**: When multiple EnvoyFilters are bound to the same +// workload in a given namespace, all patches will be processed +// sequentially in order of creation time. 
The behavior is undefined +// if multiple EnvoyFilter configurations conflict with each other. +// +// **NOTE 4**: *_To apply an EnvoyFilter resource to all workloads +// (sidecars and gateways) in the system, define the resource in the +// config [root +// namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig), +// without a workloadSelector. +// +// The example below declares a global default EnvoyFilter resource in +// the root namespace called `istio-config`, that adds a custom +// protocol filter on all sidecars in the system, for outbound port +// 9307. The filter should be added before the terminating tcp_proxy +// filter to take effect. In addition, it sets a 30s idle timeout for +// all HTTP connections in both gateays and sidecars. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: custom-protocol +// namespace: istio-config # as defined in meshConfig resource. +// spec: +// configPatches: +// - applyTo: NETWORK_FILTER +// match: +// context: SIDECAR_OUTBOUND # will match outbound listeners in all sidecars +// listener: +// portNumber: 9307 +// filterChain: +// filter: +// name: "envoy.tcp_proxy" +// patch: +// operation: INSERT_BEFORE +// value: +// name: "envoy.config.filter.network.custom_protocol" +// config: +// ... +// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// # context omitted so that this applies to both sidecars and gateways +// listener: +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// typed_config: +// "@type": "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" +// idle_timeout: 30s +//``` +// +// The following example enables Envoy's Lua filter for all inbound +// HTTP calls arriving at service port 8080 of the reviews service pod +// with labels "app: reviews", in the bookinfo namespace. 
The lua +// filter calls out to an external service internal.org.net:8888 that +// requires a special cluster definition in envoy. The cluster is also +// added to the sidecar as part of this configuration. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: EnvoyFilter +// metadata: +// name: reviews-lua +// namespace: bookinfo +// spec: +// workloadSelector: +// labels: +// app: reviews +// configPatches: +// # The first patch adds the lua filter to the listener/http connection manager +// - applyTo: HTTP_FILTER +// match: +// context: SIDECAR_INBOUND +// listener: +// portNumber: 8080 +// filterChain: +// filter: +// name: "envoy.http_connection_manager" +// subFilter: +// name: "envoy.router" +// patch: +// operation: INSERT_BEFORE +// value: # lua filter specification +// name: envoy.lua +// config: +// inlineCode: | +// function envoy_on_request(request_handle) +// -- Make an HTTP call to an upstream host with the following headers, body, and timeout. +// local headers, body = request_handle:httpCall( +// "lua_cluster", +// { +// [":method"] = "POST", +// [":path"] = "/acl", +// [":authority"] = "internal.org.net" +// }, +// "authorize call", +// 5000) +// end +// # The second patch adds the cluster that is referenced by the lua code +// # cds match is omitted as a new cluster is being added +// - applyTo: CLUSTER +// match: +// context: SIDECAR_OUTBOUND +// patch: +// operation: ADD +// value: # cluster specification +// name: "lua_cluster" +// type: STRICT_DNS +// connect_timeout: 0.5s +// lb_policy: ROUND_ROBIN +// hosts: +// - socket_address: +// protocol: TCP +// address: "internal.org.net" +// port_value: 8888 +// +// ``` +// +// The following example overwrites certain fields (HTTP idle timeout +// and X-Forward-For trusted hops) in the HTTP connection manager in a +// listener on the ingress gateway in istio-system namespace for the +// SNI host app.example.com: +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: 
EnvoyFilter +// metadata: +// name: hcm-tweaks +// namespace: istio-system +// spec: +// workloadSelector: +// labels: +// istio: ingress-gateway +// configPatches: +// - applyTo: NETWORK_FILTER # http connection manager is a filter in Envoy +// match: +// context: GATEWAY +// listener: +// filterChain: +// sni: app.example.com +// filter: +// name: "envoy.http_connection_manager" +// patch: +// operation: MERGE +// value: +// idle_timeout: 30s +// xff_num_trusted_hops: 5 +//``` +// + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for EnvoyFilter +func (this *EnvoyFilter) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter +func (this *EnvoyFilter) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_DeprecatedListenerMatch +func (this *EnvoyFilter_DeprecatedListenerMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_DeprecatedListenerMatch +func (this *EnvoyFilter_DeprecatedListenerMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_InsertPosition +func (this *EnvoyFilter_InsertPosition) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err 
+} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_InsertPosition +func (this *EnvoyFilter_InsertPosition) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_Filter +func (this *EnvoyFilter_Filter) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_Filter +func (this *EnvoyFilter_Filter) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ProxyMatch +func (this *EnvoyFilter_ProxyMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ProxyMatch +func (this *EnvoyFilter_ProxyMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ClusterMatch +func (this *EnvoyFilter_ClusterMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ClusterMatch +func (this *EnvoyFilter_ClusterMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_RouteConfigurationMatch +func (this *EnvoyFilter_RouteConfigurationMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_RouteConfigurationMatch +func (this *EnvoyFilter_RouteConfigurationMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + 
+// MarshalJSON is a custom marshaler for EnvoyFilter_RouteConfigurationMatch_RouteMatch +func (this *EnvoyFilter_RouteConfigurationMatch_RouteMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_RouteConfigurationMatch_RouteMatch +func (this *EnvoyFilter_RouteConfigurationMatch_RouteMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch +func (this *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch +func (this *EnvoyFilter_RouteConfigurationMatch_VirtualHostMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ListenerMatch +func (this *EnvoyFilter_ListenerMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ListenerMatch +func (this *EnvoyFilter_ListenerMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ListenerMatch_FilterChainMatch +func (this *EnvoyFilter_ListenerMatch_FilterChainMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ListenerMatch_FilterChainMatch +func (this *EnvoyFilter_ListenerMatch_FilterChainMatch) UnmarshalJSON(b []byte) error { + return 
EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ListenerMatch_FilterMatch +func (this *EnvoyFilter_ListenerMatch_FilterMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ListenerMatch_FilterMatch +func (this *EnvoyFilter_ListenerMatch_FilterMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_ListenerMatch_SubFilterMatch +func (this *EnvoyFilter_ListenerMatch_SubFilterMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_ListenerMatch_SubFilterMatch +func (this *EnvoyFilter_ListenerMatch_SubFilterMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_Patch +func (this *EnvoyFilter_Patch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_Patch +func (this *EnvoyFilter_Patch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for EnvoyFilter_EnvoyConfigObjectMatch +func (this *EnvoyFilter_EnvoyConfigObjectMatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_EnvoyConfigObjectMatch +func (this *EnvoyFilter_EnvoyConfigObjectMatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for 
EnvoyFilter_EnvoyConfigObjectPatch +func (this *EnvoyFilter_EnvoyConfigObjectPatch) MarshalJSON() ([]byte, error) { + str, err := EnvoyFilterMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for EnvoyFilter_EnvoyConfigObjectPatch +func (this *EnvoyFilter_EnvoyConfigObjectPatch) UnmarshalJSON(b []byte) error { + return EnvoyFilterUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var ( + EnvoyFilterMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + EnvoyFilterUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/gateway.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/gateway.pb.go new file mode 100644 index 0000000000..f46920bc62 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/gateway.pb.go @@ -0,0 +1,2391 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/gateway.proto + +// `Gateway` describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. The specification +// describes a set of ports that should be exposed, the type of protocol to +// use, SNI configuration for the load balancer, etc. +// +// For example, the following Gateway configuration sets up a proxy to act +// as a load balancer exposing port 80 and 9080 (http), 443 (https), +// 9443(https) and port 2379 (TCP) for ingress. The gateway will be +// applied to the proxy running on a pod with labels `app: +// my-gateway-controller`. While Istio will configure the proxy to listen +// on these ports, it is the responsibility of the user to ensure that +// external traffic to these ports are allowed into the mesh. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// httpsRedirect: true # sends 301 redirect for http requests +// - port: +// number: 443 +// name: https-443 +// protocol: HTTPS +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// mode: SIMPLE # enables HTTPS on this port +// serverCertificate: /etc/certs/servercert.pem +// privateKey: /etc/certs/privatekey.pem +// - port: +// number: 9443 +// name: https-9443 +// protocol: HTTPS +// hosts: +// - "bookinfo-namespace/*.bookinfo.com" +// tls: +// mode: SIMPLE # enables HTTPS on this port +// credentialName: bookinfo-secret # fetches certs from Kubernetes secret +// - port: +// number: 9080 +// name: http-wildcard +// protocol: HTTP +// hosts: +// - "*" +// - port: +// number: 2379 # to expose internal service via external port 2379 +// name: mongo +// protocol: MONGO +// hosts: +// - "*" +// ``` +// +// The Gateway specification above describes the L4-L6 properties of a load +// balancer. A `VirtualService` can then be bound to a gateway to control +// the forwarding of traffic arriving at a particular host or gateway port. +// +// For example, the following VirtualService splits traffic for +// `https://uk.bookinfo.com/reviews`, `https://eu.bookinfo.com/reviews`, +// `http://uk.bookinfo.com:9080/reviews`, +// `http://eu.bookinfo.com:9080/reviews` into two versions (prod and qa) of +// an internal reviews service on port 9080. In addition, requests +// containing the cookie "user: dev-123" will be sent to special port 7777 +// in the qa version. The same rule is also applicable inside the mesh for +// requests to the "reviews.prod.svc.cluster.local" service. This rule is +// applicable across ports 443, 9080. 
Note that `http://uk.bookinfo.com` +// gets redirected to `https://uk.bookinfo.com` (i.e. 80 redirects to 443). +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-rule +// namespace: bookinfo-namespace +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// - uk.bookinfo.com +// - eu.bookinfo.com +// gateways: +// - some-config-namespace/my-gateway +// - mesh # applies to all the sidecars in the mesh +// http: +// - match: +// - headers: +// cookie: +// exact: "user=dev-123" +// route: +// - destination: +// port: +// number: 7777 +// host: reviews.qa.svc.cluster.local +// - match: +// - uri: +// prefix: /reviews/ +// route: +// - destination: +// port: +// number: 9080 # can be omitted if it's the only port for reviews +// host: reviews.prod.svc.cluster.local +// weight: 80 +// - destination: +// host: reviews.qa.svc.cluster.local +// weight: 20 +// ``` +// +// The following VirtualService forwards traffic arriving at (external) +// port 27017 to internal Mongo server on port 5555. This rule is not +// applicable internally in the mesh as the gateway list omits the +// reserved name `mesh`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-Mongo +// namespace: bookinfo-namespace +// spec: +// hosts: +// - mongosvr.prod.svc.cluster.local # name of internal Mongo service +// gateways: +// - some-config-namespace/my-gateway # can omit the namespace if gateway is in same +// namespace as virtual service. +// tcp: +// - match: +// - port: 27017 +// route: +// - destination: +// host: mongo.prod.svc.cluster.local +// port: +// number: 5555 +// ``` +// +// It is possible to restrict the set of virtual services that can bind to +// a gateway server using the namespace/hostname syntax in the hosts field. 
+// For example, the following Gateway allows any virtual service in the ns1 +// namespace to bind to it, while restricting only the virtual service with +// foo.bar.com host in the ns2 namespace to bind to it. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "ns1/*" +// - "ns2/foo.bar.com" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// TLS modes enforced by the proxy +type Server_TLSOptions_TLSmode int32 + +const ( + // The SNI string presented by the client will be used as the match + // criterion in a VirtualService TLS route to determine the + // destination service from the service registry. + Server_TLSOptions_PASSTHROUGH Server_TLSOptions_TLSmode = 0 + // Secure connections with standard TLS semantics. + Server_TLSOptions_SIMPLE Server_TLSOptions_TLSmode = 1 + // Secure connections to the downstream using mutual TLS by presenting + // server certificates for authentication. 
+ Server_TLSOptions_MUTUAL Server_TLSOptions_TLSmode = 2 + // Similar to the passthrough mode, except servers with this TLS mode + // do not require an associated VirtualService to map from the SNI + // value to service in the registry. The destination details such as + // the service/subset/port are encoded in the SNI value. The proxy + // will forward to the upstream (Envoy) cluster (a group of + // endpoints) specified by the SNI value. This server is typically + // used to provide connectivity between services in disparate L3 + // networks that otherwise do not have direct connectivity between + // their respective endpoints. Use of this mode assumes that both the + // source and the destination are using Istio mTLS to secure traffic. + Server_TLSOptions_AUTO_PASSTHROUGH Server_TLSOptions_TLSmode = 3 + // Secure connections from the downstream using mutual TLS by presenting + // server certificates for authentication. + // Compared to Mutual mode, this mode uses certificates, representing + // gateway workload identity, generated automatically by Istio for + // mTLS authentication. When this mode is used, all other fields in + // `TLSOptions` should be empty. + Server_TLSOptions_ISTIO_MUTUAL Server_TLSOptions_TLSmode = 4 +) + +var Server_TLSOptions_TLSmode_name = map[int32]string{ + 0: "PASSTHROUGH", + 1: "SIMPLE", + 2: "MUTUAL", + 3: "AUTO_PASSTHROUGH", + 4: "ISTIO_MUTUAL", +} + +var Server_TLSOptions_TLSmode_value = map[string]int32{ + "PASSTHROUGH": 0, + "SIMPLE": 1, + "MUTUAL": 2, + "AUTO_PASSTHROUGH": 3, + "ISTIO_MUTUAL": 4, +} + +func (x Server_TLSOptions_TLSmode) String() string { + return proto.EnumName(Server_TLSOptions_TLSmode_name, int32(x)) +} + +func (Server_TLSOptions_TLSmode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{1, 0, 0} +} + +// TLS protocol versions. +type Server_TLSOptions_TLSProtocol int32 + +const ( + // Automatically choose the optimal TLS version. 
+ Server_TLSOptions_TLS_AUTO Server_TLSOptions_TLSProtocol = 0 + // TLS version 1.0 + Server_TLSOptions_TLSV1_0 Server_TLSOptions_TLSProtocol = 1 + // TLS version 1.1 + Server_TLSOptions_TLSV1_1 Server_TLSOptions_TLSProtocol = 2 + // TLS version 1.2 + Server_TLSOptions_TLSV1_2 Server_TLSOptions_TLSProtocol = 3 + // TLS version 1.3 + Server_TLSOptions_TLSV1_3 Server_TLSOptions_TLSProtocol = 4 +) + +var Server_TLSOptions_TLSProtocol_name = map[int32]string{ + 0: "TLS_AUTO", + 1: "TLSV1_0", + 2: "TLSV1_1", + 3: "TLSV1_2", + 4: "TLSV1_3", +} + +var Server_TLSOptions_TLSProtocol_value = map[string]int32{ + "TLS_AUTO": 0, + "TLSV1_0": 1, + "TLSV1_1": 2, + "TLSV1_2": 3, + "TLSV1_3": 4, +} + +func (x Server_TLSOptions_TLSProtocol) String() string { + return proto.EnumName(Server_TLSOptions_TLSProtocol_name, int32(x)) +} + +func (Server_TLSOptions_TLSProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{1, 0, 1} +} + +// Gateway describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. +// +// +type Gateway struct { + // A list of server specifications. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + // One or more labels that indicate a specific set of pods/VMs + // on which this gateway configuration should be applied. The scope of + // label search is restricted to the configuration namespace in which the + // the resource is present. In other words, the Gateway resource must + // reside in the same namespace as the gateway workload instance. 
+ Selector map[string]string `protobuf:"bytes,2,rep,name=selector,proto3" json:"selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gateway) Reset() { *m = Gateway{} } +func (m *Gateway) String() string { return proto.CompactTextString(m) } +func (*Gateway) ProtoMessage() {} +func (*Gateway) Descriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{0} +} +func (m *Gateway) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gateway) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gateway.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gateway) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gateway.Merge(m, src) +} +func (m *Gateway) XXX_Size() int { + return m.Size() +} +func (m *Gateway) XXX_DiscardUnknown() { + xxx_messageInfo_Gateway.DiscardUnknown(m) +} + +var xxx_messageInfo_Gateway proto.InternalMessageInfo + +func (m *Gateway) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +func (m *Gateway) GetSelector() map[string]string { + if m != nil { + return m.Selector + } + return nil +} + +// `Server` describes the properties of the proxy on a given load balancer +// port. 
For example, +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-ingress +// spec: +// selector: +// app: my-ingress-gateway +// servers: +// - port: +// number: 80 +// name: http2 +// protocol: HTTP2 +// hosts: +// - "*" +// ``` +// +// Another example +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-tcp-ingress +// spec: +// selector: +// app: my-tcp-ingress-gateway +// servers: +// - port: +// number: 27018 +// name: mongo +// protocol: MONGO +// hosts: +// - "*" +// ``` +// +// The following is an example of TLS configuration for port 443 +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-tls-ingress +// spec: +// selector: +// app: my-tls-ingress-gateway +// servers: +// - port: +// number: 443 +// name: https +// protocol: HTTPS +// hosts: +// - "*" +// tls: +// mode: SIMPLE +// serverCertificate: /etc/certs/server.pem +// privateKey: /etc/certs/privatekey.pem +// ``` +type Server struct { + // The Port on which the proxy should listen for incoming + // connections. + Port *Port `protobuf:"bytes,1,opt,name=port,proto3" json:"port,omitempty"` + // $hide_from_docs + // The ip or the Unix domain socket to which the listener should be bound + // to. Format: `x.x.x.x` or `unix:///path/to/uds` or `unix://@foobar` + // (Linux abstract namespace). When using Unix domain sockets, the port + // number should be 0. + Bind string `protobuf:"bytes,4,opt,name=bind,proto3" json:"bind,omitempty"` + // One or more hosts exposed by this gateway. + // While typically applicable to + // HTTP services, it can also be used for TCP services using TLS with SNI. + // A host is specified as a `dnsName` with an optional `namespace/` prefix. + // The `dnsName` should be specified using FQDN format, optionally including + // a wildcard character in the left-most component (e.g., `prod/*.example.com`). 
+ // Set the `dnsName` to `*` to select all `VirtualService` hosts from the + // specified namespace (e.g.,`prod/*`). + // + // The `namespace` can be set to `*` or `.`, representing any or the current + // namespace, respectively. For example, `*/foo.example.com` selects the + // service from any available namespace while `./foo.example.com` only selects + // the service from the namespace of the sidecar. The default, if no `namespace/` + // is specified, is `*/`, that is, select services from any namespace. + // Any associated `DestinationRule` in the selected namespace will also be used. + // + // A `VirtualService` must be bound to the gateway and must have one or + // more hosts that match the hosts specified in a server. The match + // could be an exact match or a suffix match with the server's hosts. For + // example, if the server's hosts specifies `*.example.com`, a + // `VirtualService` with hosts `dev.example.com` or `prod.example.com` will + // match. However, a `VirtualService` with host `example.com` or + // `newexample.com` will not match. + // + // NOTE: Only virtual services exported to the gateway's namespace + // (e.g., `exportTo` value of `*`) can be referenced. + // Private configurations (e.g., `exportTo` set to `.`) will not be + // available. Refer to the `exportTo` setting in `VirtualService`, + // `DestinationRule`, and `ServiceEntry` configurations for details. + Hosts []string `protobuf:"bytes,2,rep,name=hosts,proto3" json:"hosts,omitempty"` + // Set of TLS related options that govern the server's behavior. Use + // these options to control if all http requests should be redirected to + // https, and the TLS modes to use. + Tls *Server_TLSOptions `protobuf:"bytes,3,opt,name=tls,proto3" json:"tls,omitempty"` + // The loopback IP endpoint or Unix domain socket to which traffic should + // be forwarded to by default. Format should be `127.0.0.1:PORT` or + // `unix:///path/to/socket` or `unix://@foobar` (Linux abstract namespace). 
+ DefaultEndpoint string `protobuf:"bytes,5,opt,name=default_endpoint,json=defaultEndpoint,proto3" json:"default_endpoint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{1} +} +func (m *Server) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(m, src) +} +func (m *Server) XXX_Size() int { + return m.Size() +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetPort() *Port { + if m != nil { + return m.Port + } + return nil +} + +func (m *Server) GetBind() string { + if m != nil { + return m.Bind + } + return "" +} + +func (m *Server) GetHosts() []string { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *Server) GetTls() *Server_TLSOptions { + if m != nil { + return m.Tls + } + return nil +} + +func (m *Server) GetDefaultEndpoint() string { + if m != nil { + return m.DefaultEndpoint + } + return "" +} + +type Server_TLSOptions struct { + // If set to true, the load balancer will send a 301 redirect for all + // http connections, asking the clients to use HTTPS. 
+ HttpsRedirect bool `protobuf:"varint,1,opt,name=https_redirect,json=httpsRedirect,proto3" json:"https_redirect,omitempty"` + // Optional: Indicates whether connections to this port should be + // secured using TLS. The value of this field determines how TLS is + // enforced. + Mode Server_TLSOptions_TLSmode `protobuf:"varint,2,opt,name=mode,proto3,enum=istio.networking.v1alpha3.Server_TLSOptions_TLSmode" json:"mode,omitempty"` + // REQUIRED if mode is `SIMPLE` or `MUTUAL`. The path to the file + // holding the server-side TLS certificate to use. + ServerCertificate string `protobuf:"bytes,3,opt,name=server_certificate,json=serverCertificate,proto3" json:"server_certificate,omitempty"` + // REQUIRED if mode is `SIMPLE` or `MUTUAL`. The path to the file + // holding the server's private key. + PrivateKey string `protobuf:"bytes,4,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // REQUIRED if mode is `MUTUAL`. The path to a file containing + // certificate authority certificates to use in verifying a presented + // client side certificate. + CaCertificates string `protobuf:"bytes,5,opt,name=ca_certificates,json=caCertificates,proto3" json:"ca_certificates,omitempty"` + // The credentialName stands for a unique identifier that can be used + // to identify the serverCertificate and the privateKey. The + // credentialName appended with suffix "-cacert" is used to identify + // the CaCertificates associated with this server. Gateway workloads + // capable of fetching credentials from a remote credential store such + // as Kubernetes secrets, will be configured to retrieve the + // serverCertificate and the privateKey using credentialName, instead + // of using the file system paths specified above. If using mutual TLS, + // gateway workload instances will retrieve the CaCertificates using + // credentialName-cacert. The semantics of the name are platform + // dependent. 
In Kubernetes, the default Istio supplied credential + // server expects the credentialName to match the name of the + // Kubernetes secret that holds the server certificate, the private + // key, and the CA certificate (if using mutual TLS). Set the + // `ISTIO_META_USER_SDS` metadata variable in the gateway's proxy to + // enable the dynamic credential fetching feature. + CredentialName string `protobuf:"bytes,10,opt,name=credential_name,json=credentialName,proto3" json:"credential_name,omitempty"` + // A list of alternate names to verify the subject identity in the + // certificate presented by the client. + SubjectAltNames []string `protobuf:"bytes,6,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"` + // An optional list of base64-encoded SHA-256 hashes of the SKPIs of + // authorized client certificates. + // Note: When both verify_certificate_hash and verify_certificate_spki + // are specified, a hash matching either value will result in the + // certificate being accepted. + VerifyCertificateSpki []string `protobuf:"bytes,11,rep,name=verify_certificate_spki,json=verifyCertificateSpki,proto3" json:"verify_certificate_spki,omitempty"` + // An optional list of hex-encoded SHA-256 hashes of the + // authorized client certificates. Both simple and colon separated + // formats are acceptable. + // Note: When both verify_certificate_hash and verify_certificate_spki + // are specified, a hash matching either value will result in the + // certificate being accepted. + VerifyCertificateHash []string `protobuf:"bytes,12,rep,name=verify_certificate_hash,json=verifyCertificateHash,proto3" json:"verify_certificate_hash,omitempty"` + // Optional: Minimum TLS protocol version. 
+ MinProtocolVersion Server_TLSOptions_TLSProtocol `protobuf:"varint,7,opt,name=min_protocol_version,json=minProtocolVersion,proto3,enum=istio.networking.v1alpha3.Server_TLSOptions_TLSProtocol" json:"min_protocol_version,omitempty"` + // Optional: Maximum TLS protocol version. + MaxProtocolVersion Server_TLSOptions_TLSProtocol `protobuf:"varint,8,opt,name=max_protocol_version,json=maxProtocolVersion,proto3,enum=istio.networking.v1alpha3.Server_TLSOptions_TLSProtocol" json:"max_protocol_version,omitempty"` + // Optional: If specified, only support the specified cipher list. + // Otherwise default to the default cipher list supported by Envoy. + CipherSuites []string `protobuf:"bytes,9,rep,name=cipher_suites,json=cipherSuites,proto3" json:"cipher_suites,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server_TLSOptions) Reset() { *m = Server_TLSOptions{} } +func (m *Server_TLSOptions) String() string { return proto.CompactTextString(m) } +func (*Server_TLSOptions) ProtoMessage() {} +func (*Server_TLSOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{1, 0} +} +func (m *Server_TLSOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Server_TLSOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Server_TLSOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Server_TLSOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server_TLSOptions.Merge(m, src) +} +func (m *Server_TLSOptions) XXX_Size() int { + return m.Size() +} +func (m *Server_TLSOptions) XXX_DiscardUnknown() { + xxx_messageInfo_Server_TLSOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_Server_TLSOptions proto.InternalMessageInfo + +func (m *Server_TLSOptions) 
GetHttpsRedirect() bool { + if m != nil { + return m.HttpsRedirect + } + return false +} + +func (m *Server_TLSOptions) GetMode() Server_TLSOptions_TLSmode { + if m != nil { + return m.Mode + } + return Server_TLSOptions_PASSTHROUGH +} + +func (m *Server_TLSOptions) GetServerCertificate() string { + if m != nil { + return m.ServerCertificate + } + return "" +} + +func (m *Server_TLSOptions) GetPrivateKey() string { + if m != nil { + return m.PrivateKey + } + return "" +} + +func (m *Server_TLSOptions) GetCaCertificates() string { + if m != nil { + return m.CaCertificates + } + return "" +} + +func (m *Server_TLSOptions) GetCredentialName() string { + if m != nil { + return m.CredentialName + } + return "" +} + +func (m *Server_TLSOptions) GetSubjectAltNames() []string { + if m != nil { + return m.SubjectAltNames + } + return nil +} + +func (m *Server_TLSOptions) GetVerifyCertificateSpki() []string { + if m != nil { + return m.VerifyCertificateSpki + } + return nil +} + +func (m *Server_TLSOptions) GetVerifyCertificateHash() []string { + if m != nil { + return m.VerifyCertificateHash + } + return nil +} + +func (m *Server_TLSOptions) GetMinProtocolVersion() Server_TLSOptions_TLSProtocol { + if m != nil { + return m.MinProtocolVersion + } + return Server_TLSOptions_TLS_AUTO +} + +func (m *Server_TLSOptions) GetMaxProtocolVersion() Server_TLSOptions_TLSProtocol { + if m != nil { + return m.MaxProtocolVersion + } + return Server_TLSOptions_TLS_AUTO +} + +func (m *Server_TLSOptions) GetCipherSuites() []string { + if m != nil { + return m.CipherSuites + } + return nil +} + +// Port describes the properties of a specific port of a service. +type Port struct { + // A valid non-negative integer port number. + Number uint32 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"` + // The protocol exposed on the port. + // MUST BE one of HTTP|HTTPS|GRPC|HTTP2|MONGO|TCP|TLS. 
+ // TLS implies the connection will be routed based on the SNI header to + // the destination without terminating the TLS connection. + Protocol string `protobuf:"bytes,2,opt,name=protocol,proto3" json:"protocol,omitempty"` + // Label assigned to the port. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Port) Reset() { *m = Port{} } +func (m *Port) String() string { return proto.CompactTextString(m) } +func (*Port) ProtoMessage() {} +func (*Port) Descriptor() ([]byte, []int) { + return fileDescriptor_067d98d02f84cc0b, []int{2} +} +func (m *Port) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Port) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Port.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Port) XXX_Merge(src proto.Message) { + xxx_messageInfo_Port.Merge(m, src) +} +func (m *Port) XXX_Size() int { + return m.Size() +} +func (m *Port) XXX_DiscardUnknown() { + xxx_messageInfo_Port.DiscardUnknown(m) +} + +var xxx_messageInfo_Port proto.InternalMessageInfo + +func (m *Port) GetNumber() uint32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Port) GetProtocol() string { + if m != nil { + return m.Protocol + } + return "" +} + +func (m *Port) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("istio.networking.v1alpha3.Server_TLSOptions_TLSmode", Server_TLSOptions_TLSmode_name, Server_TLSOptions_TLSmode_value) + proto.RegisterEnum("istio.networking.v1alpha3.Server_TLSOptions_TLSProtocol", Server_TLSOptions_TLSProtocol_name, Server_TLSOptions_TLSProtocol_value) + proto.RegisterType((*Gateway)(nil), "istio.networking.v1alpha3.Gateway") + 
proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.Gateway.SelectorEntry") + proto.RegisterType((*Server)(nil), "istio.networking.v1alpha3.Server") + proto.RegisterType((*Server_TLSOptions)(nil), "istio.networking.v1alpha3.Server.TLSOptions") + proto.RegisterType((*Port)(nil), "istio.networking.v1alpha3.Port") +} + +func init() { proto.RegisterFile("networking/v1alpha3/gateway.proto", fileDescriptor_067d98d02f84cc0b) } + +var fileDescriptor_067d98d02f84cc0b = []byte{ + // 762 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x5d, 0x6f, 0x22, 0x37, + 0x14, 0xdd, 0x01, 0xc2, 0xc7, 0x05, 0xc2, 0xac, 0x95, 0xaa, 0xb3, 0xa9, 0x14, 0xb2, 0x54, 0x55, + 0xb7, 0x55, 0x3b, 0x6c, 0x48, 0x55, 0x45, 0xad, 0x54, 0x95, 0xad, 0xa2, 0x10, 0x95, 0x14, 0x3a, + 0x03, 0x51, 0x95, 0x97, 0x91, 0x19, 0x0c, 0xe3, 0x30, 0x8c, 0x47, 0xb6, 0x21, 0xe1, 0xff, 0xf5, + 0xa1, 0x8f, 0x7d, 0xef, 0x4b, 0x14, 0xa9, 0xff, 0xa3, 0x1a, 0x7b, 0x08, 0xf4, 0x23, 0xa9, 0xa2, + 0x7d, 0xb3, 0x8f, 0xcf, 0x39, 0xf7, 0xda, 0xd7, 0xf7, 0xc2, 0xeb, 0x88, 0xc8, 0x1b, 0xc6, 0x67, + 0x34, 0x9a, 0x36, 0x97, 0x47, 0x38, 0x8c, 0x03, 0x7c, 0xdc, 0x9c, 0x62, 0x49, 0x6e, 0xf0, 0xca, + 0x8e, 0x39, 0x93, 0x0c, 0xbd, 0xa2, 0x42, 0x52, 0x66, 0x6f, 0x88, 0xf6, 0x9a, 0xb8, 0x5f, 0x9f, + 0x32, 0x36, 0x0d, 0x49, 0x13, 0xc7, 0xb4, 0x39, 0xa1, 0x24, 0x1c, 0x7b, 0x23, 0x12, 0xe0, 0x25, + 0x65, 0x5c, 0x6b, 0x1b, 0x7f, 0x18, 0x50, 0x38, 0xd3, 0x6e, 0xe8, 0x7b, 0x28, 0x08, 0xc2, 0x97, + 0x84, 0x0b, 0xcb, 0x38, 0xcc, 0xbe, 0x29, 0xb7, 0x5e, 0xdb, 0x8f, 0x3a, 0xdb, 0xae, 0x62, 0xbe, + 0xcb, 0xde, 0xb5, 0x33, 0xce, 0x5a, 0x86, 0x7e, 0x86, 0xa2, 0x20, 0x21, 0xf1, 0x25, 0xe3, 0x56, + 0x46, 0x59, 0xbc, 0x7d, 0xc2, 0x22, 0x8d, 0x6b, 0xbb, 0xa9, 0xe4, 0x34, 0x92, 0x7c, 0xa5, 0x1d, + 0x1f, 0x6c, 0xf6, 0xbf, 0x85, 0xea, 0xdf, 0xce, 0x91, 0x09, 0xd9, 0x19, 0x59, 0x59, 0xc6, 0xa1, + 0xf1, 0xa6, 0xe4, 0x24, 0x4b, 0xb4, 0x07, 0x3b, 0x4b, 0x1c, 0x2e, 0x88, 0x95, 
0x51, 0x98, 0xde, + 0x7c, 0x93, 0x39, 0x31, 0x1a, 0xbf, 0x16, 0x21, 0xaf, 0x13, 0x45, 0x27, 0x90, 0x8b, 0x19, 0x97, + 0x4a, 0x57, 0x6e, 0xd5, 0x9f, 0x48, 0xab, 0xcf, 0xb8, 0xd4, 0x59, 0x28, 0x05, 0x42, 0x90, 0x1b, + 0xd1, 0x68, 0x6c, 0xe5, 0x94, 0xbb, 0x5a, 0xa3, 0x57, 0xb0, 0x13, 0x30, 0x21, 0x85, 0xba, 0x65, + 0x49, 0xb3, 0x35, 0x82, 0xbe, 0x83, 0xac, 0x0c, 0x85, 0x95, 0x55, 0x71, 0xbe, 0xf8, 0xdf, 0x17, + 0xb4, 0x07, 0x5d, 0xb7, 0x17, 0x4b, 0xca, 0x22, 0xe1, 0x24, 0x42, 0xf4, 0x19, 0x98, 0x63, 0x32, + 0xc1, 0x8b, 0x50, 0x7a, 0x24, 0x1a, 0xc7, 0x8c, 0x46, 0xd2, 0xda, 0x51, 0xa1, 0x6b, 0x29, 0x7e, + 0x9a, 0xc2, 0xfb, 0x7f, 0xe6, 0x01, 0x36, 0x72, 0xf4, 0x09, 0xec, 0x06, 0x52, 0xc6, 0xc2, 0xe3, + 0x64, 0x4c, 0x39, 0xf1, 0xf5, 0x65, 0x8b, 0x4e, 0x55, 0xa1, 0x4e, 0x0a, 0xa2, 0x0e, 0xe4, 0xe6, + 0x6c, 0xac, 0x5f, 0x6b, 0xb7, 0xf5, 0xd5, 0x73, 0x32, 0x4c, 0x96, 0x89, 0xd6, 0x51, 0x0e, 0xe8, + 0x4b, 0x40, 0xba, 0xf2, 0x9e, 0x4f, 0xb8, 0xa4, 0x13, 0xea, 0x63, 0x49, 0xd4, 0xcd, 0x4b, 0xce, + 0x4b, 0x7d, 0xf2, 0xc3, 0xe6, 0x00, 0xd5, 0xa1, 0x1c, 0x73, 0xba, 0xc4, 0x92, 0x78, 0x49, 0x05, + 0xf5, 0x7b, 0x42, 0x0a, 0xfd, 0x48, 0x56, 0xe8, 0x53, 0xa8, 0xf9, 0x78, 0xdb, 0x4b, 0xa4, 0x37, + 0xdf, 0xf5, 0xf1, 0x96, 0x91, 0x50, 0x44, 0x4e, 0xc6, 0x24, 0x92, 0x14, 0x87, 0x5e, 0x84, 0xe7, + 0xc4, 0x82, 0x94, 0xf8, 0x00, 0xff, 0x84, 0xe7, 0x04, 0x7d, 0x0e, 0x2f, 0xc5, 0x62, 0x74, 0x4d, + 0x7c, 0xe9, 0xe1, 0x50, 0x2a, 0xa6, 0xb0, 0xf2, 0x49, 0xcd, 0x9c, 0x5a, 0x7a, 0xd0, 0x0e, 0x65, + 0x42, 0x15, 0xe8, 0x6b, 0xf8, 0x70, 0x49, 0x38, 0x9d, 0xac, 0xb6, 0x33, 0xf0, 0x44, 0x3c, 0xa3, + 0x56, 0x59, 0x29, 0x3e, 0xd0, 0xc7, 0x5b, 0x99, 0xb8, 0xf1, 0x8c, 0x3e, 0xa2, 0x0b, 0xb0, 0x08, + 0xac, 0xca, 0x23, 0xba, 0x0e, 0x16, 0x01, 0xba, 0x86, 0xbd, 0x39, 0x8d, 0x3c, 0xd5, 0x87, 0x3e, + 0x0b, 0xbd, 0xa4, 0x83, 0x28, 0x8b, 0xac, 0x82, 0xaa, 0xcb, 0xc9, 0x73, 0xeb, 0xd2, 0x4f, 0x7d, + 0x1c, 0x34, 0xa7, 0xd1, 0x7a, 0x73, 0xa9, 0x3d, 0x55, 0x2c, 0x7c, 0xfb, 0xef, 0x58, 0xc5, 0xf7, + 0x8e, 0x85, 0x6f, 
0xff, 0x19, 0xeb, 0x63, 0xa8, 0xfa, 0x34, 0x0e, 0x08, 0xf7, 0xc4, 0x82, 0x26, + 0x35, 0x2c, 0xa9, 0x57, 0xa8, 0x68, 0xd0, 0x55, 0x58, 0xe3, 0x0a, 0x0a, 0xe9, 0x5f, 0x42, 0x35, + 0x28, 0xf7, 0xdb, 0xae, 0x3b, 0xe8, 0x38, 0xbd, 0xe1, 0x59, 0xc7, 0x7c, 0x81, 0x00, 0xf2, 0xee, + 0xf9, 0x45, 0xbf, 0x7b, 0x6a, 0x1a, 0xc9, 0xfa, 0x62, 0x38, 0x18, 0xb6, 0xbb, 0x66, 0x06, 0xed, + 0x81, 0xd9, 0x1e, 0x0e, 0x7a, 0xde, 0x36, 0x3b, 0x8b, 0x4c, 0xa8, 0x9c, 0xbb, 0x83, 0xf3, 0x9e, + 0x97, 0xf2, 0x72, 0x8d, 0x1e, 0x94, 0xb7, 0x72, 0x44, 0x15, 0x28, 0x0e, 0xba, 0xae, 0x97, 0x48, + 0xcd, 0x17, 0xa8, 0xac, 0x02, 0x5f, 0x1e, 0x79, 0x6f, 0x4d, 0x63, 0xb3, 0x39, 0x32, 0x33, 0x9b, + 0x4d, 0xcb, 0xcc, 0x6e, 0x36, 0xc7, 0x66, 0xae, 0xf1, 0x0b, 0xe4, 0x92, 0xa1, 0x80, 0x3e, 0x82, + 0x7c, 0xb4, 0x98, 0x8f, 0x08, 0x57, 0x8d, 0x55, 0xd5, 0x6d, 0x9f, 0x42, 0xa8, 0x0e, 0xc5, 0xf5, + 0xf3, 0xea, 0x41, 0x94, 0x4e, 0xb2, 0x35, 0x98, 0xcc, 0x11, 0xf5, 0x53, 0x75, 0x7f, 0xa8, 0xf5, + 0x3b, 0xfb, 0xb7, 0xfb, 0x03, 0xe3, 0xf7, 0xfb, 0x03, 0xe3, 0xee, 0xfe, 0xc0, 0xb8, 0x3a, 0xd4, + 0x65, 0xa0, 0x4c, 0xcd, 0xeb, 0xff, 0x18, 0xfc, 0xa3, 0xbc, 0x72, 0x3b, 0xfe, 0x2b, 0x00, 0x00, + 0xff, 0xff, 0x52, 0x8a, 0xc5, 0xe7, 0x16, 0x06, 0x00, 0x00, +} + +func (m *Gateway) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gateway) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gateway) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Selector) > 0 { + for k := range m.Selector { + v := m.Selector[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) 
+ copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Servers) > 0 { + for iNdEx := len(m.Servers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Servers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Server) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Server) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Server) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultEndpoint) > 0 { + i -= len(m.DefaultEndpoint) + copy(dAtA[i:], m.DefaultEndpoint) + i = encodeVarintGateway(dAtA, i, uint64(len(m.DefaultEndpoint))) + i-- + dAtA[i] = 0x2a + } + if len(m.Bind) > 0 { + i -= len(m.Bind) + copy(dAtA[i:], m.Bind) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Bind))) + i-- + dAtA[i] = 0x22 + } + if m.Tls != nil { + { + size, err := m.Tls.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Server_TLSOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Server_TLSOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Server_TLSOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.VerifyCertificateHash) > 0 { + for iNdEx := len(m.VerifyCertificateHash) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateHash[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateHash[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.VerifyCertificateHash[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.VerifyCertificateSpki) > 0 { + for iNdEx := len(m.VerifyCertificateSpki) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VerifyCertificateSpki[iNdEx]) + copy(dAtA[i:], m.VerifyCertificateSpki[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.VerifyCertificateSpki[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } + if len(m.CredentialName) > 0 { + i -= len(m.CredentialName) + copy(dAtA[i:], m.CredentialName) + i = encodeVarintGateway(dAtA, i, uint64(len(m.CredentialName))) + i-- + dAtA[i] = 0x52 + } + if len(m.CipherSuites) > 0 { + for iNdEx := len(m.CipherSuites) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CipherSuites[iNdEx]) + copy(dAtA[i:], m.CipherSuites[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.CipherSuites[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if m.MaxProtocolVersion != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.MaxProtocolVersion)) + i-- + dAtA[i] = 0x40 + } + if m.MinProtocolVersion != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.MinProtocolVersion)) + i-- + dAtA[i] = 
0x38 + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SubjectAltNames[iNdEx]) + copy(dAtA[i:], m.SubjectAltNames[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.SubjectAltNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.CaCertificates) > 0 { + i -= len(m.CaCertificates) + copy(dAtA[i:], m.CaCertificates) + i = encodeVarintGateway(dAtA, i, uint64(len(m.CaCertificates))) + i-- + dAtA[i] = 0x2a + } + if len(m.PrivateKey) > 0 { + i -= len(m.PrivateKey) + copy(dAtA[i:], m.PrivateKey) + i = encodeVarintGateway(dAtA, i, uint64(len(m.PrivateKey))) + i-- + dAtA[i] = 0x22 + } + if len(m.ServerCertificate) > 0 { + i -= len(m.ServerCertificate) + copy(dAtA[i:], m.ServerCertificate) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ServerCertificate))) + i-- + dAtA[i] = 0x1a + } + if m.Mode != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if m.HttpsRedirect { + i-- + if m.HttpsRedirect { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Port) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Port) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Port) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Protocol) > 0 { + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + } + if m.Number != 0 
{ + i = encodeVarintGateway(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { + offset -= sovGateway(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Gateway) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Servers) > 0 { + for _, e := range m.Servers { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Server) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.Tls != nil { + l = m.Tls.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Bind) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.DefaultEndpoint) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Server_TLSOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HttpsRedirect { + n += 2 + } + if m.Mode != 0 { + n += 1 + sovGateway(uint64(m.Mode)) + } + l = len(m.ServerCertificate) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.PrivateKey) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.CaCertificates) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.SubjectAltNames) > 0 { + for _, s := range m.SubjectAltNames { + l = 
len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.MinProtocolVersion != 0 { + n += 1 + sovGateway(uint64(m.MinProtocolVersion)) + } + if m.MaxProtocolVersion != 0 { + n += 1 + sovGateway(uint64(m.MaxProtocolVersion)) + } + if len(m.CipherSuites) > 0 { + for _, s := range m.CipherSuites { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + l = len(m.CredentialName) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.VerifyCertificateSpki) > 0 { + for _, s := range m.VerifyCertificateSpki { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.VerifyCertificateHash) > 0 { + for _, s := range m.VerifyCertificateHash { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Port) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Number != 0 { + n += 1 + sovGateway(uint64(m.Number)) + } + l = len(m.Protocol) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovGateway(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGateway(x uint64) (n int) { + return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Gateway) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gateway: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gateway: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Servers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Servers = append(m.Servers, &Server{}) + if err := m.Servers[len(m.Servers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Server) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Server: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Server: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &Port{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tls == nil { + m.Tls = &Server_TLSOptions{} + } + if err := m.Tls.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultEndpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultEndpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Server_TLSOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpsRedirect", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HttpsRedirect = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= Server_TLSOptions_TLSmode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerCertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerCertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrivateKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrivateKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CaCertificates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CaCertificates = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubjectAltNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubjectAltNames = append(m.SubjectAltNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinProtocolVersion", wireType) + } + m.MinProtocolVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinProtocolVersion |= Server_TLSOptions_TLSProtocol(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxProtocolVersion", wireType) + } + m.MaxProtocolVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxProtocolVersion |= Server_TLSOptions_TLSProtocol(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CipherSuites", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CipherSuites = append(m.CipherSuites, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CredentialName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyCertificateSpki", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VerifyCertificateSpki = 
append(m.VerifyCertificateSpki, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyCertificateHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VerifyCertificateHash = append(m.VerifyCertificateHash, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Port) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Port: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Port: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGateway(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGateway + } + iNdEx += length + if 
iNdEx < 0 { + return 0, ErrInvalidLengthGateway + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGateway(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGateway + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGateway = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGateway = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/gateway_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/gateway_deepcopy.gen.go new file mode 100644 index 0000000000..f3f199307c --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/gateway_deepcopy.gen.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/gateway.proto + +// `Gateway` describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. The specification +// describes a set of ports that should be exposed, the type of protocol to +// use, SNI configuration for the load balancer, etc. +// +// For example, the following Gateway configuration sets up a proxy to act +// as a load balancer exposing port 80 and 9080 (http), 443 (https), +// 9443(https) and port 2379 (TCP) for ingress. The gateway will be +// applied to the proxy running on a pod with labels `app: +// my-gateway-controller`. 
While Istio will configure the proxy to listen +// on these ports, it is the responsibility of the user to ensure that +// external traffic to these ports are allowed into the mesh. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// httpsRedirect: true # sends 301 redirect for http requests +// - port: +// number: 443 +// name: https-443 +// protocol: HTTPS +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// mode: SIMPLE # enables HTTPS on this port +// serverCertificate: /etc/certs/servercert.pem +// privateKey: /etc/certs/privatekey.pem +// - port: +// number: 9443 +// name: https-9443 +// protocol: HTTPS +// hosts: +// - "bookinfo-namespace/*.bookinfo.com" +// tls: +// mode: SIMPLE # enables HTTPS on this port +// credentialName: bookinfo-secret # fetches certs from Kubernetes secret +// - port: +// number: 9080 +// name: http-wildcard +// protocol: HTTP +// hosts: +// - "*" +// - port: +// number: 2379 # to expose internal service via external port 2379 +// name: mongo +// protocol: MONGO +// hosts: +// - "*" +// ``` +// +// The Gateway specification above describes the L4-L6 properties of a load +// balancer. A `VirtualService` can then be bound to a gateway to control +// the forwarding of traffic arriving at a particular host or gateway port. +// +// For example, the following VirtualService splits traffic for +// `https://uk.bookinfo.com/reviews`, `https://eu.bookinfo.com/reviews`, +// `http://uk.bookinfo.com:9080/reviews`, +// `http://eu.bookinfo.com:9080/reviews` into two versions (prod and qa) of +// an internal reviews service on port 9080. 
In addition, requests +// containing the cookie "user: dev-123" will be sent to special port 7777 +// in the qa version. The same rule is also applicable inside the mesh for +// requests to the "reviews.prod.svc.cluster.local" service. This rule is +// applicable across ports 443, 9080. Note that `http://uk.bookinfo.com` +// gets redirected to `https://uk.bookinfo.com` (i.e. 80 redirects to 443). +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-rule +// namespace: bookinfo-namespace +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// - uk.bookinfo.com +// - eu.bookinfo.com +// gateways: +// - some-config-namespace/my-gateway +// - mesh # applies to all the sidecars in the mesh +// http: +// - match: +// - headers: +// cookie: +// exact: "user=dev-123" +// route: +// - destination: +// port: +// number: 7777 +// host: reviews.qa.svc.cluster.local +// - match: +// - uri: +// prefix: /reviews/ +// route: +// - destination: +// port: +// number: 9080 # can be omitted if it's the only port for reviews +// host: reviews.prod.svc.cluster.local +// weight: 80 +// - destination: +// host: reviews.qa.svc.cluster.local +// weight: 20 +// ``` +// +// The following VirtualService forwards traffic arriving at (external) +// port 27017 to internal Mongo server on port 5555. This rule is not +// applicable internally in the mesh as the gateway list omits the +// reserved name `mesh`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-Mongo +// namespace: bookinfo-namespace +// spec: +// hosts: +// - mongosvr.prod.svc.cluster.local # name of internal Mongo service +// gateways: +// - some-config-namespace/my-gateway # can omit the namespace if gateway is in same +// namespace as virtual service. 
+// tcp: +// - match: +// - port: 27017 +// route: +// - destination: +// host: mongo.prod.svc.cluster.local +// port: +// number: 5555 +// ``` +// +// It is possible to restrict the set of virtual services that can bind to +// a gateway server using the namespace/hostname syntax in the hosts field. +// For example, the following Gateway allows any virtual service in the ns1 +// namespace to bind to it, while restricting only the virtual service with +// foo.bar.com host in the ns2 namespace to bind to it. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "ns1/*" +// - "ns2/foo.bar.com" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using Gateway within kubernetes types, where deepcopy-gen is used. +func (in *Gateway) DeepCopyInto(out *Gateway) { + p := proto.Clone(in).(*Gateway) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/gateway_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/gateway_json.gen.go new file mode 100644 index 0000000000..72750e2528 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/gateway_json.gen.go @@ -0,0 +1,239 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/gateway.proto + +// `Gateway` describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. 
The specification +// describes a set of ports that should be exposed, the type of protocol to +// use, SNI configuration for the load balancer, etc. +// +// For example, the following Gateway configuration sets up a proxy to act +// as a load balancer exposing port 80 and 9080 (http), 443 (https), +// 9443(https) and port 2379 (TCP) for ingress. The gateway will be +// applied to the proxy running on a pod with labels `app: +// my-gateway-controller`. While Istio will configure the proxy to listen +// on these ports, it is the responsibility of the user to ensure that +// external traffic to these ports are allowed into the mesh. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// httpsRedirect: true # sends 301 redirect for http requests +// - port: +// number: 443 +// name: https-443 +// protocol: HTTPS +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// mode: SIMPLE # enables HTTPS on this port +// serverCertificate: /etc/certs/servercert.pem +// privateKey: /etc/certs/privatekey.pem +// - port: +// number: 9443 +// name: https-9443 +// protocol: HTTPS +// hosts: +// - "bookinfo-namespace/*.bookinfo.com" +// tls: +// mode: SIMPLE # enables HTTPS on this port +// credentialName: bookinfo-secret # fetches certs from Kubernetes secret +// - port: +// number: 9080 +// name: http-wildcard +// protocol: HTTP +// hosts: +// - "*" +// - port: +// number: 2379 # to expose internal service via external port 2379 +// name: mongo +// protocol: MONGO +// hosts: +// - "*" +// ``` +// +// The Gateway specification above describes the L4-L6 properties of a load +// balancer. 
A `VirtualService` can then be bound to a gateway to control +// the forwarding of traffic arriving at a particular host or gateway port. +// +// For example, the following VirtualService splits traffic for +// `https://uk.bookinfo.com/reviews`, `https://eu.bookinfo.com/reviews`, +// `http://uk.bookinfo.com:9080/reviews`, +// `http://eu.bookinfo.com:9080/reviews` into two versions (prod and qa) of +// an internal reviews service on port 9080. In addition, requests +// containing the cookie "user: dev-123" will be sent to special port 7777 +// in the qa version. The same rule is also applicable inside the mesh for +// requests to the "reviews.prod.svc.cluster.local" service. This rule is +// applicable across ports 443, 9080. Note that `http://uk.bookinfo.com` +// gets redirected to `https://uk.bookinfo.com` (i.e. 80 redirects to 443). +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-rule +// namespace: bookinfo-namespace +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// - uk.bookinfo.com +// - eu.bookinfo.com +// gateways: +// - some-config-namespace/my-gateway +// - mesh # applies to all the sidecars in the mesh +// http: +// - match: +// - headers: +// cookie: +// exact: "user=dev-123" +// route: +// - destination: +// port: +// number: 7777 +// host: reviews.qa.svc.cluster.local +// - match: +// - uri: +// prefix: /reviews/ +// route: +// - destination: +// port: +// number: 9080 # can be omitted if it's the only port for reviews +// host: reviews.prod.svc.cluster.local +// weight: 80 +// - destination: +// host: reviews.qa.svc.cluster.local +// weight: 20 +// ``` +// +// The following VirtualService forwards traffic arriving at (external) +// port 27017 to internal Mongo server on port 5555. This rule is not +// applicable internally in the mesh as the gateway list omits the +// reserved name `mesh`. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-Mongo +// namespace: bookinfo-namespace +// spec: +// hosts: +// - mongosvr.prod.svc.cluster.local # name of internal Mongo service +// gateways: +// - some-config-namespace/my-gateway # can omit the namespace if gateway is in same +// namespace as virtual service. +// tcp: +// - match: +// - port: 27017 +// route: +// - destination: +// host: mongo.prod.svc.cluster.local +// port: +// number: 5555 +// ``` +// +// It is possible to restrict the set of virtual services that can bind to +// a gateway server using the namespace/hostname syntax in the hosts field. +// For example, the following Gateway allows any virtual service in the ns1 +// namespace to bind to it, while restricting only the virtual service with +// foo.bar.com host in the ns2 namespace to bind to it. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// namespace: some-config-namespace +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "ns1/*" +// - "ns2/foo.bar.com" +// ``` +// + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for Gateway +func (this *Gateway) MarshalJSON() ([]byte, error) { + str, err := GatewayMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Gateway +func (this *Gateway) UnmarshalJSON(b []byte) error { + return GatewayUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Server +func (this *Server) MarshalJSON() ([]byte, error) { + str, err := GatewayMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Server +func (this *Server) UnmarshalJSON(b []byte) error { + return GatewayUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Server_TLSOptions +func (this *Server_TLSOptions) MarshalJSON() ([]byte, error) { + str, err := GatewayMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Server_TLSOptions +func (this *Server_TLSOptions) UnmarshalJSON(b []byte) error { + return GatewayUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Port +func (this *Port) MarshalJSON() ([]byte, error) { + str, err := GatewayMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Port +func (this *Port) UnmarshalJSON(b []byte) error { + return GatewayUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var ( + GatewayMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + GatewayUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/service_entry.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/service_entry.pb.go new file mode 100644 index 0000000000..608976bdb9 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/service_entry.pb.go @@ -0,0 +1,1885 @@ +// 
Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/service_entry.proto + +// `ServiceEntry` enables adding additional entries into Istio's internal +// service registry, so that auto-discovered services in the mesh can +// access/route to these manually specified services. A service entry +// describes the properties of a service (DNS name, VIPs, ports, protocols, +// endpoints). These services could be external to the mesh (e.g., web +// APIs) or mesh-internal services that are not part of the platform's +// service registry (e.g., a set of VMs talking to services in Kubernetes). +// +// The following example declares a few external APIs accessed by internal +// applications over HTTPS. The sidecar inspects the SNI value in the +// ClientHello message to route to the appropriate external service. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-https +// spec: +// hosts: +// - api.dropboxapi.com +// - www.googleapis.com +// - api.facebook.com +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: DNS +// ``` +// +// The following configuration adds a set of MongoDB instances running on +// unmanaged VMs to Istio's registry, so that these services can be treated +// as any other service in the mesh. The associated DestinationRule is used +// to initiate mTLS connections to the database instances. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mongocluster +// spec: +// hosts: +// - mymongodb.somedomain # not used +// addresses: +// - 192.192.192.192/24 # VIPs +// ports: +// - number: 27018 +// name: mongodb +// protocol: MONGO +// location: MESH_INTERNAL +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// ``` +// +// and the associated DestinationRule +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: mtls-mongocluster +// spec: +// host: mymongodb.somedomain +// trafficPolicy: +// tls: +// mode: MUTUAL +// clientCertificate: /etc/certs/myclientcert.pem +// privateKey: /etc/certs/client_private_key.pem +// caCertificates: /etc/certs/rootcacerts.pem +// ``` +// +// The following example uses a combination of service entry and TLS +// routing in a virtual service to steer traffic based on the SNI value to +// an internal egress firewall. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-redirect +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: NONE +// ``` +// +// And the associated VirtualService to route based on the SNI value. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: tls-routing +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// tls: +// - match: +// - sniHosts: +// - wikipedia.org +// - "*.wikipedia.org" +// route: +// - destination: +// host: internal-egress-firewall.ns1.svc.cluster.local +// ``` +// +// The virtual service with TLS match serves to override the default SNI +// match. In the absence of a virtual service, traffic will be forwarded to +// the wikipedia domains. 
+// +// The following example demonstrates the use of a dedicated egress gateway +// through which all external service traffic is forwarded. +// The 'exportTo' field allows for control over the visibility of a service +// declaration to other namespaces in the mesh. By default, a service is exported +// to all namespaces. The following example restricts the visibility to the +// current namespace, represented by ".", so that it cannot be used by other +// namespaces. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-httpbin +// namespace : egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "." +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// ``` +// +// Define a gateway to handle all egress traffic. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: istio-egressgateway +// namespace: istio-system +// spec: +// selector: +// istio: egressgateway +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "*" +// ``` +// +// And the associated `VirtualService` to route from the sidecar to the +// gateway service (`istio-egressgateway.istio-system.svc.cluster.local`), as +// well as route from the gateway to the external service. Note that the +// virtual service is exported to all namespaces enabling them to route traffic +// through the gateway to the external service. Forcing traffic to go through +// a managed middle proxy like this is a common practice. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: gateway-routing +// namespace: egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "*" +// gateways: +// - mesh +// - istio-egressgateway +// http: +// - match: +// - port: 80 +// gateways: +// - mesh +// route: +// - destination: +// host: istio-egressgateway.istio-system.svc.cluster.local +// - match: +// - port: 80 +// gateways: +// - istio-egressgateway +// route: +// - destination: +// host: httpbin.com +// ``` +// +// The following example demonstrates the use of wildcards in the hosts for +// external services. If the connection has to be routed to the IP address +// requested by the application (i.e. application resolves DNS and attempts +// to connect to a specific IP), the discovery mode must be set to `NONE`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-wildcard-example +// spec: +// hosts: +// - "*.bar.com" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: NONE +// ``` +// +// The following example demonstrates a service that is available via a +// Unix Domain Socket on the host of the client. The resolution must be +// set to STATIC to use Unix address endpoints. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: unix-domain-socket-example +// spec: +// hosts: +// - "example.unix.local" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: unix:///var/run/example/socket +// ``` +// +// For HTTP-based services, it is possible to create a `VirtualService` +// backed by multiple DNS addressable endpoints. 
In such a scenario, the +// application can use the `HTTP_PROXY` environment variable to transparently +// reroute API calls for the `VirtualService` to a chosen backend. For +// example, the following configuration creates a non-existent external +// service called foo.bar.com backed by three domains: us.foo.bar.com:8080, +// uk.foo.bar.com:9080, and in.foo.bar.com:7080 +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-dns +// spec: +// hosts: +// - foo.bar.com +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// endpoints: +// - address: us.foo.bar.com +// ports: +// https: 8080 +// - address: uk.foo.bar.com +// ports: +// https: 9080 +// - address: in.foo.bar.com +// ports: +// https: 7080 +// ``` +// +// With `HTTP_PROXY=http://localhost/`, calls from the application to +// `http://foo.bar.com` will be load balanced across the three domains +// specified above. In other words, a call to `http://foo.bar.com/baz` would +// be translated to `http://uk.foo.bar.com/baz`. 
+// +// The following example illustrates the usage of a `ServiceEntry` +// containing a subject alternate name +// whose format conforms to the [SPIFFE standard](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md): +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: httpbin +// namespace : httpbin-ns +// spec: +// hosts: +// - httpbin.com +// location: MESH_INTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// subjectAltNames: +// - "spiffe://cluster.local/ns/httpbin-ns/sa/httpbin-service-account" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Location specifies whether the service is part of Istio mesh or +// outside the mesh. Location determines the behavior of several +// features, such as service-to-service mTLS authentication, policy +// enforcement, etc. When communicating with services outside the mesh, +// Istio's mTLS authentication is disabled, and policy enforcement is +// performed on the client-side as opposed to server-side. +type ServiceEntry_Location int32 + +const ( + // Signifies that the service is external to the mesh. Typically used + // to indicate external services consumed through APIs. 
+ ServiceEntry_MESH_EXTERNAL ServiceEntry_Location = 0 + // Signifies that the service is part of the mesh. Typically used to + // indicate services added explicitly as part of expanding the service + // mesh to include unmanaged infrastructure (e.g., VMs added to a + // Kubernetes based service mesh). + ServiceEntry_MESH_INTERNAL ServiceEntry_Location = 1 +) + +var ServiceEntry_Location_name = map[int32]string{ + 0: "MESH_EXTERNAL", + 1: "MESH_INTERNAL", +} + +var ServiceEntry_Location_value = map[string]int32{ + "MESH_EXTERNAL": 0, + "MESH_INTERNAL": 1, +} + +func (x ServiceEntry_Location) String() string { + return proto.EnumName(ServiceEntry_Location_name, int32(x)) +} + +func (ServiceEntry_Location) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9220e0fa673c4bf8, []int{0, 0} +} + +// Resolution determines how the proxy will resolve the IP addresses of +// the network endpoints associated with the service, so that it can +// route to one of them. The resolution mode specified here has no impact +// on how the application resolves the IP address associated with the +// service. The application may still have to use DNS to resolve the +// service to an IP so that the outbound traffic can be captured by the +// Proxy. Alternatively, for HTTP services, the application could +// directly communicate with the proxy (e.g., by setting HTTP_PROXY) to +// talk to these services. +type ServiceEntry_Resolution int32 + +const ( + // Assume that incoming connections have already been resolved (to a + // specific destination IP address). Such connections are typically + // routed via the proxy using mechanisms such as IP table REDIRECT/ + // eBPF. After performing any routing related transformations, the + // proxy will forward the connection to the IP address to which the + // connection was bound. 
+ ServiceEntry_NONE ServiceEntry_Resolution = 0 + // Use the static IP addresses specified in endpoints (see below) as the + // backing instances associated with the service. + ServiceEntry_STATIC ServiceEntry_Resolution = 1 + // Attempt to resolve the IP address by querying the ambient DNS, + // during request processing. If no endpoints are specified, the proxy + // will resolve the DNS address specified in the hosts field, if + // wildcards are not used. If endpoints are specified, the DNS + // addresses specified in the endpoints will be resolved to determine + // the destination IP address. DNS resolution cannot be used with Unix + // domain socket endpoints. + ServiceEntry_DNS ServiceEntry_Resolution = 2 +) + +var ServiceEntry_Resolution_name = map[int32]string{ + 0: "NONE", + 1: "STATIC", + 2: "DNS", +} + +var ServiceEntry_Resolution_value = map[string]int32{ + "NONE": 0, + "STATIC": 1, + "DNS": 2, +} + +func (x ServiceEntry_Resolution) String() string { + return proto.EnumName(ServiceEntry_Resolution_name, int32(x)) +} + +func (ServiceEntry_Resolution) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9220e0fa673c4bf8, []int{0, 1} +} + +// ServiceEntry enables adding additional entries into Istio's internal +// service registry. +// +// +type ServiceEntry struct { + // The hosts associated with the ServiceEntry. Could be a DNS + // name with wildcard prefix. + // + // 1. The hosts field is used to select matching hosts in VirtualServices and DestinationRules. + // 2. For HTTP traffic the HTTP Host/Authority header will be matched against the hosts field. + // 3. For HTTPs or TLS traffic containing Server Name Indication (SNI), the SNI value + // will be matched against the hosts field. + // + // Note that when resolution is set to type DNS + // and no endpoints are specified, the host field will be used as the DNS name + // of the endpoint to route traffic to. 
+ Hosts []string `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // The virtual IP addresses associated with the service. Could be CIDR + // prefix. For HTTP traffic, generated route configurations will include http route + // domains for both the `addresses` and `hosts` field values and the destination will + // be identified based on the HTTP Host/Authority header. + // If one or more IP addresses are specified, + // the incoming traffic will be identified as belonging to this service + // if the destination IP matches the IP/CIDRs specified in the addresses + // field. If the Addresses field is empty, traffic will be identified + // solely based on the destination port. In such scenarios, the port on + // which the service is being accessed must not be shared by any other + // service in the mesh. In other words, the sidecar will behave as a + // simple TCP proxy, forwarding incoming traffic on a specified port to + // the specified destination endpoint IP/host. Unix domain socket + // addresses are not supported in this field. + Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` + // The ports associated with the external service. If the + // Endpoints are Unix domain socket addresses, there must be exactly one + // port. + Ports []*Port `protobuf:"bytes,3,rep,name=ports,proto3" json:"ports,omitempty"` + // Specify whether the service should be considered external to the mesh + // or part of the mesh. + Location ServiceEntry_Location `protobuf:"varint,4,opt,name=location,proto3,enum=istio.networking.v1alpha3.ServiceEntry_Location" json:"location,omitempty"` + // Service discovery mode for the hosts. Care must be taken + // when setting the resolution mode to NONE for a TCP port without + // accompanying IP addresses. In such cases, traffic to any IP on + // said port will be allowed (i.e. 0.0.0.0:). 
+ Resolution ServiceEntry_Resolution `protobuf:"varint,5,opt,name=resolution,proto3,enum=istio.networking.v1alpha3.ServiceEntry_Resolution" json:"resolution,omitempty"` + // One or more endpoints associated with the service. + Endpoints []*ServiceEntry_Endpoint `protobuf:"bytes,6,rep,name=endpoints,proto3" json:"endpoints,omitempty"` + // A list of namespaces to which this service is exported. Exporting a service + // allows it to be used by sidecars, gateways and virtual services defined in + // other namespaces. This feature provides a mechanism for service owners + // and mesh administrators to control the visibility of services across + // namespace boundaries. + // + // If no namespaces are specified then the service is exported to all + // namespaces by default. + // + // The value "." is reserved and defines an export to the same namespace that + // the service is declared in. Similarly the value "*" is reserved and + // defines an export to all namespaces. + // + // For a Kubernetes Service, the equivalent effect can be achieved by setting + // the annotation "networking.istio.io/exportTo" to a comma-separated list + // of namespace names. + // + // NOTE: in the current release, the `exportTo` value is restricted to + // "." or "*" (i.e., the current namespace or all namespaces). + ExportTo []string `protobuf:"bytes,7,rep,name=export_to,json=exportTo,proto3" json:"export_to,omitempty"` + // The list of subject alternate names allowed for workload instances that + // implement this service. This information is used to enforce + // [secure-naming](https://istio.io/docs/concepts/security/#secure-naming). + // If specified, the proxy will verify that the server + // certificate's subject alternate name matches one of the specified values. 
+ SubjectAltNames []string `protobuf:"bytes,8,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceEntry) Reset() { *m = ServiceEntry{} } +func (m *ServiceEntry) String() string { return proto.CompactTextString(m) } +func (*ServiceEntry) ProtoMessage() {} +func (*ServiceEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_9220e0fa673c4bf8, []int{0} +} +func (m *ServiceEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceEntry.Merge(m, src) +} +func (m *ServiceEntry) XXX_Size() int { + return m.Size() +} +func (m *ServiceEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceEntry proto.InternalMessageInfo + +func (m *ServiceEntry) GetHosts() []string { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *ServiceEntry) GetAddresses() []string { + if m != nil { + return m.Addresses + } + return nil +} + +func (m *ServiceEntry) GetPorts() []*Port { + if m != nil { + return m.Ports + } + return nil +} + +func (m *ServiceEntry) GetLocation() ServiceEntry_Location { + if m != nil { + return m.Location + } + return ServiceEntry_MESH_EXTERNAL +} + +func (m *ServiceEntry) GetResolution() ServiceEntry_Resolution { + if m != nil { + return m.Resolution + } + return ServiceEntry_NONE +} + +func (m *ServiceEntry) GetEndpoints() []*ServiceEntry_Endpoint { + if m != nil { + return m.Endpoints + } + return nil +} + +func (m *ServiceEntry) 
GetExportTo() []string { + if m != nil { + return m.ExportTo + } + return nil +} + +func (m *ServiceEntry) GetSubjectAltNames() []string { + if m != nil { + return m.SubjectAltNames + } + return nil +} + +// Endpoint defines a network address (IP or hostname) associated with +// the mesh service. +type ServiceEntry_Endpoint struct { + // Address associated with the network endpoint without the + // port. Domain names can be used if and only if the resolution is set + // to DNS, and must be fully-qualified without wildcards. Use the form + // unix:///absolute/path/to/socket for Unix domain socket endpoints. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Set of ports associated with the endpoint. The ports must be + // associated with a port name that was declared as part of the + // service. Do not use for `unix://` addresses. + Ports map[string]uint32 `protobuf:"bytes,2,rep,name=ports,proto3" json:"ports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // One or more labels associated with the endpoint. + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Network enables Istio to group endpoints resident in the same L3 + // domain/network. All endpoints in the same network are assumed to be + // directly reachable from one another. When endpoints in different + // networks cannot reach each other directly, an Istio Gateway can be + // used to establish connectivity (usually using the + // AUTO_PASSTHROUGH mode in a Gateway Server). This is + // an advanced configuration used typically for spanning an Istio mesh + // over multiple clusters. + Network string `protobuf:"bytes,4,opt,name=network,proto3" json:"network,omitempty"` + // The locality associated with the endpoint. 
A locality corresponds + // to a failure domain (e.g., country/region/zone). Arbitrary failure + // domain hierarchies can be represented by separating each + // encapsulating failure domain by /. For example, the locality of an + // an endpoint in US, in US-East-1 region, within availability zone + // az-1, in data center rack r11 can be represented as + // us/us-east-1/az-1/r11. Istio will configure the sidecar to route to + // endpoints within the same locality as the sidecar. If none of the + // endpoints in the locality are available, endpoints parent locality + // (but within the same network ID) will be chosen. For example, if + // there are two endpoints in same network (networkID "n1"), say e1 + // with locality us/us-east-1/az-1/r11 and e2 with locality + // us/us-east-1/az-2/r12, a sidecar from us/us-east-1/az-1/r11 locality + // will prefer e1 from the same locality over e2 from a different + // locality. Endpoint e2 could be the IP associated with a gateway + // (that bridges networks n1 and n2), or the IP associated with a + // standard service endpoint. + Locality string `protobuf:"bytes,5,opt,name=locality,proto3" json:"locality,omitempty"` + // The load balancing weight associated with the endpoint. Endpoints + // with higher weights will receive proportionally higher traffic. 
+ Weight uint32 `protobuf:"varint,6,opt,name=weight,proto3" json:"weight,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceEntry_Endpoint) Reset() { *m = ServiceEntry_Endpoint{} } +func (m *ServiceEntry_Endpoint) String() string { return proto.CompactTextString(m) } +func (*ServiceEntry_Endpoint) ProtoMessage() {} +func (*ServiceEntry_Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_9220e0fa673c4bf8, []int{0, 0} +} +func (m *ServiceEntry_Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceEntry_Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceEntry_Endpoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceEntry_Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceEntry_Endpoint.Merge(m, src) +} +func (m *ServiceEntry_Endpoint) XXX_Size() int { + return m.Size() +} +func (m *ServiceEntry_Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceEntry_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceEntry_Endpoint proto.InternalMessageInfo + +func (m *ServiceEntry_Endpoint) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *ServiceEntry_Endpoint) GetPorts() map[string]uint32 { + if m != nil { + return m.Ports + } + return nil +} + +func (m *ServiceEntry_Endpoint) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ServiceEntry_Endpoint) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ServiceEntry_Endpoint) GetLocality() string { + if m != nil { + return m.Locality + } + return "" +} + +func (m *ServiceEntry_Endpoint) GetWeight() uint32 { + if m != nil { + return m.Weight + } + 
return 0 +} + +func init() { + proto.RegisterEnum("istio.networking.v1alpha3.ServiceEntry_Location", ServiceEntry_Location_name, ServiceEntry_Location_value) + proto.RegisterEnum("istio.networking.v1alpha3.ServiceEntry_Resolution", ServiceEntry_Resolution_name, ServiceEntry_Resolution_value) + proto.RegisterType((*ServiceEntry)(nil), "istio.networking.v1alpha3.ServiceEntry") + proto.RegisterType((*ServiceEntry_Endpoint)(nil), "istio.networking.v1alpha3.ServiceEntry.Endpoint") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.ServiceEntry.Endpoint.LabelsEntry") + proto.RegisterMapType((map[string]uint32)(nil), "istio.networking.v1alpha3.ServiceEntry.Endpoint.PortsEntry") +} + +func init() { + proto.RegisterFile("networking/v1alpha3/service_entry.proto", fileDescriptor_9220e0fa673c4bf8) +} + +var fileDescriptor_9220e0fa673c4bf8 = []byte{ + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0x5e, 0x92, 0x35, 0x4d, 0xce, 0x18, 0x74, 0x16, 0x42, 0x5e, 0x80, 0x2d, 0xec, 0x86, 0x0a, + 0xa4, 0x74, 0x6c, 0x37, 0x63, 0x70, 0xd3, 0x41, 0x24, 0x26, 0x95, 0x00, 0x69, 0x25, 0x10, 0x37, + 0x91, 0xdb, 0x9a, 0xd6, 0xcc, 0xc4, 0x55, 0xec, 0xb6, 0xf4, 0x41, 0x78, 0x19, 0x9e, 0x80, 0x4b, + 0x1e, 0x61, 0xea, 0x93, 0xa0, 0x3a, 0x49, 0xdb, 0x8b, 0x01, 0xdb, 0x5d, 0xce, 0x67, 0x7f, 0xdf, + 0xf9, 0xf9, 0x8e, 0x03, 0x8f, 0x53, 0xaa, 0xa6, 0x22, 0xbb, 0x60, 0xe9, 0xa0, 0x31, 0x79, 0x46, + 0xf8, 0x68, 0x48, 0x8e, 0x1b, 0x92, 0x66, 0x13, 0xd6, 0xa3, 0x09, 0x4d, 0x55, 0x36, 0x0b, 0x46, + 0x99, 0x50, 0x02, 0xed, 0x32, 0xa9, 0x98, 0x08, 0x56, 0xd7, 0x83, 0xf2, 0xba, 0xb7, 0x3f, 0x10, + 0x62, 0xc0, 0x69, 0x83, 0x8c, 0x58, 0xe3, 0x0b, 0xa3, 0xbc, 0x9f, 0x74, 0xe9, 0x90, 0x4c, 0x98, + 0xc8, 0x72, 0xae, 0xf7, 0xe8, 0xaa, 0x24, 0x03, 0xa2, 0xe8, 0x94, 0x14, 0xf2, 0x07, 0x3f, 0xab, + 0x70, 0xab, 0x9d, 0xa7, 0x0d, 0x17, 0x59, 0xd1, 0x2e, 0x54, 0x86, 0x42, 0x2a, 0x89, 0x0d, 
0xdf, + 0xaa, 0xbb, 0x67, 0xd6, 0x65, 0xd3, 0x8c, 0x73, 0x04, 0x3d, 0x00, 0x97, 0xf4, 0xfb, 0x19, 0x95, + 0x92, 0x4a, 0x6c, 0x2e, 0x8e, 0xe3, 0x15, 0x80, 0x4e, 0xa1, 0x32, 0x12, 0x99, 0x92, 0xd8, 0xf2, + 0xad, 0xfa, 0xd6, 0xd1, 0x7e, 0xf0, 0xd7, 0xc2, 0x83, 0xf7, 0x22, 0x53, 0x85, 0xb2, 0xa6, 0xa0, + 0x16, 0x38, 0x5c, 0xf4, 0x88, 0x62, 0x22, 0xc5, 0x9b, 0xbe, 0x51, 0xbf, 0x7d, 0x74, 0xf8, 0x0f, + 0xfa, 0x7a, 0xbd, 0x41, 0xab, 0xe0, 0xc5, 0x4b, 0x05, 0xf4, 0x11, 0x20, 0xa3, 0x52, 0xf0, 0xb1, + 0xd6, 0xab, 0x68, 0xbd, 0xa3, 0xeb, 0xea, 0xc5, 0x4b, 0x66, 0x5e, 0xe1, 0x9a, 0x14, 0x8a, 0xc0, + 0xa5, 0x69, 0x7f, 0x24, 0x58, 0xaa, 0x24, 0xb6, 0x75, 0x9b, 0xd7, 0xae, 0x33, 0x2c, 0x88, 0xf1, + 0x4a, 0x02, 0xdd, 0x07, 0x97, 0x7e, 0x5f, 0x4c, 0x20, 0x51, 0x02, 0x57, 0xf5, 0x40, 0x9d, 0x1c, + 0xe8, 0x08, 0xf4, 0x04, 0x76, 0xe4, 0xb8, 0xfb, 0x95, 0xf6, 0x54, 0x42, 0xb8, 0x4a, 0x52, 0xf2, + 0x8d, 0x4a, 0xec, 0xe8, 0x4b, 0x77, 0x8a, 0x83, 0x26, 0x57, 0xd1, 0x02, 0xf6, 0x7e, 0x58, 0xe0, + 0x94, 0x09, 0xd0, 0x43, 0xa8, 0x16, 0xae, 0x60, 0xc3, 0x37, 0x4a, 0x0f, 0x4b, 0x0c, 0x7d, 0x28, + 0x7d, 0x32, 0x75, 0x03, 0x2f, 0x6e, 0xda, 0x80, 0x76, 0x4f, 0x6a, 0xac, 0xb4, 0xaf, 0x03, 0x36, + 0x27, 0x5d, 0xca, 0x4b, 0xef, 0x5f, 0xde, 0x58, 0xb3, 0xa5, 0xe9, 0xb9, 0x68, 0xa1, 0x85, 0x30, + 0x54, 0x0b, 0x01, 0xbd, 0x13, 0x6e, 0x5c, 0x86, 0xc8, 0xcb, 0xd7, 0x85, 0x33, 0x35, 0xd3, 0xf6, + 0xba, 0xf1, 0x32, 0x46, 0xf7, 0xc0, 0x9e, 0x52, 0x36, 0x18, 0x2a, 0x6c, 0xfb, 0x46, 0x7d, 0x3b, + 0x2e, 0x22, 0xef, 0x04, 0x60, 0x55, 0x38, 0xaa, 0x81, 0x75, 0x41, 0x67, 0xf9, 0x7c, 0xe2, 0xc5, + 0x27, 0xba, 0x0b, 0x95, 0x09, 0xe1, 0x63, 0x8a, 0x4d, 0x4d, 0xcb, 0x83, 0x53, 0xf3, 0xc4, 0xf0, + 0x9e, 0xc3, 0xd6, 0x5a, 0x79, 0xff, 0xa3, 0xba, 0x6b, 0xd4, 0x83, 0x43, 0x70, 0xca, 0xfd, 0x44, + 0x3b, 0xb0, 0xfd, 0x36, 0x6c, 0xbf, 0x49, 0xc2, 0x4f, 0x9d, 0x30, 0x8e, 0x9a, 0xad, 0xda, 0xc6, + 0x12, 0x3a, 0x8f, 0x0a, 0xc8, 0x38, 0x78, 0x0a, 0xb0, 0xda, 0x40, 0xe4, 0xc0, 0x66, 0xf4, 0x2e, + 0x0a, 0x6b, 0x1b, 0x08, 0xc0, 
0x6e, 0x77, 0x9a, 0x9d, 0xf3, 0x57, 0x35, 0x03, 0x55, 0xc1, 0x7a, + 0x1d, 0xb5, 0x6b, 0xe6, 0x59, 0xf0, 0x6b, 0xbe, 0x67, 0xfc, 0x9e, 0xef, 0x19, 0x97, 0xf3, 0x3d, + 0xe3, 0xb3, 0x9f, 0x0f, 0x9d, 0x09, 0xfd, 0x43, 0xb8, 0xe2, 0xe9, 0x77, 0x6d, 0xfd, 0xe6, 0x8f, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x56, 0x61, 0xed, 0x81, 0x7d, 0x04, 0x00, 0x00, +} + +func (m *ServiceEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.SubjectAltNames) > 0 { + for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SubjectAltNames[iNdEx]) + copy(dAtA[i:], m.SubjectAltNames[iNdEx]) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.SubjectAltNames[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.ExportTo) > 0 { + for iNdEx := len(m.ExportTo) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExportTo[iNdEx]) + copy(dAtA[i:], m.ExportTo[iNdEx]) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.ExportTo[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintServiceEntry(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Resolution != 0 { + i = encodeVarintServiceEntry(dAtA, i, uint64(m.Resolution)) + i-- + dAtA[i] = 0x28 + } + if m.Location != 0 { + i = encodeVarintServiceEntry(dAtA, i, uint64(m.Location)) + i-- + dAtA[i] = 0x20 + } + if 
len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintServiceEntry(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceEntry_Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceEntry_Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceEntry_Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Weight != 0 { + i = encodeVarintServiceEntry(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x30 + } + if len(m.Locality) > 0 { + i -= len(m.Locality) + copy(dAtA[i:], m.Locality) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.Locality))) + i-- + dAtA[i] = 0x2a + } + if len(m.Network) > 0 { + i -= len(m.Network) + copy(dAtA[i:], m.Network) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.Network))) + i-- + dAtA[i] = 0x22 + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = 
encodeVarintServiceEntry(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintServiceEntry(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ports) > 0 { + for k := range m.Ports { + v := m.Ports[k] + baseI := i + i = encodeVarintServiceEntry(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintServiceEntry(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintServiceEntry(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintServiceEntry(dAtA []byte, offset int, v uint64) int { + offset -= sovServiceEntry(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServiceEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if m.Location != 0 { + n += 1 + sovServiceEntry(uint64(m.Location)) + } + if m.Resolution != 0 { + n += 1 + sovServiceEntry(uint64(m.Resolution)) + } + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if len(m.ExportTo) > 0 { + for _, s := range m.ExportTo { + l = len(s) + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if len(m.SubjectAltNames) > 0 { + for _, s := range m.SubjectAltNames 
{ + l = len(s) + n += 1 + l + sovServiceEntry(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ServiceEntry_Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovServiceEntry(uint64(l)) + } + if len(m.Ports) > 0 { + for k, v := range m.Ports { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovServiceEntry(uint64(len(k))) + 1 + sovServiceEntry(uint64(v)) + n += mapEntrySize + 1 + sovServiceEntry(uint64(mapEntrySize)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovServiceEntry(uint64(len(k))) + 1 + len(v) + sovServiceEntry(uint64(len(v))) + n += mapEntrySize + 1 + sovServiceEntry(uint64(mapEntrySize)) + } + } + l = len(m.Network) + if l > 0 { + n += 1 + l + sovServiceEntry(uint64(l)) + } + l = len(m.Locality) + if l > 0 { + n += 1 + l + sovServiceEntry(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovServiceEntry(uint64(m.Weight)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovServiceEntry(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozServiceEntry(x uint64) (n int) { + return sovServiceEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ServiceEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &Port{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + m.Location = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Location |= ServiceEntry_Location(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Resolution", wireType) + } + m.Resolution = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Resolution |= ServiceEntry_Resolution(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, &ServiceEntry_Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportTo = append(m.ExportTo, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubjectAltNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubjectAltNames = append(m.SubjectAltNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipServiceEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceEntry_Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ports == nil { + m.Ports = make(map[string]uint32) + } + var mapkey string + var mapvalue uint32 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthServiceEntry + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthServiceEntry + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipServiceEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Ports[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthServiceEntry + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthServiceEntry + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthServiceEntry + } + 
postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthServiceEntry + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipServiceEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Locality", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceEntry + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceEntry + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Locality = string(dAtA[iNdEx:postIndex]) 
+ iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipServiceEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthServiceEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipServiceEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthServiceEntry + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthServiceEntry 
+ } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipServiceEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthServiceEntry + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthServiceEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowServiceEntry = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/service_entry_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/service_entry_deepcopy.gen.go new file mode 100644 index 0000000000..acc4674e84 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/service_entry_deepcopy.gen.go @@ -0,0 +1,335 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/service_entry.proto + +// `ServiceEntry` enables adding additional entries into Istio's internal +// service registry, so that auto-discovered services in the mesh can +// access/route to these manually specified services. A service entry +// describes the properties of a service (DNS name, VIPs, ports, protocols, +// endpoints). These services could be external to the mesh (e.g., web +// APIs) or mesh-internal services that are not part of the platform's +// service registry (e.g., a set of VMs talking to services in Kubernetes). 
+// +// The following example declares a few external APIs accessed by internal +// applications over HTTPS. The sidecar inspects the SNI value in the +// ClientHello message to route to the appropriate external service. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-https +// spec: +// hosts: +// - api.dropboxapi.com +// - www.googleapis.com +// - api.facebook.com +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: DNS +// ``` +// +// The following configuration adds a set of MongoDB instances running on +// unmanaged VMs to Istio's registry, so that these services can be treated +// as any other service in the mesh. The associated DestinationRule is used +// to initiate mTLS connections to the database instances. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mongocluster +// spec: +// hosts: +// - mymongodb.somedomain # not used +// addresses: +// - 192.192.192.192/24 # VIPs +// ports: +// - number: 27018 +// name: mongodb +// protocol: MONGO +// location: MESH_INTERNAL +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// ``` +// +// and the associated DestinationRule +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: mtls-mongocluster +// spec: +// host: mymongodb.somedomain +// trafficPolicy: +// tls: +// mode: MUTUAL +// clientCertificate: /etc/certs/myclientcert.pem +// privateKey: /etc/certs/client_private_key.pem +// caCertificates: /etc/certs/rootcacerts.pem +// ``` +// +// The following example uses a combination of service entry and TLS +// routing in a virtual service to steer traffic based on the SNI value to +// an internal egress firewall. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-redirect +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: NONE +// ``` +// +// And the associated VirtualService to route based on the SNI value. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: tls-routing +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// tls: +// - match: +// - sniHosts: +// - wikipedia.org +// - "*.wikipedia.org" +// route: +// - destination: +// host: internal-egress-firewall.ns1.svc.cluster.local +// ``` +// +// The virtual service with TLS match serves to override the default SNI +// match. In the absence of a virtual service, traffic will be forwarded to +// the wikipedia domains. +// +// The following example demonstrates the use of a dedicated egress gateway +// through which all external service traffic is forwarded. +// The 'exportTo' field allows for control over the visibility of a service +// declaration to other namespaces in the mesh. By default, a service is exported +// to all namespaces. The following example restricts the visibility to the +// current namespace, represented by ".", so that it cannot be used by other +// namespaces. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-httpbin +// namespace : egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "." +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// ``` +// +// Define a gateway to handle all egress traffic. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: istio-egressgateway +// namespace: istio-system +// spec: +// selector: +// istio: egressgateway +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "*" +// ``` +// +// And the associated `VirtualService` to route from the sidecar to the +// gateway service (`istio-egressgateway.istio-system.svc.cluster.local`), as +// well as route from the gateway to the external service. Note that the +// virtual service is exported to all namespaces enabling them to route traffic +// through the gateway to the external service. Forcing traffic to go through +// a managed middle proxy like this is a common practice. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: gateway-routing +// namespace: egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "*" +// gateways: +// - mesh +// - istio-egressgateway +// http: +// - match: +// - port: 80 +// gateways: +// - mesh +// route: +// - destination: +// host: istio-egressgateway.istio-system.svc.cluster.local +// - match: +// - port: 80 +// gateways: +// - istio-egressgateway +// route: +// - destination: +// host: httpbin.com +// ``` +// +// The following example demonstrates the use of wildcards in the hosts for +// external services. If the connection has to be routed to the IP address +// requested by the application (i.e. application resolves DNS and attempts +// to connect to a specific IP), the discovery mode must be set to `NONE`. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-wildcard-example +// spec: +// hosts: +// - "*.bar.com" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: NONE +// ``` +// +// The following example demonstrates a service that is available via a +// Unix Domain Socket on the host of the client. The resolution must be +// set to STATIC to use Unix address endpoints. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: unix-domain-socket-example +// spec: +// hosts: +// - "example.unix.local" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: unix:///var/run/example/socket +// ``` +// +// For HTTP-based services, it is possible to create a `VirtualService` +// backed by multiple DNS addressable endpoints. In such a scenario, the +// application can use the `HTTP_PROXY` environment variable to transparently +// reroute API calls for the `VirtualService` to a chosen backend. For +// example, the following configuration creates a non-existent external +// service called foo.bar.com backed by three domains: us.foo.bar.com:8080, +// uk.foo.bar.com:9080, and in.foo.bar.com:7080 +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-dns +// spec: +// hosts: +// - foo.bar.com +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// endpoints: +// - address: us.foo.bar.com +// ports: +// https: 8080 +// - address: uk.foo.bar.com +// ports: +// https: 9080 +// - address: in.foo.bar.com +// ports: +// https: 7080 +// ``` +// +// With `HTTP_PROXY=http://localhost/`, calls from the application to +// `http://foo.bar.com` will be load balanced across the three domains +// specified above. 
In other words, a call to `http://foo.bar.com/baz` would +// be translated to `http://uk.foo.bar.com/baz`. +// +// The following example illustrates the usage of a `ServiceEntry` +// containing a subject alternate name +// whose format conforms to the [SPIFFE standard](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md): +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: httpbin +// namespace : httpbin-ns +// spec: +// hosts: +// - httpbin.com +// location: MESH_INTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// subjectAltNames: +// - "spiffe://cluster.local/ns/httpbin-ns/sa/httpbin-service-account" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using ServiceEntry within kubernetes types, where deepcopy-gen is used. +func (in *ServiceEntry) DeepCopyInto(out *ServiceEntry) { + p := proto.Clone(in).(*ServiceEntry) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/service_entry_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/service_entry_json.gen.go new file mode 100644 index 0000000000..e947d5903d --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/service_entry_json.gen.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/service_entry.proto + +// `ServiceEntry` enables adding additional entries into Istio's internal +// service registry, so that auto-discovered services in the mesh can +// access/route to these manually specified services. 
A service entry +// describes the properties of a service (DNS name, VIPs, ports, protocols, +// endpoints). These services could be external to the mesh (e.g., web +// APIs) or mesh-internal services that are not part of the platform's +// service registry (e.g., a set of VMs talking to services in Kubernetes). +// +// The following example declares a few external APIs accessed by internal +// applications over HTTPS. The sidecar inspects the SNI value in the +// ClientHello message to route to the appropriate external service. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-https +// spec: +// hosts: +// - api.dropboxapi.com +// - www.googleapis.com +// - api.facebook.com +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: DNS +// ``` +// +// The following configuration adds a set of MongoDB instances running on +// unmanaged VMs to Istio's registry, so that these services can be treated +// as any other service in the mesh. The associated DestinationRule is used +// to initiate mTLS connections to the database instances. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mongocluster +// spec: +// hosts: +// - mymongodb.somedomain # not used +// addresses: +// - 192.192.192.192/24 # VIPs +// ports: +// - number: 27018 +// name: mongodb +// protocol: MONGO +// location: MESH_INTERNAL +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// ``` +// +// and the associated DestinationRule +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: mtls-mongocluster +// spec: +// host: mymongodb.somedomain +// trafficPolicy: +// tls: +// mode: MUTUAL +// clientCertificate: /etc/certs/myclientcert.pem +// privateKey: /etc/certs/client_private_key.pem +// caCertificates: /etc/certs/rootcacerts.pem +// ``` +// +// The following example uses a combination of service entry and TLS +// routing in a virtual service to steer traffic based on the SNI value to +// an internal egress firewall. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-redirect +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// location: MESH_EXTERNAL +// ports: +// - number: 443 +// name: https +// protocol: TLS +// resolution: NONE +// ``` +// +// And the associated VirtualService to route based on the SNI value. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: tls-routing +// spec: +// hosts: +// - wikipedia.org +// - "*.wikipedia.org" +// tls: +// - match: +// - sniHosts: +// - wikipedia.org +// - "*.wikipedia.org" +// route: +// - destination: +// host: internal-egress-firewall.ns1.svc.cluster.local +// ``` +// +// The virtual service with TLS match serves to override the default SNI +// match. In the absence of a virtual service, traffic will be forwarded to +// the wikipedia domains. 
+// +// The following example demonstrates the use of a dedicated egress gateway +// through which all external service traffic is forwarded. +// The 'exportTo' field allows for control over the visibility of a service +// declaration to other namespaces in the mesh. By default, a service is exported +// to all namespaces. The following example restricts the visibility to the +// current namespace, represented by ".", so that it cannot be used by other +// namespaces. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-httpbin +// namespace : egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "." +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// ``` +// +// Define a gateway to handle all egress traffic. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: istio-egressgateway +// namespace: istio-system +// spec: +// selector: +// istio: egressgateway +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - "*" +// ``` +// +// And the associated `VirtualService` to route from the sidecar to the +// gateway service (`istio-egressgateway.istio-system.svc.cluster.local`), as +// well as route from the gateway to the external service. Note that the +// virtual service is exported to all namespaces enabling them to route traffic +// through the gateway to the external service. Forcing traffic to go through +// a managed middle proxy like this is a common practice. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: gateway-routing +// namespace: egress +// spec: +// hosts: +// - httpbin.com +// exportTo: +// - "*" +// gateways: +// - mesh +// - istio-egressgateway +// http: +// - match: +// - port: 80 +// gateways: +// - mesh +// route: +// - destination: +// host: istio-egressgateway.istio-system.svc.cluster.local +// - match: +// - port: 80 +// gateways: +// - istio-egressgateway +// route: +// - destination: +// host: httpbin.com +// ``` +// +// The following example demonstrates the use of wildcards in the hosts for +// external services. If the connection has to be routed to the IP address +// requested by the application (i.e. application resolves DNS and attempts +// to connect to a specific IP), the discovery mode must be set to `NONE`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-wildcard-example +// spec: +// hosts: +// - "*.bar.com" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: NONE +// ``` +// +// The following example demonstrates a service that is available via a +// Unix Domain Socket on the host of the client. The resolution must be +// set to STATIC to use Unix address endpoints. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: unix-domain-socket-example +// spec: +// hosts: +// - "example.unix.local" +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: unix:///var/run/example/socket +// ``` +// +// For HTTP-based services, it is possible to create a `VirtualService` +// backed by multiple DNS addressable endpoints. 
In such a scenario, the +// application can use the `HTTP_PROXY` environment variable to transparently +// reroute API calls for the `VirtualService` to a chosen backend. For +// example, the following configuration creates a non-existent external +// service called foo.bar.com backed by three domains: us.foo.bar.com:8080, +// uk.foo.bar.com:9080, and in.foo.bar.com:7080 +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-dns +// spec: +// hosts: +// - foo.bar.com +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: DNS +// endpoints: +// - address: us.foo.bar.com +// ports: +// https: 8080 +// - address: uk.foo.bar.com +// ports: +// https: 9080 +// - address: in.foo.bar.com +// ports: +// https: 7080 +// ``` +// +// With `HTTP_PROXY=http://localhost/`, calls from the application to +// `http://foo.bar.com` will be load balanced across the three domains +// specified above. In other words, a call to `http://foo.bar.com/baz` would +// be translated to `http://uk.foo.bar.com/baz`. 
+// +// The following example illustrates the usage of a `ServiceEntry` +// containing a subject alternate name +// whose format conforms to the [SPIFFE standard](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md): +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: httpbin +// namespace : httpbin-ns +// spec: +// hosts: +// - httpbin.com +// location: MESH_INTERNAL +// ports: +// - number: 80 +// name: http +// protocol: HTTP +// resolution: STATIC +// endpoints: +// - address: 2.2.2.2 +// - address: 3.3.3.3 +// subjectAltNames: +// - "spiffe://cluster.local/ns/httpbin-ns/sa/httpbin-service-account" +// ``` +// + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for ServiceEntry +func (this *ServiceEntry) MarshalJSON() ([]byte, error) { + str, err := ServiceEntryMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ServiceEntry +func (this *ServiceEntry) UnmarshalJSON(b []byte) error { + return ServiceEntryUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for ServiceEntry_Endpoint +func (this *ServiceEntry_Endpoint) MarshalJSON() ([]byte, error) { + str, err := ServiceEntryMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ServiceEntry_Endpoint +func (this *ServiceEntry_Endpoint) UnmarshalJSON(b []byte) error { + return ServiceEntryUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var ( + ServiceEntryMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + ServiceEntryUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/sidecar.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/sidecar.pb.go new file mode 100644 index 0000000000..08a035b216 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/sidecar.pb.go @@ -0,0 +1,2095 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/sidecar.proto + +// `Sidecar` describes the configuration of the sidecar proxy that mediates +// inbound and outbound communication to the workload instance it is attached to. By +// default, Istio will program all sidecar proxies in the mesh with the +// necessary configuration required to reach every workload instance in the mesh, as +// well as accept traffic on all the ports associated with the +// workload. The `Sidecar` configuration provides a way to fine tune the set of +// ports, protocols that the proxy will accept when forwarding traffic to +// and from the workload. 
In addition, it is possible to restrict the set +// of services that the proxy can reach when forwarding outbound traffic +// from workload instances. +// +// Services and configuration in a mesh are organized into one or more +// namespaces (e.g., a Kubernetes namespace or a CF org/space). A `Sidecar` +// configuration in a namespace will apply to one or more workload instances in the same +// namespace, selected using the `workloadSelector` field. In the absence of a +// `workloadSelector`, it will apply to all workload instances in the same +// namespace. When determining the `Sidecar` configuration to be applied to a +// workload instance, preference will be given to the resource with a +// `workloadSelector` that selects this workload instance, over a `Sidecar` configuration +// without any `workloadSelector`. +// +// NOTE 1: *_Each namespace can have only one `Sidecar` configuration without any +// `workloadSelector`_*. The behavior of the system is undefined if more +// than one selector-less `Sidecar` configurations exist in a given namespace. The +// behavior of the system is undefined if two or more `Sidecar` configurations +// with a `workloadSelector` select the same workload instance. +// +// NOTE 2: *_A `Sidecar` configuration in the `MeshConfig` +// [root namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig) +// will be applied by default to all namespaces without a `Sidecar` +// configuration_*. This global default `Sidecar` configuration should not have +// any `workloadSelector`. +// +// The example below declares a global default `Sidecar` configuration in the +// root namespace called `istio-config`, that configures sidecars in +// all namespaces to allow egress traffic only to other workloads in +// the same namespace, and to services in the `istio-system` namespace. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: istio-config +// spec: +// egress: +// - hosts: +// - "./*" +// - "istio-system/*" +//``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` +// namespace that overrides the global default defined above, and +// configures the sidecars in the namespace to allow egress traffic to +// public services in the `prod-us1`, `prod-apis`, and the `istio-system` +// namespaces. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// egress: +// - hosts: +// - "prod-us1/*" +// - "prod-apis/*" +// - "istio-system/*" +// ``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` namespace +// that accepts inbound HTTP traffic on port 9080 and forwards +// it to the attached workload instance listening on a Unix domain socket. In the +// egress direction, in addition to the `istio-system` namespace, the sidecar +// proxies only HTTP traffic bound for port 9080 for services in the +// `prod-us1` namespace. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// ingress: +// - port: +// number: 9080 +// protocol: HTTP +// name: somename +// defaultEndpoint: unix:///var/run/someuds.sock +// egress: +// - port: +// number: 9080 +// protocol: HTTP +// name: egresshttp +// hosts: +// - "prod-us1/*" +// - hosts: +// - "istio-system/*" +// ``` +// +// If the workload is deployed without IPTables-based traffic capture, the +// `Sidecar` configuration is the only way to configure the ports on the proxy +// attached to the workload instance. The following example declares a `Sidecar` +// configuration in the `prod-us1` namespace for all pods with labels +// `app: productpage` belonging to the `productpage.prod-us1` service. 
Assuming +// that these pods are deployed without IPtable rules (i.e. the `istio-init` +// container) and the proxy metadata `ISTIO_META_INTERCEPTION_MODE` is set to +// `NONE`, the specification, below, allows such pods to receive HTTP traffic +// on port 9080 and forward it to the application listening on +// `127.0.0.1:8080`. It also allows the application to communicate with a +// backing MySQL database on `127.0.0.1:3306`, that then gets proxied to the +// externally hosted MySQL service at `mysql.foo.com:3306`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: no-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - port: +// number: 9080 # binds to proxy_instance_ip:9080 (0.0.0.0:9080, if no unicast IP is available for the instance) +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE # not needed if metadata is set for entire proxy +// egress: +// - port: +// number: 3306 +// protocol: MYSQL +// name: egressmysql +// captureMode: NONE # not needed if metadata is set for entire proxy +// bind: 127.0.0.1 +// hosts: +// - "*/mysql.foo.com" +// ``` +// +// And the associated service entry for routing to `mysql.foo.com:3306` +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mysql +// namespace: ns1 +// spec: +// hosts: +// - mysql.foo.com +// ports: +// - number: 3306 +// name: mysql +// protocol: MYSQL +// location: MESH_EXTERNAL +// resolution: DNS +// ``` +// +// It is also possible to mix and match traffic capture modes in a single +// proxy. For example, consider a setup where internal services are on the +// `192.168.0.0/16` subnet. So, IP tables are setup on the VM to capture all +// outbound traffic on `192.168.0.0/16` subnet. 
Assume that the VM has an +// additional network interface on `172.16.0.0/16` subnet for inbound +// traffic. The following `Sidecar` configuration allows the VM to expose a +// listener on `172.16.1.32:80` (the VM's IP) for traffic arriving from the +// `172.16.0.0/16` subnet. Note that in this scenario, the +// `ISTIO_META_INTERCEPTION_MODE` metadata on the proxy in the VM should +// contain `REDIRECT` or `TPROXY` as its value, implying that IP tables +// based traffic capture is active. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: partial-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - bind: 172.16.1.32 +// port: +// number: 80 # binds to 172.16.1.32:80 +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE +// egress: +// # use the system detected defaults +// # sets up configuration to handle outbound traffic to services +// # in 192.168.0.0/16 subnet, based on information provided by the +// # service registry +// - captureMode: IPTABLES +// hosts: +// - "*/*" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `CaptureMode` describes how traffic to a listener is expected to be +// captured. Applicable only when the listener is bound to an IP. 
+type CaptureMode int32 + +const ( + // The default capture mode defined by the environment. + CaptureMode_DEFAULT CaptureMode = 0 + // Capture traffic using IPtables redirection. + CaptureMode_IPTABLES CaptureMode = 1 + // No traffic capture. When used in an egress listener, the application is + // expected to explicitly communicate with the listener port or Unix + // domain socket. When used in an ingress listener, care needs to be taken + // to ensure that the listener port is not in use by other processes on + // the host. + CaptureMode_NONE CaptureMode = 2 +) + +var CaptureMode_name = map[int32]string{ + 0: "DEFAULT", + 1: "IPTABLES", + 2: "NONE", +} + +var CaptureMode_value = map[string]int32{ + "DEFAULT": 0, + "IPTABLES": 1, + "NONE": 2, +} + +func (x CaptureMode) String() string { + return proto.EnumName(CaptureMode_name, int32(x)) +} + +func (CaptureMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{0} +} + +type OutboundTrafficPolicy_Mode int32 + +const ( + // Outbound traffic will be restricted to services defined in the + // service registry as well as those defined through `ServiceEntry` configurations. + OutboundTrafficPolicy_REGISTRY_ONLY OutboundTrafficPolicy_Mode = 0 + // Outbound traffic to unknown destinations will be allowed, in case + // there are no services or `ServiceEntry` configurations for the destination port. 
+ OutboundTrafficPolicy_ALLOW_ANY OutboundTrafficPolicy_Mode = 1 +) + +var OutboundTrafficPolicy_Mode_name = map[int32]string{ + 0: "REGISTRY_ONLY", + 1: "ALLOW_ANY", +} + +var OutboundTrafficPolicy_Mode_value = map[string]int32{ + "REGISTRY_ONLY": 0, + "ALLOW_ANY": 1, +} + +func (x OutboundTrafficPolicy_Mode) String() string { + return proto.EnumName(OutboundTrafficPolicy_Mode_name, int32(x)) +} + +func (OutboundTrafficPolicy_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{4, 0} +} + +// `Sidecar` describes the configuration of the sidecar proxy that mediates +// inbound and outbound communication of the workload instance to which it is +// attached. +// +// +type Sidecar struct { + // Criteria used to select the specific set of pods/VMs on which this + // `Sidecar` configuration should be applied. If omitted, the `Sidecar` + // configuration will be applied to all workload instances in the same namespace. + WorkloadSelector *WorkloadSelector `protobuf:"bytes,1,opt,name=workload_selector,json=workloadSelector,proto3" json:"workload_selector,omitempty"` + // Ingress specifies the configuration of the sidecar for processing + // inbound traffic to the attached workload instance. If omitted, Istio will + // automatically configure the sidecar based on the information about the workload + // obtained from the orchestration platform (e.g., exposed ports, services, + // etc.). If specified, inbound ports are configured if and only if the + // workload instance is associated with a service. + Ingress []*IstioIngressListener `protobuf:"bytes,2,rep,name=ingress,proto3" json:"ingress,omitempty"` + // Egress specifies the configuration of the sidecar for processing + // outbound traffic from the attached workload instance to other services in the + // mesh. + Egress []*IstioEgressListener `protobuf:"bytes,3,rep,name=egress,proto3" json:"egress,omitempty"` + // This allows to configure the outbound traffic policy. 
+ // If your application uses one or more external + // services that are not known apriori, setting the policy to `ALLOW_ANY` + // will cause the sidecars to route any unknown traffic originating from + // the application to its requested destination. + OutboundTrafficPolicy *OutboundTrafficPolicy `protobuf:"bytes,4,opt,name=outbound_traffic_policy,json=outboundTrafficPolicy,proto3" json:"outbound_traffic_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sidecar) Reset() { *m = Sidecar{} } +func (m *Sidecar) String() string { return proto.CompactTextString(m) } +func (*Sidecar) ProtoMessage() {} +func (*Sidecar) Descriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{0} +} +func (m *Sidecar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sidecar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sidecar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sidecar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sidecar.Merge(m, src) +} +func (m *Sidecar) XXX_Size() int { + return m.Size() +} +func (m *Sidecar) XXX_DiscardUnknown() { + xxx_messageInfo_Sidecar.DiscardUnknown(m) +} + +var xxx_messageInfo_Sidecar proto.InternalMessageInfo + +func (m *Sidecar) GetWorkloadSelector() *WorkloadSelector { + if m != nil { + return m.WorkloadSelector + } + return nil +} + +func (m *Sidecar) GetIngress() []*IstioIngressListener { + if m != nil { + return m.Ingress + } + return nil +} + +func (m *Sidecar) GetEgress() []*IstioEgressListener { + if m != nil { + return m.Egress + } + return nil +} + +func (m *Sidecar) GetOutboundTrafficPolicy() *OutboundTrafficPolicy { + if m != nil { + return m.OutboundTrafficPolicy + } + return nil +} + +// `IstioIngressListener` 
specifies the properties of an inbound +// traffic listener on the sidecar proxy attached to a workload instance. +type IstioIngressListener struct { + // The port associated with the listener. + Port *Port `protobuf:"bytes,1,opt,name=port,proto3" json:"port,omitempty"` + // The IP to which the listener should be bound. Must be in the + // format `x.x.x.x`. Unix domain socket addresses are not allowed in + // the bind field for ingress listeners. If omitted, Istio will + // automatically configure the defaults based on imported services + // and the workload instances to which this configuration is applied + // to. + Bind string `protobuf:"bytes,2,opt,name=bind,proto3" json:"bind,omitempty"` + // The captureMode option dictates how traffic to the listener is + // expected to be captured (or not). + CaptureMode CaptureMode `protobuf:"varint,3,opt,name=capture_mode,json=captureMode,proto3,enum=istio.networking.v1alpha3.CaptureMode" json:"capture_mode,omitempty"` + // The loopback IP endpoint or Unix domain socket to which + // traffic should be forwarded to. This configuration can be used to + // redirect traffic arriving at the bind `IP:Port` on the sidecar to a `localhost:port` + // or Unix domain socket where the application workload instance is listening for + // connections. 
Format should be `127.0.0.1:PORT` or `unix:///path/to/socket` + DefaultEndpoint string `protobuf:"bytes,4,opt,name=default_endpoint,json=defaultEndpoint,proto3" json:"default_endpoint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IstioIngressListener) Reset() { *m = IstioIngressListener{} } +func (m *IstioIngressListener) String() string { return proto.CompactTextString(m) } +func (*IstioIngressListener) ProtoMessage() {} +func (*IstioIngressListener) Descriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{1} +} +func (m *IstioIngressListener) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IstioIngressListener) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IstioIngressListener.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IstioIngressListener) XXX_Merge(src proto.Message) { + xxx_messageInfo_IstioIngressListener.Merge(m, src) +} +func (m *IstioIngressListener) XXX_Size() int { + return m.Size() +} +func (m *IstioIngressListener) XXX_DiscardUnknown() { + xxx_messageInfo_IstioIngressListener.DiscardUnknown(m) +} + +var xxx_messageInfo_IstioIngressListener proto.InternalMessageInfo + +func (m *IstioIngressListener) GetPort() *Port { + if m != nil { + return m.Port + } + return nil +} + +func (m *IstioIngressListener) GetBind() string { + if m != nil { + return m.Bind + } + return "" +} + +func (m *IstioIngressListener) GetCaptureMode() CaptureMode { + if m != nil { + return m.CaptureMode + } + return CaptureMode_DEFAULT +} + +func (m *IstioIngressListener) GetDefaultEndpoint() string { + if m != nil { + return m.DefaultEndpoint + } + return "" +} + +// `IstioEgressListener` specifies the properties of an outbound traffic +// listener on the sidecar 
proxy attached to a workload instance. +type IstioEgressListener struct { + // The port associated with the listener. If using Unix domain socket, + // use 0 as the port number, with a valid protocol. The port if + // specified, will be used as the default destination port associated + // with the imported hosts. If the port is omitted, Istio will infer the + // listener ports based on the imported hosts. Note that when multiple + // egress listeners are specified, where one or more listeners have + // specific ports while others have no port, the hosts exposed on a + // listener port will be based on the listener with the most specific + // port. + Port *Port `protobuf:"bytes,1,opt,name=port,proto3" json:"port,omitempty"` + // The IP or the Unix domain socket to which the listener should be bound + // to. Port MUST be specified if bind is not empty. Format: `x.x.x.x` or + // `unix:///path/to/uds` or `unix://@foobar` (Linux abstract namespace). If + // omitted, Istio will automatically configure the defaults based on imported + // services, the workload instances to which this configuration is applied to and + // the captureMode. If captureMode is `NONE`, bind will default to + // 127.0.0.1. + Bind string `protobuf:"bytes,2,opt,name=bind,proto3" json:"bind,omitempty"` + // When the bind address is an IP, the captureMode option dictates + // how traffic to the listener is expected to be captured (or not). + // captureMode must be DEFAULT or `NONE` for Unix domain socket binds. + CaptureMode CaptureMode `protobuf:"varint,3,opt,name=capture_mode,json=captureMode,proto3,enum=istio.networking.v1alpha3.CaptureMode" json:"capture_mode,omitempty"` + // One or more service hosts exposed by the listener + // in `namespace/dnsName` format. Services in the specified namespace + // matching `dnsName` will be exposed. 
+ // The corresponding service can be a service in the service registry + // (e.g., a Kubernetes or cloud foundry service) or a service specified + // using a `ServiceEntry` or `VirtualService` configuration. Any + // associated `DestinationRule` in the same namespace will also be used. + // + // The `dnsName` should be specified using FQDN format, optionally including + // a wildcard character in the left-most component (e.g., `prod/*.example.com`). + // Set the `dnsName` to `*` to select all services from the specified namespace + // (e.g., `prod/*`). + // + // The `namespace` can be set to `*`, `.`, or `~`, representing any, the current, + // or no namespace, respectively. For example, `*/foo.example.com` selects the + // service from any available namespace while `./foo.example.com` only selects + // the service from the namespace of the sidecar. If a host is set to `*/*`, + // Istio will configure the sidecar to be able to reach every service in the + // mesh that is exported to the sidecar's namespace. The value `~/*` can be used + // to completely trim the configuration for sidecars that simply receive traffic + // and respond, but make no outbound connections of their own. + // + // NOTE: Only services and configuration artifacts exported to the sidecar's + // namespace (e.g., `exportTo` value of `*`) can be referenced. + // Private configurations (e.g., `exportTo` set to `.`) will + // not be available. Refer to the `exportTo` setting in `VirtualService`, + // `DestinationRule`, and `ServiceEntry` configurations for details. + // + // **WARNING:** The list of egress hosts in a `Sidecar` must also include + // the Mixer control plane services if they are enabled. Envoy will not + // be able to reach them otherwise. 
For example, add host + // `istio-system/istio-telemetry.istio-system.svc.cluster.local` if telemetry + // is enabled, `istio-system/istio-policy.istio-system.svc.cluster.local` if + // policy is enabled, or add `istio-system/*` to allow all services in the + // `istio-system` namespace. This requirement is temporary and will be removed + // in a future Istio release. + Hosts []string `protobuf:"bytes,4,rep,name=hosts,proto3" json:"hosts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IstioEgressListener) Reset() { *m = IstioEgressListener{} } +func (m *IstioEgressListener) String() string { return proto.CompactTextString(m) } +func (*IstioEgressListener) ProtoMessage() {} +func (*IstioEgressListener) Descriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{2} +} +func (m *IstioEgressListener) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IstioEgressListener) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IstioEgressListener.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IstioEgressListener) XXX_Merge(src proto.Message) { + xxx_messageInfo_IstioEgressListener.Merge(m, src) +} +func (m *IstioEgressListener) XXX_Size() int { + return m.Size() +} +func (m *IstioEgressListener) XXX_DiscardUnknown() { + xxx_messageInfo_IstioEgressListener.DiscardUnknown(m) +} + +var xxx_messageInfo_IstioEgressListener proto.InternalMessageInfo + +func (m *IstioEgressListener) GetPort() *Port { + if m != nil { + return m.Port + } + return nil +} + +func (m *IstioEgressListener) GetBind() string { + if m != nil { + return m.Bind + } + return "" +} + +func (m *IstioEgressListener) GetCaptureMode() CaptureMode { + if m != nil { + return m.CaptureMode + } + return 
CaptureMode_DEFAULT +} + +func (m *IstioEgressListener) GetHosts() []string { + if m != nil { + return m.Hosts + } + return nil +} + +// `WorkloadSelector` specifies the criteria used to determine if the `Gateway`, +// `Sidecar`, or `EnvoyFilter` configuration can be applied to a proxy. The matching criteria +// includes the metadata associated with a proxy, workload instance info such as +// labels attached to the pod/VM, or any other info that the proxy provides +// to Istio during the initial handshake. If multiple conditions are +// specified, all conditions need to match in order for the workload instance to be +// selected. Currently, only label based selection mechanism is supported. +type WorkloadSelector struct { + // One or more labels that indicate a specific set of pods/VMs + // on which this `Sidecar` configuration should be applied. The scope of + // label search is restricted to the configuration namespace in which the + // the resource is present. + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkloadSelector) Reset() { *m = WorkloadSelector{} } +func (m *WorkloadSelector) String() string { return proto.CompactTextString(m) } +func (*WorkloadSelector) ProtoMessage() {} +func (*WorkloadSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{3} +} +func (m *WorkloadSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkloadSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkloadSelector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*WorkloadSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkloadSelector.Merge(m, src) +} +func (m *WorkloadSelector) XXX_Size() int { + return m.Size() +} +func (m *WorkloadSelector) XXX_DiscardUnknown() { + xxx_messageInfo_WorkloadSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkloadSelector proto.InternalMessageInfo + +func (m *WorkloadSelector) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// `OutboundTrafficPolicy` sets the default behavior of the sidecar for +// handling outbound traffic from the application. +// If your application uses one or more external +// services that are not known apriori, setting the policy to `ALLOW_ANY` +// will cause the sidecars to route any unknown traffic originating from +// the application to its requested destination. Users are strongly +// encouraged to use `ServiceEntry` configurations to explicitly declare any external +// dependencies, instead of using `ALLOW_ANY`, so that traffic to these +// services can be monitored. 
+type OutboundTrafficPolicy struct { + Mode OutboundTrafficPolicy_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=istio.networking.v1alpha3.OutboundTrafficPolicy_Mode" json:"mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OutboundTrafficPolicy) Reset() { *m = OutboundTrafficPolicy{} } +func (m *OutboundTrafficPolicy) String() string { return proto.CompactTextString(m) } +func (*OutboundTrafficPolicy) ProtoMessage() {} +func (*OutboundTrafficPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_b5c11342f04ad3d1, []int{4} +} +func (m *OutboundTrafficPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OutboundTrafficPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OutboundTrafficPolicy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OutboundTrafficPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_OutboundTrafficPolicy.Merge(m, src) +} +func (m *OutboundTrafficPolicy) XXX_Size() int { + return m.Size() +} +func (m *OutboundTrafficPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_OutboundTrafficPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_OutboundTrafficPolicy proto.InternalMessageInfo + +func (m *OutboundTrafficPolicy) GetMode() OutboundTrafficPolicy_Mode { + if m != nil { + return m.Mode + } + return OutboundTrafficPolicy_REGISTRY_ONLY +} + +func init() { + proto.RegisterEnum("istio.networking.v1alpha3.CaptureMode", CaptureMode_name, CaptureMode_value) + proto.RegisterEnum("istio.networking.v1alpha3.OutboundTrafficPolicy_Mode", OutboundTrafficPolicy_Mode_name, OutboundTrafficPolicy_Mode_value) + proto.RegisterType((*Sidecar)(nil), "istio.networking.v1alpha3.Sidecar") + proto.RegisterType((*IstioIngressListener)(nil), 
"istio.networking.v1alpha3.IstioIngressListener") + proto.RegisterType((*IstioEgressListener)(nil), "istio.networking.v1alpha3.IstioEgressListener") + proto.RegisterType((*WorkloadSelector)(nil), "istio.networking.v1alpha3.WorkloadSelector") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.WorkloadSelector.LabelsEntry") + proto.RegisterType((*OutboundTrafficPolicy)(nil), "istio.networking.v1alpha3.OutboundTrafficPolicy") +} + +func init() { proto.RegisterFile("networking/v1alpha3/sidecar.proto", fileDescriptor_b5c11342f04ad3d1) } + +var fileDescriptor_b5c11342f04ad3d1 = []byte{ + // 599 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xee, 0x26, 0xee, 0x4f, 0x26, 0x2d, 0xb8, 0x4b, 0x2b, 0xdc, 0x1e, 0x5a, 0xe3, 0x03, 0x8a, + 0x40, 0x72, 0x20, 0x15, 0xa2, 0x70, 0x4b, 0xc1, 0x20, 0x0b, 0x93, 0x54, 0x4e, 0x50, 0x29, 0x17, + 0x6b, 0x63, 0x6f, 0x92, 0x55, 0x8d, 0xd7, 0xb2, 0x37, 0xad, 0xf2, 0x0e, 0xbc, 0x05, 0xaf, 0xc2, + 0x81, 0x23, 0x6f, 0x40, 0x55, 0x89, 0xf7, 0x40, 0xdd, 0x75, 0xd5, 0x52, 0x99, 0xa0, 0x5c, 0xb8, + 0xed, 0xce, 0x7c, 0xdf, 0xb7, 0x33, 0x9f, 0x66, 0x07, 0x1e, 0x24, 0x54, 0x9c, 0xf1, 0xec, 0x84, + 0x25, 0xa3, 0xe6, 0xe9, 0x53, 0x12, 0xa7, 0x63, 0xb2, 0xd7, 0xcc, 0x59, 0x44, 0x43, 0x92, 0xd9, + 0x69, 0xc6, 0x05, 0xc7, 0x5b, 0x2c, 0x17, 0x8c, 0xdb, 0xd7, 0x40, 0xfb, 0x0a, 0xb8, 0xbd, 0x3b, + 0xe2, 0x7c, 0x14, 0xd3, 0x26, 0x49, 0x59, 0x73, 0xc8, 0x68, 0x1c, 0x05, 0x03, 0x3a, 0x26, 0xa7, + 0x8c, 0x17, 0xdc, 0xed, 0x52, 0xf9, 0x11, 0x11, 0xf4, 0x8c, 0x4c, 0x15, 0xc4, 0xfa, 0x55, 0x81, + 0xe5, 0x9e, 0x7a, 0x10, 0x7f, 0x84, 0xf5, 0x4b, 0x74, 0xcc, 0x49, 0x14, 0xe4, 0x34, 0xa6, 0xa1, + 0xe0, 0x99, 0x81, 0x4c, 0xd4, 0xa8, 0xb7, 0x1e, 0xdb, 0x7f, 0x2d, 0xc3, 0x3e, 0x2a, 0x38, 0xbd, + 0x82, 0xe2, 0xeb, 0x67, 0xb7, 0x22, 0xd8, 0x85, 0x65, 0x96, 0x8c, 0x32, 0x9a, 0xe7, 0x46, 0xc5, + 0xac, 0x36, 0xea, 0xad, 0xe6, 0x0c, 0x3d, 0xf7, 0x32, 0xe3, 
0x2a, 0xb8, 0xc7, 0x72, 0x41, 0x13, + 0x9a, 0xf9, 0x57, 0x7c, 0xfc, 0x0e, 0x96, 0xa8, 0x52, 0xaa, 0x4a, 0x25, 0xfb, 0x5f, 0x4a, 0xce, + 0x1f, 0x42, 0x07, 0xd5, 0xf3, 0x76, 0xc5, 0x2f, 0x24, 0xf0, 0x18, 0xee, 0xf3, 0x89, 0x18, 0xf0, + 0x49, 0x12, 0x05, 0x22, 0x23, 0xc3, 0x21, 0x0b, 0x83, 0x94, 0xc7, 0x2c, 0x9c, 0x1a, 0x9a, 0xec, + 0xfb, 0xc9, 0x0c, 0xf5, 0x6e, 0xc1, 0xec, 0x2b, 0xe2, 0xa1, 0xe4, 0xf9, 0x9b, 0xbc, 0x2c, 0x6c, + 0xfd, 0x44, 0xb0, 0x51, 0xd6, 0x18, 0xde, 0x07, 0x2d, 0xe5, 0x99, 0x28, 0x7c, 0xde, 0x9d, 0xf1, + 0xde, 0x21, 0xcf, 0x84, 0x2a, 0x5f, 0x32, 0x30, 0x06, 0x6d, 0xc0, 0x92, 0xc8, 0xa8, 0x98, 0xa8, + 0x51, 0xf3, 0xe5, 0x19, 0xbb, 0xb0, 0x1a, 0x92, 0x54, 0x4c, 0x32, 0x1a, 0x7c, 0xe6, 0x11, 0x35, + 0xaa, 0x26, 0x6a, 0xdc, 0x69, 0x3d, 0x9c, 0xa1, 0xfa, 0x4a, 0xc1, 0xdf, 0xf3, 0x88, 0xfa, 0xf5, + 0xf0, 0xfa, 0x82, 0x6d, 0xd0, 0x23, 0x3a, 0x24, 0x93, 0x58, 0x04, 0x34, 0x89, 0x52, 0xce, 0x12, + 0x21, 0x4d, 0xa9, 0xa9, 0x1a, 0xee, 0x16, 0x49, 0xa7, 0xc8, 0x59, 0xdf, 0x10, 0xdc, 0x2b, 0x31, + 0x1c, 0xef, 0xcd, 0xd5, 0xe0, 0xff, 0xe9, 0x6d, 0x0b, 0x16, 0xc7, 0x3c, 0x17, 0xb9, 0xa1, 0x99, + 0xd5, 0xab, 0x86, 0x54, 0xc4, 0xfa, 0x8a, 0x40, 0xbf, 0x3d, 0xd1, 0xb8, 0x0f, 0x4b, 0x31, 0x19, + 0xd0, 0x38, 0x37, 0x90, 0x1c, 0xba, 0xe7, 0x73, 0x7c, 0x07, 0xdb, 0x93, 0x4c, 0x27, 0x11, 0xd9, + 0xb4, 0x98, 0x3e, 0xa5, 0xb5, 0xfd, 0x02, 0xea, 0x37, 0x72, 0x58, 0x87, 0xea, 0x09, 0x9d, 0x4a, + 0x9f, 0x6a, 0xfe, 0xe5, 0x11, 0x6f, 0xc0, 0xe2, 0x29, 0x89, 0x27, 0xb4, 0xb0, 0x41, 0x5d, 0x5e, + 0x56, 0xf6, 0x91, 0xf5, 0x05, 0xc1, 0x66, 0xe9, 0xfc, 0x61, 0x17, 0x34, 0xe9, 0x0e, 0x92, 0xee, + 0x3c, 0x9b, 0x77, 0x7e, 0x6d, 0x69, 0x96, 0x94, 0xb0, 0x1a, 0xa0, 0x49, 0xb7, 0xd6, 0x61, 0xcd, + 0x77, 0xde, 0xba, 0xbd, 0xbe, 0x7f, 0x1c, 0x74, 0x3b, 0xde, 0xb1, 0xbe, 0x80, 0xd7, 0xa0, 0xd6, + 0xf6, 0xbc, 0xee, 0x51, 0xd0, 0xee, 0x1c, 0xeb, 0xe8, 0x51, 0x0b, 0xea, 0x37, 0xbc, 0xc6, 0x75, + 0x58, 0x7e, 0xed, 0xbc, 0x69, 0x7f, 0xf0, 0xfa, 0xfa, 0x02, 0x5e, 0x85, 0x15, 0xf7, 0xb0, 0xdf, + 
0x3e, 0xf0, 0x9c, 0x9e, 0x8e, 0xf0, 0x0a, 0x68, 0x9d, 0x6e, 0xc7, 0xd1, 0x2b, 0x07, 0xf6, 0xf7, + 0x8b, 0x1d, 0xf4, 0xe3, 0x62, 0x07, 0x9d, 0x5f, 0xec, 0xa0, 0x4f, 0xa6, 0xaa, 0x93, 0x71, 0xb9, + 0xcd, 0x4a, 0xf6, 0xd6, 0x60, 0x49, 0x2e, 0xac, 0xbd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x16, + 0x6e, 0x64, 0x48, 0x34, 0x05, 0x00, 0x00, +} + +func (m *Sidecar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sidecar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sidecar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OutboundTrafficPolicy != nil { + { + size, err := m.OutboundTrafficPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.WorkloadSelector != nil { + { + size, err := m.WorkloadSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*IstioIngressListener) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IstioIngressListener) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IstioIngressListener) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultEndpoint) > 0 { + i -= len(m.DefaultEndpoint) + copy(dAtA[i:], m.DefaultEndpoint) + i = encodeVarintSidecar(dAtA, i, uint64(len(m.DefaultEndpoint))) + i-- + dAtA[i] = 0x22 + } + if m.CaptureMode != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.CaptureMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Bind) > 0 { + i -= len(m.Bind) + copy(dAtA[i:], m.Bind) + i = encodeVarintSidecar(dAtA, i, uint64(len(m.Bind))) + i-- + dAtA[i] = 0x12 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IstioEgressListener) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IstioEgressListener) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IstioEgressListener) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + 
copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintSidecar(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.CaptureMode != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.CaptureMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Bind) > 0 { + i -= len(m.Bind) + copy(dAtA[i:], m.Bind) + i = encodeVarintSidecar(dAtA, i, uint64(len(m.Bind))) + i-- + dAtA[i] = 0x12 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSidecar(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkloadSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkloadSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkloadSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintSidecar(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSidecar(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSidecar(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OutboundTrafficPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutboundTrafficPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *OutboundTrafficPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Mode != 0 { + i = encodeVarintSidecar(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintSidecar(dAtA []byte, offset int, v uint64) int { + offset -= sovSidecar(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Sidecar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WorkloadSelector != nil { + l = m.WorkloadSelector.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + } + if m.OutboundTrafficPolicy != nil { + l = m.OutboundTrafficPolicy.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IstioIngressListener) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + l = len(m.Bind) + if l > 0 { + n += 1 + l + sovSidecar(uint64(l)) + } + if m.CaptureMode != 0 { + n += 1 + sovSidecar(uint64(m.CaptureMode)) + } + l = len(m.DefaultEndpoint) + if l > 0 { + n += 1 + l + sovSidecar(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *IstioEgressListener) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovSidecar(uint64(l)) + } + l = len(m.Bind) + if l > 0 { + n += 1 + l + sovSidecar(uint64(l)) + } + if m.CaptureMode != 0 
{ + n += 1 + sovSidecar(uint64(m.CaptureMode)) + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovSidecar(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkloadSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSidecar(uint64(len(k))) + 1 + len(v) + sovSidecar(uint64(len(v))) + n += mapEntrySize + 1 + sovSidecar(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OutboundTrafficPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mode != 0 { + n += 1 + sovSidecar(uint64(m.Mode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSidecar(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSidecar(x uint64) (n int) { + return sovSidecar(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Sidecar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sidecar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sidecar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkloadSelector == nil { + m.WorkloadSelector = &WorkloadSelector{} + } + if err := m.WorkloadSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, &IstioIngressListener{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, &IstioEgressListener{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutboundTrafficPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OutboundTrafficPolicy == nil { + m.OutboundTrafficPolicy = &OutboundTrafficPolicy{} + } + if err := m.OutboundTrafficPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IstioIngressListener) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IstioIngressListener: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IstioIngressListener: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &Port{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CaptureMode", wireType) + } + m.CaptureMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CaptureMode |= CaptureMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultEndpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultEndpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IstioEgressListener) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IstioEgressListener: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IstioEgressListener: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &Port{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CaptureMode", wireType) + } + m.CaptureMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CaptureMode |= CaptureMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkloadSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkloadSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkloadSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSidecar + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSidecar + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSidecar + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthSidecar + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSidecar + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthSidecar + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OutboundTrafficPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OutboundTrafficPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OutboundTrafficPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSidecar + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= OutboundTrafficPolicy_Mode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSidecar(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSidecar + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSidecar(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSidecar + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthSidecar + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSidecar + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSidecar(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthSidecar + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthSidecar = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSidecar = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/sidecar_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/sidecar_deepcopy.gen.go new file mode 100644 index 0000000000..552dbe2959 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/sidecar_deepcopy.gen.go @@ -0,0 +1,225 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/sidecar.proto + +// `Sidecar` describes the configuration of the sidecar proxy that mediates +// inbound and outbound communication to the workload instance it is attached to. By +// default, Istio will program all sidecar proxies in the mesh with the +// necessary configuration required to reach every workload instance in the mesh, as +// well as accept traffic on all the ports associated with the +// workload. The `Sidecar` configuration provides a way to fine tune the set of +// ports, protocols that the proxy will accept when forwarding traffic to +// and from the workload. In addition, it is possible to restrict the set +// of services that the proxy can reach when forwarding outbound traffic +// from workload instances. +// +// Services and configuration in a mesh are organized into one or more +// namespaces (e.g., a Kubernetes namespace or a CF org/space). A `Sidecar` +// configuration in a namespace will apply to one or more workload instances in the same +// namespace, selected using the `workloadSelector` field. In the absence of a +// `workloadSelector`, it will apply to all workload instances in the same +// namespace. When determining the `Sidecar` configuration to be applied to a +// workload instance, preference will be given to the resource with a +// `workloadSelector` that selects this workload instance, over a `Sidecar` configuration +// without any `workloadSelector`. 
+// +// NOTE 1: *_Each namespace can have only one `Sidecar` configuration without any +// `workloadSelector`_*. The behavior of the system is undefined if more +// than one selector-less `Sidecar` configurations exist in a given namespace. The +// behavior of the system is undefined if two or more `Sidecar` configurations +// with a `workloadSelector` select the same workload instance. +// +// NOTE 2: *_A `Sidecar` configuration in the `MeshConfig` +// [root namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig) +// will be applied by default to all namespaces without a `Sidecar` +// configuration_*. This global default `Sidecar` configuration should not have +// any `workloadSelector`. +// +// The example below declares a global default `Sidecar` configuration in the +// root namespace called `istio-config`, that configures sidecars in +// all namespaces to allow egress traffic only to other workloads in +// the same namespace, and to services in the `istio-system` namespace. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: istio-config +// spec: +// egress: +// - hosts: +// - "./*" +// - "istio-system/*" +//``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` +// namespace that overrides the global default defined above, and +// configures the sidecars in the namespace to allow egress traffic to +// public services in the `prod-us1`, `prod-apis`, and the `istio-system` +// namespaces. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// egress: +// - hosts: +// - "prod-us1/*" +// - "prod-apis/*" +// - "istio-system/*" +// ``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` namespace +// that accepts inbound HTTP traffic on port 9080 and forwards +// it to the attached workload instance listening on a Unix domain socket. In the +// egress direction, in addition to the `istio-system` namespace, the sidecar +// proxies only HTTP traffic bound for port 9080 for services in the +// `prod-us1` namespace. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// ingress: +// - port: +// number: 9080 +// protocol: HTTP +// name: somename +// defaultEndpoint: unix:///var/run/someuds.sock +// egress: +// - port: +// number: 9080 +// protocol: HTTP +// name: egresshttp +// hosts: +// - "prod-us1/*" +// - hosts: +// - "istio-system/*" +// ``` +// +// If the workload is deployed without IPTables-based traffic capture, the +// `Sidecar` configuration is the only way to configure the ports on the proxy +// attached to the workload instance. The following example declares a `Sidecar` +// configuration in the `prod-us1` namespace for all pods with labels +// `app: productpage` belonging to the `productpage.prod-us1` service. Assuming +// that these pods are deployed without IPtable rules (i.e. the `istio-init` +// container) and the proxy metadata `ISTIO_META_INTERCEPTION_MODE` is set to +// `NONE`, the specification, below, allows such pods to receive HTTP traffic +// on port 9080 and forward it to the application listening on +// `127.0.0.1:8080`. It also allows the application to communicate with a +// backing MySQL database on `127.0.0.1:3306`, that then gets proxied to the +// externally hosted MySQL service at `mysql.foo.com:3306`. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: no-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - port: +// number: 9080 # binds to proxy_instance_ip:9080 (0.0.0.0:9080, if no unicast IP is available for the instance) +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE # not needed if metadata is set for entire proxy +// egress: +// - port: +// number: 3306 +// protocol: MYSQL +// name: egressmysql +// captureMode: NONE # not needed if metadata is set for entire proxy +// bind: 127.0.0.1 +// hosts: +// - "*/mysql.foo.com" +// ``` +// +// And the associated service entry for routing to `mysql.foo.com:3306` +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mysql +// namespace: ns1 +// spec: +// hosts: +// - mysql.foo.com +// ports: +// - number: 3306 +// name: mysql +// protocol: MYSQL +// location: MESH_EXTERNAL +// resolution: DNS +// ``` +// +// It is also possible to mix and match traffic capture modes in a single +// proxy. For example, consider a setup where internal services are on the +// `192.168.0.0/16` subnet. So, IP tables are setup on the VM to capture all +// outbound traffic on `192.168.0.0/16` subnet. Assume that the VM has an +// additional network interface on `172.16.0.0/16` subnet for inbound +// traffic. The following `Sidecar` configuration allows the VM to expose a +// listener on `172.16.1.32:80` (the VM's IP) for traffic arriving from the +// `172.16.0.0/16` subnet. Note that in this scenario, the +// `ISTIO_META_INTERCEPTION_MODE` metadata on the proxy in the VM should +// contain `REDIRECT` or `TPROXY` as its value, implying that IP tables +// based traffic capture is active. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: partial-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - bind: 172.16.1.32 +// port: +// number: 80 # binds to 172.16.1.32:80 +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE +// egress: +// # use the system detected defaults +// # sets up configuration to handle outbound traffic to services +// # in 192.168.0.0/16 subnet, based on information provided by the +// # service registry +// - captureMode: IPTABLES +// hosts: +// - "*/*" +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using Sidecar within kubernetes types, where deepcopy-gen is used. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + p := proto.Clone(in).(*Sidecar) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/sidecar_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/sidecar_json.gen.go new file mode 100644 index 0000000000..e6409889f4 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/sidecar_json.gen.go @@ -0,0 +1,281 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/sidecar.proto + +// `Sidecar` describes the configuration of the sidecar proxy that mediates +// inbound and outbound communication to the workload instance it is attached to. By +// default, Istio will program all sidecar proxies in the mesh with the +// necessary configuration required to reach every workload instance in the mesh, as +// well as accept traffic on all the ports associated with the +// workload. 
The `Sidecar` configuration provides a way to fine tune the set of +// ports, protocols that the proxy will accept when forwarding traffic to +// and from the workload. In addition, it is possible to restrict the set +// of services that the proxy can reach when forwarding outbound traffic +// from workload instances. +// +// Services and configuration in a mesh are organized into one or more +// namespaces (e.g., a Kubernetes namespace or a CF org/space). A `Sidecar` +// configuration in a namespace will apply to one or more workload instances in the same +// namespace, selected using the `workloadSelector` field. In the absence of a +// `workloadSelector`, it will apply to all workload instances in the same +// namespace. When determining the `Sidecar` configuration to be applied to a +// workload instance, preference will be given to the resource with a +// `workloadSelector` that selects this workload instance, over a `Sidecar` configuration +// without any `workloadSelector`. +// +// NOTE 1: *_Each namespace can have only one `Sidecar` configuration without any +// `workloadSelector`_*. The behavior of the system is undefined if more +// than one selector-less `Sidecar` configurations exist in a given namespace. The +// behavior of the system is undefined if two or more `Sidecar` configurations +// with a `workloadSelector` select the same workload instance. +// +// NOTE 2: *_A `Sidecar` configuration in the `MeshConfig` +// [root namespace](https://istio.io/docs/reference/config/istio.mesh.v1alpha1/#MeshConfig) +// will be applied by default to all namespaces without a `Sidecar` +// configuration_*. This global default `Sidecar` configuration should not have +// any `workloadSelector`. 
+// +// The example below declares a global default `Sidecar` configuration in the +// root namespace called `istio-config`, that configures sidecars in +// all namespaces to allow egress traffic only to other workloads in +// the same namespace, and to services in the `istio-system` namespace. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: istio-config +// spec: +// egress: +// - hosts: +// - "./*" +// - "istio-system/*" +//``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` +// namespace that overrides the global default defined above, and +// configures the sidecars in the namespace to allow egress traffic to +// public services in the `prod-us1`, `prod-apis`, and the `istio-system` +// namespaces. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// egress: +// - hosts: +// - "prod-us1/*" +// - "prod-apis/*" +// - "istio-system/*" +// ``` +// +// The example below declares a `Sidecar` configuration in the `prod-us1` namespace +// that accepts inbound HTTP traffic on port 9080 and forwards +// it to the attached workload instance listening on a Unix domain socket. In the +// egress direction, in addition to the `istio-system` namespace, the sidecar +// proxies only HTTP traffic bound for port 9080 for services in the +// `prod-us1` namespace. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: default +// namespace: prod-us1 +// spec: +// ingress: +// - port: +// number: 9080 +// protocol: HTTP +// name: somename +// defaultEndpoint: unix:///var/run/someuds.sock +// egress: +// - port: +// number: 9080 +// protocol: HTTP +// name: egresshttp +// hosts: +// - "prod-us1/*" +// - hosts: +// - "istio-system/*" +// ``` +// +// If the workload is deployed without IPTables-based traffic capture, the +// `Sidecar` configuration is the only way to configure the ports on the proxy +// attached to the workload instance. The following example declares a `Sidecar` +// configuration in the `prod-us1` namespace for all pods with labels +// `app: productpage` belonging to the `productpage.prod-us1` service. Assuming +// that these pods are deployed without IPtable rules (i.e. the `istio-init` +// container) and the proxy metadata `ISTIO_META_INTERCEPTION_MODE` is set to +// `NONE`, the specification, below, allows such pods to receive HTTP traffic +// on port 9080 and forward it to the application listening on +// `127.0.0.1:8080`. It also allows the application to communicate with a +// backing MySQL database on `127.0.0.1:3306`, that then gets proxied to the +// externally hosted MySQL service at `mysql.foo.com:3306`. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: no-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - port: +// number: 9080 # binds to proxy_instance_ip:9080 (0.0.0.0:9080, if no unicast IP is available for the instance) +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE # not needed if metadata is set for entire proxy +// egress: +// - port: +// number: 3306 +// protocol: MYSQL +// name: egressmysql +// captureMode: NONE # not needed if metadata is set for entire proxy +// bind: 127.0.0.1 +// hosts: +// - "*/mysql.foo.com" +// ``` +// +// And the associated service entry for routing to `mysql.foo.com:3306` +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-mysql +// namespace: ns1 +// spec: +// hosts: +// - mysql.foo.com +// ports: +// - number: 3306 +// name: mysql +// protocol: MYSQL +// location: MESH_EXTERNAL +// resolution: DNS +// ``` +// +// It is also possible to mix and match traffic capture modes in a single +// proxy. For example, consider a setup where internal services are on the +// `192.168.0.0/16` subnet. So, IP tables are setup on the VM to capture all +// outbound traffic on `192.168.0.0/16` subnet. Assume that the VM has an +// additional network interface on `172.16.0.0/16` subnet for inbound +// traffic. The following `Sidecar` configuration allows the VM to expose a +// listener on `172.16.1.32:80` (the VM's IP) for traffic arriving from the +// `172.16.0.0/16` subnet. Note that in this scenario, the +// `ISTIO_META_INTERCEPTION_MODE` metadata on the proxy in the VM should +// contain `REDIRECT` or `TPROXY` as its value, implying that IP tables +// based traffic capture is active. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: Sidecar +// metadata: +// name: partial-ip-tables +// namespace: prod-us1 +// spec: +// workloadSelector: +// labels: +// app: productpage +// ingress: +// - bind: 172.16.1.32 +// port: +// number: 80 # binds to 172.16.1.32:80 +// protocol: HTTP +// name: somename +// defaultEndpoint: 127.0.0.1:8080 +// captureMode: NONE +// egress: +// # use the system detected defaults +// # sets up configuration to handle outbound traffic to services +// # in 192.168.0.0/16 subnet, based on information provided by the +// # service registry +// - captureMode: IPTABLES +// hosts: +// - "*/*" +// ``` +// + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for Sidecar +func (this *Sidecar) MarshalJSON() ([]byte, error) { + str, err := SidecarMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Sidecar +func (this *Sidecar) UnmarshalJSON(b []byte) error { + return SidecarUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for IstioIngressListener +func (this *IstioIngressListener) MarshalJSON() ([]byte, error) { + str, err := SidecarMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for IstioIngressListener +func (this *IstioIngressListener) UnmarshalJSON(b []byte) error { + return SidecarUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for IstioEgressListener +func (this *IstioEgressListener) MarshalJSON() ([]byte, error) { + str, err := 
SidecarMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for IstioEgressListener +func (this *IstioEgressListener) UnmarshalJSON(b []byte) error { + return SidecarUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for WorkloadSelector +func (this *WorkloadSelector) MarshalJSON() ([]byte, error) { + str, err := SidecarMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for WorkloadSelector +func (this *WorkloadSelector) UnmarshalJSON(b []byte) error { + return SidecarUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for OutboundTrafficPolicy +func (this *OutboundTrafficPolicy) MarshalJSON() ([]byte, error) { + str, err := SidecarMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for OutboundTrafficPolicy +func (this *OutboundTrafficPolicy) UnmarshalJSON(b []byte) error { + return SidecarUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var ( + SidecarMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + SidecarUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/virtual_service.pb.go b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service.pb.go new file mode 100644 index 0000000000..f37e9bb4b1 --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service.pb.go @@ -0,0 +1,10921 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/virtual_service.proto + +// Configuration affecting traffic routing. Here are a few terms useful to define +// in the context of traffic routing. +// +// `Service` a unit of application behavior bound to a unique name in a +// service registry. Services consist of multiple network *endpoints* +// implemented by workload instances running on pods, containers, VMs etc. 
+// +// `Service versions (a.k.a. subsets)` - In a continuous deployment +// scenario, for a given service, there can be distinct subsets of +// instances running different variants of the application binary. These +// variants are not necessarily different API versions. They could be +// iterative changes to the same service, deployed in different +// environments (prod, staging, dev, etc.). Common scenarios where this +// occurs include A/B testing, canary rollouts, etc. The choice of a +// particular version can be decided based on various criterion (headers, +// url, etc.) and/or by weights assigned to each version. Each service has +// a default version consisting of all its instances. +// +// `Source` - A downstream client calling a service. +// +// `Host` - The address used by a client when attempting to connect to a +// service. +// +// `Access model` - Applications address only the destination service +// (Host) without knowledge of individual service versions (subsets). The +// actual choice of the version is determined by the proxy/sidecar, enabling the +// application code to decouple itself from the evolution of dependent +// services. +// +// A `VirtualService` defines a set of traffic routing rules to apply when a host is +// addressed. Each routing rule defines matching criteria for traffic of a specific +// protocol. If the traffic is matched, then it is sent to a named destination service +// (or subset/version of it) defined in the registry. +// +// The source of traffic can also be matched in a routing rule. This allows routing +// to be customized for specific client contexts. +// +// The following example on Kubernetes, routes all HTTP traffic by default to +// pods of the reviews service with label "version: v1". In addition, +// HTTP requests with path starting with /wpcatalog/ or /consumercatalog/ will +// be rewritten to /newcatalog and sent to pods with label "version: v2". 
+// +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - name: "reviews-v2-routes" +// match: +// - uri: +// prefix: "/wpcatalog" +// - uri: +// prefix: "/consumercatalog" +// rewrite: +// uri: "/newcatalog" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v2 +// - name: "reviews-v1-route" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// ``` +// +// A subset/version of a route destination is identified with a reference +// to a named service subset which must be declared in a corresponding +// `DestinationRule`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// spec: +// host: reviews.prod.svc.cluster.local +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// ``` +// + +package v1alpha3 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + io "io" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Configuration affecting traffic routing. +// +// +type VirtualService struct { + // The destination hosts to which traffic is being sent. Could + // be a DNS name with wildcard prefix or an IP address. 
Depending on the + // platform, short-names can also be used instead of a FQDN (i.e. has no + // dots in the name). In such a scenario, the FQDN of the host would be + // derived based on the underlying platform. + // + // A single VirtualService can be used to describe all the traffic + // properties of the corresponding hosts, including those for multiple + // HTTP and TCP ports. Alternatively, the traffic properties of a host + // can be defined using more than one VirtualService, with certain + // caveats. Refer to the + // [Operations Guide](https://istio.io/docs/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host) + // for details. + // + // *Note for Kubernetes users*: When short names are used (e.g. "reviews" + // instead of "reviews.default.svc.cluster.local"), Istio will interpret + // the short name based on the namespace of the rule, not the service. A + // rule in the "default" namespace containing a host "reviews" will be + // interpreted as "reviews.default.svc.cluster.local", irrespective of + // the actual namespace associated with the reviews service. _To avoid + // potential misconfigurations, it is recommended to always use fully + // qualified domain names over short names._ + // + // The hosts field applies to both HTTP and TCP services. Service inside + // the mesh, i.e., those found in the service registry, must always be + // referred to using their alphanumeric names. IP addresses are allowed + // only for services defined via the Gateway. + Hosts []string `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + // The names of gateways and sidecars that should apply these routes. A + // single VirtualService is used for sidecars inside the mesh as well as + // for one or more gateways. The selection condition imposed by this + // field can be overridden using the source field in the match conditions + // of protocol-specific routes. 
The reserved word `mesh` is used to imply + // all the sidecars in the mesh. When this field is omitted, the default + // gateway (`mesh`) will be used, which would apply the rule to all + // sidecars in the mesh. If a list of gateway names is provided, the + // rules will apply only to the gateways. To apply the rules to both + // gateways and sidecars, specify `mesh` as one of the gateway names. + Gateways []string `protobuf:"bytes,2,rep,name=gateways,proto3" json:"gateways,omitempty"` + // An ordered list of route rules for HTTP traffic. HTTP routes will be + // applied to platform service ports named 'http-*'/'http2-*'/'grpc-*', gateway + // ports with protocol HTTP/HTTP2/GRPC/ TLS-terminated-HTTPS and service + // entry ports using HTTP/HTTP2/GRPC protocols. The first rule matching + // an incoming request is used. + Http []*HTTPRoute `protobuf:"bytes,3,rep,name=http,proto3" json:"http,omitempty"` + // An ordered list of route rule for non-terminated TLS & HTTPS + // traffic. Routing is typically performed using the SNI value presented + // by the ClientHello message. TLS routes will be applied to platform + // service ports named 'https-*', 'tls-*', unterminated gateway ports using + // HTTPS/TLS protocols (i.e. with "passthrough" TLS mode) and service + // entry ports using HTTPS/TLS protocols. The first rule matching an + // incoming request is used. NOTE: Traffic 'https-*' or 'tls-*' ports + // without associated virtual service will be treated as opaque TCP + // traffic. + Tls []*TLSRoute `protobuf:"bytes,5,rep,name=tls,proto3" json:"tls,omitempty"` + // An ordered list of route rules for opaque TCP traffic. TCP routes will + // be applied to any port that is not a HTTP or TLS port. The first rule + // matching an incoming request is used. + Tcp []*TCPRoute `protobuf:"bytes,4,rep,name=tcp,proto3" json:"tcp,omitempty"` + // A list of namespaces to which this virtual service is exported. 
Exporting a + // virtual service allows it to be used by sidecars and gateways defined in + // other namespaces. This feature provides a mechanism for service owners + // and mesh administrators to control the visibility of virtual services + // across namespace boundaries. + // + // If no namespaces are specified then the virtual service is exported to all + // namespaces by default. + // + // The value "." is reserved and defines an export to the same namespace that + // the virtual service is declared in. Similarly the value "*" is reserved and + // defines an export to all namespaces. + // + // NOTE: in the current release, the `exportTo` value is restricted to + // "." or "*" (i.e., the current namespace or all namespaces). + ExportTo []string `protobuf:"bytes,6,rep,name=export_to,json=exportTo,proto3" json:"export_to,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VirtualService) Reset() { *m = VirtualService{} } +func (m *VirtualService) String() string { return proto.CompactTextString(m) } +func (*VirtualService) ProtoMessage() {} +func (*VirtualService) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{0} +} +func (m *VirtualService) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VirtualService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VirtualService.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VirtualService) XXX_Merge(src proto.Message) { + xxx_messageInfo_VirtualService.Merge(m, src) +} +func (m *VirtualService) XXX_Size() int { + return m.Size() +} +func (m *VirtualService) XXX_DiscardUnknown() { + xxx_messageInfo_VirtualService.DiscardUnknown(m) +} + +var xxx_messageInfo_VirtualService proto.InternalMessageInfo + +func (m 
*VirtualService) GetHosts() []string { + if m != nil { + return m.Hosts + } + return nil +} + +func (m *VirtualService) GetGateways() []string { + if m != nil { + return m.Gateways + } + return nil +} + +func (m *VirtualService) GetHttp() []*HTTPRoute { + if m != nil { + return m.Http + } + return nil +} + +func (m *VirtualService) GetTls() []*TLSRoute { + if m != nil { + return m.Tls + } + return nil +} + +func (m *VirtualService) GetTcp() []*TCPRoute { + if m != nil { + return m.Tcp + } + return nil +} + +func (m *VirtualService) GetExportTo() []string { + if m != nil { + return m.ExportTo + } + return nil +} + +// Destination indicates the network addressable service to which the +// request/connection will be sent after processing a routing rule. The +// destination.host should unambiguously refer to a service in the service +// registry. Istio's service registry is composed of all the services found +// in the platform's service registry (e.g., Kubernetes services, Consul +// services), as well as services declared through the +// [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry) resource. +// +// *Note for Kubernetes users*: When short names are used (e.g. "reviews" +// instead of "reviews.default.svc.cluster.local"), Istio will interpret +// the short name based on the namespace of the rule, not the service. A +// rule in the "default" namespace containing a host "reviews will be +// interpreted as "reviews.default.svc.cluster.local", irrespective of the +// actual namespace associated with the reviews service. _To avoid potential +// misconfigurations, it is recommended to always use fully qualified +// domain names over short names._ +// +// The following Kubernetes example routes all traffic by default to pods +// of the reviews service with label "version: v1" (i.e., subset v1), and +// some to subset v2, in a Kubernetes environment. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// namespace: foo +// spec: +// hosts: +// - reviews # interpreted as reviews.foo.svc.cluster.local +// http: +// - match: +// - uri: +// prefix: "/wpcatalog" +// - uri: +// prefix: "/consumercatalog" +// rewrite: +// uri: "/newcatalog" +// route: +// - destination: +// host: reviews # interpreted as reviews.foo.svc.cluster.local +// subset: v2 +// - route: +// - destination: +// host: reviews # interpreted as reviews.foo.svc.cluster.local +// subset: v1 +// ``` +// +// And the associated DestinationRule +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// namespace: foo +// spec: +// host: reviews # interpreted as reviews.foo.svc.cluster.local +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// ``` +// +// The following VirtualService sets a timeout of 5s for all calls to +// productpage.prod.svc.cluster.local service in Kubernetes. Notice that +// there are no subsets defined in this rule. Istio will fetch all +// instances of productpage.prod.svc.cluster.local service from the service +// registry and populate the sidecar's load balancing pool. Also, notice +// that this rule is set in the istio-system namespace but uses the fully +// qualified domain name of the productpage service, +// productpage.prod.svc.cluster.local. Therefore the rule's namespace does +// not have an impact in resolving the name of the productpage service. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: my-productpage-rule +// namespace: istio-system +// spec: +// hosts: +// - productpage.prod.svc.cluster.local # ignores rule namespace +// http: +// - timeout: 5s +// route: +// - destination: +// host: productpage.prod.svc.cluster.local +// ``` +// +// To control routing for traffic bound to services outside the mesh, external +// services must first be added to Istio's internal service registry using the +// ServiceEntry resource. VirtualServices can then be defined to control traffic +// bound to these external services. For example, the following rules define a +// Service for wikipedia.org and set a timeout of 5s for http requests. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: ServiceEntry +// metadata: +// name: external-svc-wikipedia +// spec: +// hosts: +// - wikipedia.org +// location: MESH_EXTERNAL +// ports: +// - number: 80 +// name: example-http +// protocol: HTTP +// resolution: DNS +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: my-wiki-rule +// spec: +// hosts: +// - wikipedia.org +// http: +// - timeout: 5s +// route: +// - destination: +// host: wikipedia.org +// ``` +type Destination struct { + // The name of a service from the service registry. Service + // names are looked up from the platform's service registry (e.g., + // Kubernetes services, Consul services, etc.) and from the hosts + // declared by [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). Traffic forwarded to + // destinations that are not found in either of the two, will be dropped. + // + // *Note for Kubernetes users*: When short names are used (e.g. "reviews" + // instead of "reviews.default.svc.cluster.local"), Istio will interpret + // the short name based on the namespace of the rule, not the service. 
A + // rule in the "default" namespace containing a host "reviews will be + // interpreted as "reviews.default.svc.cluster.local", irrespective of + // the actual namespace associated with the reviews service. _To avoid + // potential misconfigurations, it is recommended to always use fully + // qualified domain names over short names._ + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // The name of a subset within the service. Applicable only to services + // within the mesh. The subset must be defined in a corresponding + // DestinationRule. + Subset string `protobuf:"bytes,2,opt,name=subset,proto3" json:"subset,omitempty"` + // Specifies the port on the host that is being addressed. If a service + // exposes only a single port it is not required to explicitly select the + // port. + Port *PortSelector `protobuf:"bytes,3,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Destination) Reset() { *m = Destination{} } +func (m *Destination) String() string { return proto.CompactTextString(m) } +func (*Destination) ProtoMessage() {} +func (*Destination) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{1} +} +func (m *Destination) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Destination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Destination.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Destination) XXX_Merge(src proto.Message) { + xxx_messageInfo_Destination.Merge(m, src) +} +func (m *Destination) XXX_Size() int { + return m.Size() +} +func (m *Destination) XXX_DiscardUnknown() { + xxx_messageInfo_Destination.DiscardUnknown(m) +} + +var xxx_messageInfo_Destination 
proto.InternalMessageInfo + +func (m *Destination) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Destination) GetSubset() string { + if m != nil { + return m.Subset + } + return "" +} + +func (m *Destination) GetPort() *PortSelector { + if m != nil { + return m.Port + } + return nil +} + +// Describes match conditions and actions for routing HTTP/1.1, HTTP2, and +// gRPC traffic. See VirtualService for usage examples. +type HTTPRoute struct { + // The name assigned to the route for debugging purposes. The + // route's name will be concatenated with the match's name and will + // be logged in the access logs for requests matching this + // route/match. + Name string `protobuf:"bytes,17,opt,name=name,proto3" json:"name,omitempty"` + // Match conditions to be satisfied for the rule to be + // activated. All conditions inside a single match block have AND + // semantics, while the list of match blocks have OR semantics. The rule + // is matched if any one of the match blocks succeed. + Match []*HTTPMatchRequest `protobuf:"bytes,1,rep,name=match,proto3" json:"match,omitempty"` + // A http rule can either redirect or forward (default) traffic. The + // forwarding target can be one of several versions of a service (see + // glossary in beginning of document). Weights associated with the + // service version determine the proportion of traffic it receives. + Route []*HTTPRouteDestination `protobuf:"bytes,2,rep,name=route,proto3" json:"route,omitempty"` + // A http rule can either redirect or forward (default) traffic. If + // traffic passthrough option is specified in the rule, + // route/redirect will be ignored. The redirect primitive can be used to + // send a HTTP 301 redirect to a different URI or Authority. + Redirect *HTTPRedirect `protobuf:"bytes,3,opt,name=redirect,proto3" json:"redirect,omitempty"` + // Rewrite HTTP URIs and Authority headers. Rewrite cannot be used with + // Redirect primitive. 
Rewrite will be performed before forwarding. + Rewrite *HTTPRewrite `protobuf:"bytes,4,opt,name=rewrite,proto3" json:"rewrite,omitempty"` + // Deprecated. Websocket upgrades are done automatically starting from Istio 1.0. + // $hide_from_docs + WebsocketUpgrade bool `protobuf:"varint,5,opt,name=websocket_upgrade,json=websocketUpgrade,proto3" json:"websocket_upgrade,omitempty"` + // Timeout for HTTP requests. + Timeout *types.Duration `protobuf:"bytes,6,opt,name=timeout,proto3" json:"timeout,omitempty"` + // Retry policy for HTTP requests. + Retries *HTTPRetry `protobuf:"bytes,7,opt,name=retries,proto3" json:"retries,omitempty"` + // Fault injection policy to apply on HTTP traffic at the client side. + // Note that timeouts or retries will not be enabled when faults are + // enabled on the client side. + Fault *HTTPFaultInjection `protobuf:"bytes,8,opt,name=fault,proto3" json:"fault,omitempty"` + // Mirror HTTP traffic to a another destination in addition to forwarding + // the requests to the intended destination. Mirrored traffic is on a + // best effort basis where the sidecar/gateway will not wait for the + // mirrored cluster to respond before returning the response from the + // original destination. Statistics will be generated for the mirrored + // destination. + Mirror *Destination `protobuf:"bytes,9,opt,name=mirror,proto3" json:"mirror,omitempty"` + // Percentage of the traffic to be mirrored by the `mirror` field. + // If this field is absent, all the traffic (100%) will be mirrored. + // Max value is 100. + MirrorPercent *types.UInt32Value `protobuf:"bytes,18,opt,name=mirror_percent,json=mirrorPercent,proto3" json:"mirror_percent,omitempty"` + // Cross-Origin Resource Sharing policy (CORS). Refer to + // [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) + // for further details about cross origin resource sharing. 
+ CorsPolicy *CorsPolicy `protobuf:"bytes,10,opt,name=cors_policy,json=corsPolicy,proto3" json:"cors_policy,omitempty"` + // $hide_from_docs + AppendHeaders map[string]string `protobuf:"bytes,11,rep,name=append_headers,json=appendHeaders,proto3" json:"append_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. + // $hide_from_docs + RemoveResponseHeaders []string `protobuf:"bytes,12,rep,name=remove_response_headers,json=removeResponseHeaders,proto3" json:"remove_response_headers,omitempty"` // Deprecated: Do not use. + // $hide_from_docs + AppendResponseHeaders map[string]string `protobuf:"bytes,13,rep,name=append_response_headers,json=appendResponseHeaders,proto3" json:"append_response_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. + // $hide_from_docs + RemoveRequestHeaders []string `protobuf:"bytes,14,rep,name=remove_request_headers,json=removeRequestHeaders,proto3" json:"remove_request_headers,omitempty"` // Deprecated: Do not use. + // $hide_from_docs + AppendRequestHeaders map[string]string `protobuf:"bytes,15,rep,name=append_request_headers,json=appendRequestHeaders,proto3" json:"append_request_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. 
+ // Header manipulation rules + Headers *Headers `protobuf:"bytes,16,opt,name=headers,proto3" json:"headers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRoute) Reset() { *m = HTTPRoute{} } +func (m *HTTPRoute) String() string { return proto.CompactTextString(m) } +func (*HTTPRoute) ProtoMessage() {} +func (*HTTPRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{2} +} +func (m *HTTPRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRoute.Merge(m, src) +} +func (m *HTTPRoute) XXX_Size() int { + return m.Size() +} +func (m *HTTPRoute) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRoute proto.InternalMessageInfo + +func (m *HTTPRoute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HTTPRoute) GetMatch() []*HTTPMatchRequest { + if m != nil { + return m.Match + } + return nil +} + +func (m *HTTPRoute) GetRoute() []*HTTPRouteDestination { + if m != nil { + return m.Route + } + return nil +} + +func (m *HTTPRoute) GetRedirect() *HTTPRedirect { + if m != nil { + return m.Redirect + } + return nil +} + +func (m *HTTPRoute) GetRewrite() *HTTPRewrite { + if m != nil { + return m.Rewrite + } + return nil +} + +func (m *HTTPRoute) GetWebsocketUpgrade() bool { + if m != nil { + return m.WebsocketUpgrade + } + return false +} + +func (m *HTTPRoute) GetTimeout() *types.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *HTTPRoute) GetRetries() *HTTPRetry { + 
if m != nil { + return m.Retries + } + return nil +} + +func (m *HTTPRoute) GetFault() *HTTPFaultInjection { + if m != nil { + return m.Fault + } + return nil +} + +func (m *HTTPRoute) GetMirror() *Destination { + if m != nil { + return m.Mirror + } + return nil +} + +func (m *HTTPRoute) GetMirrorPercent() *types.UInt32Value { + if m != nil { + return m.MirrorPercent + } + return nil +} + +func (m *HTTPRoute) GetCorsPolicy() *CorsPolicy { + if m != nil { + return m.CorsPolicy + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRoute) GetAppendHeaders() map[string]string { + if m != nil { + return m.AppendHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRoute) GetRemoveResponseHeaders() []string { + if m != nil { + return m.RemoveResponseHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRoute) GetAppendResponseHeaders() map[string]string { + if m != nil { + return m.AppendResponseHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRoute) GetRemoveRequestHeaders() []string { + if m != nil { + return m.RemoveRequestHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRoute) GetAppendRequestHeaders() map[string]string { + if m != nil { + return m.AppendRequestHeaders + } + return nil +} + +func (m *HTTPRoute) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +// Message headers can be manipulated when Envoy forwards requests to, +// or responses from, a destination service. Header manipulation rules can +// be specified for a specific route destination or for all destinations. +// The following VirtualService adds a `test` header with the value `true` +// to requests that are routed to any `reviews` service destination. +// It also romoves the `foo` response header, but only from responses +// coming from the `v1` subset (version) of the `reviews` service. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - headers: +// request: +// set: +// test: true +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v2 +// weight: 25 +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// headers: +// response: +// remove: +// - foo +// weight: 75 +// ``` +type Headers struct { + // Header manipulation rules to apply before forwarding a request + // to the destination service + Request *Headers_HeaderOperations `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + // Header manipulation rules to apply before returning a response + // to the caller + Response *Headers_HeaderOperations `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{3} +} +func (m *Headers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Headers.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Headers) XXX_Merge(src proto.Message) { + xxx_messageInfo_Headers.Merge(m, src) +} +func (m *Headers) XXX_Size() int { + return m.Size() +} +func (m *Headers) XXX_DiscardUnknown() { + xxx_messageInfo_Headers.DiscardUnknown(m) +} + +var xxx_messageInfo_Headers proto.InternalMessageInfo + +func (m *Headers) GetRequest() 
*Headers_HeaderOperations { + if m != nil { + return m.Request + } + return nil +} + +func (m *Headers) GetResponse() *Headers_HeaderOperations { + if m != nil { + return m.Response + } + return nil +} + +// HeaderOperations Describes the header manipulations to apply +type Headers_HeaderOperations struct { + // Overwrite the headers specified by key with the given values + Set map[string]string `protobuf:"bytes,1,rep,name=set,proto3" json:"set,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Append the given values to the headers specified by keys + // (will create a comma-separated list of values) + Add map[string]string `protobuf:"bytes,2,rep,name=add,proto3" json:"add,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Remove a the specified headers + Remove []string `protobuf:"bytes,3,rep,name=remove,proto3" json:"remove,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Headers_HeaderOperations) Reset() { *m = Headers_HeaderOperations{} } +func (m *Headers_HeaderOperations) String() string { return proto.CompactTextString(m) } +func (*Headers_HeaderOperations) ProtoMessage() {} +func (*Headers_HeaderOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{3, 0} +} +func (m *Headers_HeaderOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Headers_HeaderOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Headers_HeaderOperations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Headers_HeaderOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Headers_HeaderOperations.Merge(m, src) +} +func (m 
*Headers_HeaderOperations) XXX_Size() int { + return m.Size() +} +func (m *Headers_HeaderOperations) XXX_DiscardUnknown() { + xxx_messageInfo_Headers_HeaderOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_Headers_HeaderOperations proto.InternalMessageInfo + +func (m *Headers_HeaderOperations) GetSet() map[string]string { + if m != nil { + return m.Set + } + return nil +} + +func (m *Headers_HeaderOperations) GetAdd() map[string]string { + if m != nil { + return m.Add + } + return nil +} + +func (m *Headers_HeaderOperations) GetRemove() []string { + if m != nil { + return m.Remove + } + return nil +} + +// Describes match conditions and actions for routing unterminated TLS +// traffic (TLS/HTTPS) The following routing rule forwards unterminated TLS +// traffic arriving at port 443 of gateway called "mygateway" to internal +// services in the mesh based on the SNI value. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-sni +// spec: +// hosts: +// - "*.bookinfo.com" +// gateways: +// - mygateway +// tls: +// - match: +// - port: 443 +// sniHosts: +// - login.bookinfo.com +// route: +// - destination: +// host: login.prod.svc.cluster.local +// - match: +// - port: 443 +// sniHosts: +// - reviews.bookinfo.com +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// ``` +type TLSRoute struct { + // Match conditions to be satisfied for the rule to be + // activated. All conditions inside a single match block have AND + // semantics, while the list of match blocks have OR semantics. The rule + // is matched if any one of the match blocks succeed. + Match []*TLSMatchAttributes `protobuf:"bytes,1,rep,name=match,proto3" json:"match,omitempty"` + // The destination to which the connection should be forwarded to. 
+ Route []*RouteDestination `protobuf:"bytes,2,rep,name=route,proto3" json:"route,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TLSRoute) Reset() { *m = TLSRoute{} } +func (m *TLSRoute) String() string { return proto.CompactTextString(m) } +func (*TLSRoute) ProtoMessage() {} +func (*TLSRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{4} +} +func (m *TLSRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TLSRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TLSRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSRoute.Merge(m, src) +} +func (m *TLSRoute) XXX_Size() int { + return m.Size() +} +func (m *TLSRoute) XXX_DiscardUnknown() { + xxx_messageInfo_TLSRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSRoute proto.InternalMessageInfo + +func (m *TLSRoute) GetMatch() []*TLSMatchAttributes { + if m != nil { + return m.Match + } + return nil +} + +func (m *TLSRoute) GetRoute() []*RouteDestination { + if m != nil { + return m.Route + } + return nil +} + +// Describes match conditions and actions for routing TCP traffic. The +// following routing rule forwards traffic arriving at port 27017 for +// mongo.prod.svc.cluster.local to another Mongo server on port 5555. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-Mongo +// spec: +// hosts: +// - mongo.prod.svc.cluster.local +// tcp: +// - match: +// - port: 27017 +// route: +// - destination: +// host: mongo.backup.svc.cluster.local +// port: +// number: 5555 +// ``` +type TCPRoute struct { + // Match conditions to be satisfied for the rule to be + // activated. All conditions inside a single match block have AND + // semantics, while the list of match blocks have OR semantics. The rule + // is matched if any one of the match blocks succeed. + Match []*L4MatchAttributes `protobuf:"bytes,1,rep,name=match,proto3" json:"match,omitempty"` + // The destination to which the connection should be forwarded to. + Route []*RouteDestination `protobuf:"bytes,2,rep,name=route,proto3" json:"route,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TCPRoute) Reset() { *m = TCPRoute{} } +func (m *TCPRoute) String() string { return proto.CompactTextString(m) } +func (*TCPRoute) ProtoMessage() {} +func (*TCPRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{5} +} +func (m *TCPRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TCPRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TCPRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPRoute.Merge(m, src) +} +func (m *TCPRoute) XXX_Size() int { + return m.Size() +} +func (m *TCPRoute) XXX_DiscardUnknown() { + xxx_messageInfo_TCPRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_TCPRoute proto.InternalMessageInfo + +func (m *TCPRoute) GetMatch() []*L4MatchAttributes { + if m != nil { + 
return m.Match + } + return nil +} + +func (m *TCPRoute) GetRoute() []*RouteDestination { + if m != nil { + return m.Route + } + return nil +} + +// HttpMatchRequest specifies a set of criterion to be met in order for the +// rule to be applied to the HTTP request. For example, the following +// restricts the rule to match only requests where the URL path +// starts with /ratings/v2/ and the request contains a custom `end-user` header +// with value `jason`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - match: +// - headers: +// end-user: +// exact: jason +// uri: +// prefix: "/ratings/v2/" +// ignoreUriCase: true +// route: +// - destination: +// host: ratings.prod.svc.cluster.local +// ``` +// +// HTTPMatchRequest CANNOT be empty. +type HTTPMatchRequest struct { + // The name assigned to a match. The match's name will be + // concatenated with the parent route's name and will be logged in + // the access logs for requests matching this route. + Name string `protobuf:"bytes,11,opt,name=name,proto3" json:"name,omitempty"` + // URI to match + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + // **Note:** Case-insensitive matching could be enabled via the + // `ignore_uri_case` flag. 
+ Uri *StringMatch `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // URI Scheme + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Scheme *StringMatch `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` + // HTTP Method + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Method *StringMatch `protobuf:"bytes,3,opt,name=method,proto3" json:"method,omitempty"` + // HTTP Authority + // values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + Authority *StringMatch `protobuf:"bytes,4,opt,name=authority,proto3" json:"authority,omitempty"` + // The header keys must be lowercase and use hyphen as the separator, + // e.g. _x-request-id_. + // + // Header values are case-sensitive and formatted as follows: + // + // - `exact: "value"` for exact string match + // + // - `prefix: "value"` for prefix-based match + // + // - `regex: "value"` for ECMAscript style regex-based match + // + // **Note:** The keys `uri`, `scheme`, `method`, and `authority` will be ignored. + Headers map[string]*StringMatch `protobuf:"bytes,5,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Specifies the ports on the host that is being addressed. Many services + // only expose a single port or label ports with the protocols they support, + // in these cases it is not required to explicitly select the port. 
+ Port uint32 `protobuf:"varint,6,opt,name=port,proto3" json:"port,omitempty"` + // One or more labels that constrain the applicability of a rule to + // workloads with the given labels. If the VirtualService has a list of + // gateways specified at the top, it must include the reserved gateway + // `mesh` for this field to be applicable. + SourceLabels map[string]string `protobuf:"bytes,7,rep,name=source_labels,json=sourceLabels,proto3" json:"source_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // $hide_from_docs + Gateways []string `protobuf:"bytes,8,rep,name=gateways,proto3" json:"gateways,omitempty"` + // Query parameters for matching. + // + // Ex: + // - For a query parameter like "?key=true", the map key would be "key" and + // the string match could be defined as `exact: "true"`. + // - For a query parameter like "?key", the map key would be "key" and the + // string match could be defined as `exact: ""`. + // - For a query parameter like "?key=123", the map key would be "key" and the + // string match could be defined as `regex: "\d+$"`. Note that this + // configuration will only match values like "123" but not "a123" or "123a". + // + // **Note:** `prefix` matching is currently not supported. + QueryParams map[string]*StringMatch `protobuf:"bytes,9,rep,name=query_params,json=queryParams,proto3" json:"query_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Flag to specify whether the URI matching should be case-insensitive. + // + // **Note:** The case will be ignored only in the case of `exact` and `prefix` + // URI matches. 
+ IgnoreUriCase bool `protobuf:"varint,10,opt,name=ignore_uri_case,json=ignoreUriCase,proto3" json:"ignore_uri_case,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPMatchRequest) Reset() { *m = HTTPMatchRequest{} } +func (m *HTTPMatchRequest) String() string { return proto.CompactTextString(m) } +func (*HTTPMatchRequest) ProtoMessage() {} +func (*HTTPMatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{6} +} +func (m *HTTPMatchRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPMatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPMatchRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPMatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPMatchRequest.Merge(m, src) +} +func (m *HTTPMatchRequest) XXX_Size() int { + return m.Size() +} +func (m *HTTPMatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPMatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPMatchRequest proto.InternalMessageInfo + +func (m *HTTPMatchRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HTTPMatchRequest) GetUri() *StringMatch { + if m != nil { + return m.Uri + } + return nil +} + +func (m *HTTPMatchRequest) GetScheme() *StringMatch { + if m != nil { + return m.Scheme + } + return nil +} + +func (m *HTTPMatchRequest) GetMethod() *StringMatch { + if m != nil { + return m.Method + } + return nil +} + +func (m *HTTPMatchRequest) GetAuthority() *StringMatch { + if m != nil { + return m.Authority + } + return nil +} + +func (m *HTTPMatchRequest) GetHeaders() map[string]*StringMatch { + if m != nil { + return m.Headers + } + return nil +} + +func (m *HTTPMatchRequest) 
GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *HTTPMatchRequest) GetSourceLabels() map[string]string { + if m != nil { + return m.SourceLabels + } + return nil +} + +func (m *HTTPMatchRequest) GetGateways() []string { + if m != nil { + return m.Gateways + } + return nil +} + +func (m *HTTPMatchRequest) GetQueryParams() map[string]*StringMatch { + if m != nil { + return m.QueryParams + } + return nil +} + +func (m *HTTPMatchRequest) GetIgnoreUriCase() bool { + if m != nil { + return m.IgnoreUriCase + } + return false +} + +// Each routing rule is associated with one or more service versions (see +// glossary in beginning of document). Weights associated with the version +// determine the proportion of traffic it receives. For example, the +// following rule will route 25% of traffic for the "reviews" service to +// instances with the "v2" tag and the remaining traffic (i.e., 75%) to +// "v1". +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v2 +// weight: 25 +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// weight: 75 +// ``` +// +// And the associated DestinationRule +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// spec: +// host: reviews.prod.svc.cluster.local +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// ``` +// +// Traffic can also be split across two entirely different services without +// having to define new subsets. 
For example, the following rule forwards 25% of +// traffic to reviews.com to dev.reviews.com +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route-two-domains +// spec: +// hosts: +// - reviews.com +// http: +// - route: +// - destination: +// host: dev.reviews.com +// weight: 25 +// - destination: +// host: reviews.com +// weight: 75 +// ``` +type HTTPRouteDestination struct { + // Destination uniquely identifies the instances of a service + // to which the request/connection should be forwarded to. + Destination *Destination `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // The proportion of traffic to be forwarded to the service + // version. (0-100). Sum of weights across destinations SHOULD BE == 100. + // If there is only one destination in a rule, the weight value is assumed to + // be 100. + Weight int32 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"` + // Use of `remove_response_header` is deprecated. Use the `headers` + // field instead. + RemoveResponseHeaders []string `protobuf:"bytes,3,rep,name=remove_response_headers,json=removeResponseHeaders,proto3" json:"remove_response_headers,omitempty"` // Deprecated: Do not use. + // Use of `append_response_headers` is deprecated. Use the `headers` + // field instead. + AppendResponseHeaders map[string]string `protobuf:"bytes,4,rep,name=append_response_headers,json=appendResponseHeaders,proto3" json:"append_response_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. + // Use of `remove_request_headers` is deprecated. Use the `headers` + // field instead. + RemoveRequestHeaders []string `protobuf:"bytes,5,rep,name=remove_request_headers,json=removeRequestHeaders,proto3" json:"remove_request_headers,omitempty"` // Deprecated: Do not use. + // Use of `append_request_headers` is deprecated. 
Use the `headers` + // field instead. + AppendRequestHeaders map[string]string `protobuf:"bytes,6,rep,name=append_request_headers,json=appendRequestHeaders,proto3" json:"append_request_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Deprecated: Do not use. + // Header manipulation rules + Headers *Headers `protobuf:"bytes,7,opt,name=headers,proto3" json:"headers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRouteDestination) Reset() { *m = HTTPRouteDestination{} } +func (m *HTTPRouteDestination) String() string { return proto.CompactTextString(m) } +func (*HTTPRouteDestination) ProtoMessage() {} +func (*HTTPRouteDestination) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{7} +} +func (m *HTTPRouteDestination) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRouteDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRouteDestination.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRouteDestination) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRouteDestination.Merge(m, src) +} +func (m *HTTPRouteDestination) XXX_Size() int { + return m.Size() +} +func (m *HTTPRouteDestination) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRouteDestination.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRouteDestination proto.InternalMessageInfo + +func (m *HTTPRouteDestination) GetDestination() *Destination { + if m != nil { + return m.Destination + } + return nil +} + +func (m *HTTPRouteDestination) GetWeight() int32 { + if m != nil { + return m.Weight + } + return 0 +} + +// Deprecated: Do not use. 
+func (m *HTTPRouteDestination) GetRemoveResponseHeaders() []string { + if m != nil { + return m.RemoveResponseHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRouteDestination) GetAppendResponseHeaders() map[string]string { + if m != nil { + return m.AppendResponseHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRouteDestination) GetRemoveRequestHeaders() []string { + if m != nil { + return m.RemoveRequestHeaders + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPRouteDestination) GetAppendRequestHeaders() map[string]string { + if m != nil { + return m.AppendRequestHeaders + } + return nil +} + +func (m *HTTPRouteDestination) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +// L4 routing rule weighted destination. +type RouteDestination struct { + // Destination uniquely identifies the instances of a service + // to which the request/connection should be forwarded to. + Destination *Destination `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + // The proportion of traffic to be forwarded to the service + // version. If there is only one destination in a rule, all traffic will be + // routed to it irrespective of the weight. 
+ Weight int32 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RouteDestination) Reset() { *m = RouteDestination{} } +func (m *RouteDestination) String() string { return proto.CompactTextString(m) } +func (*RouteDestination) ProtoMessage() {} +func (*RouteDestination) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{8} +} +func (m *RouteDestination) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteDestination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RouteDestination.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RouteDestination) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteDestination.Merge(m, src) +} +func (m *RouteDestination) XXX_Size() int { + return m.Size() +} +func (m *RouteDestination) XXX_DiscardUnknown() { + xxx_messageInfo_RouteDestination.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteDestination proto.InternalMessageInfo + +func (m *RouteDestination) GetDestination() *Destination { + if m != nil { + return m.Destination + } + return nil +} + +func (m *RouteDestination) GetWeight() int32 { + if m != nil { + return m.Weight + } + return 0 +} + +// L4 connection match attributes. Note that L4 connection matching support +// is incomplete. +type L4MatchAttributes struct { + // IPv4 or IPv6 ip addresses of destination with optional subnet. E.g., + // a.b.c.d/xx form or just a.b.c.d. + DestinationSubnets []string `protobuf:"bytes,1,rep,name=destination_subnets,json=destinationSubnets,proto3" json:"destination_subnets,omitempty"` + // Specifies the port on the host that is being addressed. 
Many services + // only expose a single port or label ports with the protocols they support, + // in these cases it is not required to explicitly select the port. + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // IPv4 or IPv6 ip address of source with optional subnet. E.g., a.b.c.d/xx + // form or just a.b.c.d + // $hide_from_docs + SourceSubnet string `protobuf:"bytes,3,opt,name=source_subnet,json=sourceSubnet,proto3" json:"source_subnet,omitempty"` + // One or more labels that constrain the applicability of a rule to + // workloads with the given labels. If the VirtualService has a list of + // gateways specified at the top, it should include the reserved gateway + // `mesh` in order for this field to be applicable. + SourceLabels map[string]string `protobuf:"bytes,4,rep,name=source_labels,json=sourceLabels,proto3" json:"source_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Names of gateways where the rule should be applied to. Gateway names + // at the top of the VirtualService (if any) are overridden. The gateway + // match is independent of sourceLabels. 
+ Gateways []string `protobuf:"bytes,5,rep,name=gateways,proto3" json:"gateways,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *L4MatchAttributes) Reset() { *m = L4MatchAttributes{} } +func (m *L4MatchAttributes) String() string { return proto.CompactTextString(m) } +func (*L4MatchAttributes) ProtoMessage() {} +func (*L4MatchAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{9} +} +func (m *L4MatchAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *L4MatchAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_L4MatchAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *L4MatchAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_L4MatchAttributes.Merge(m, src) +} +func (m *L4MatchAttributes) XXX_Size() int { + return m.Size() +} +func (m *L4MatchAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_L4MatchAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_L4MatchAttributes proto.InternalMessageInfo + +func (m *L4MatchAttributes) GetDestinationSubnets() []string { + if m != nil { + return m.DestinationSubnets + } + return nil +} + +func (m *L4MatchAttributes) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *L4MatchAttributes) GetSourceSubnet() string { + if m != nil { + return m.SourceSubnet + } + return "" +} + +func (m *L4MatchAttributes) GetSourceLabels() map[string]string { + if m != nil { + return m.SourceLabels + } + return nil +} + +func (m *L4MatchAttributes) GetGateways() []string { + if m != nil { + return m.Gateways + } + return nil +} + +// TLS connection match attributes. +type TLSMatchAttributes struct { + // SNI (server name indicator) to match on. 
Wildcard prefixes + // can be used in the SNI value, e.g., *.com will match foo.example.com + // as well as example.com. An SNI value must be a subset (i.e., fall + // within the domain) of the corresponding virtual serivce's hosts. + SniHosts []string `protobuf:"bytes,1,rep,name=sni_hosts,json=sniHosts,proto3" json:"sni_hosts,omitempty"` + // IPv4 or IPv6 ip addresses of destination with optional subnet. E.g., + // a.b.c.d/xx form or just a.b.c.d. + DestinationSubnets []string `protobuf:"bytes,2,rep,name=destination_subnets,json=destinationSubnets,proto3" json:"destination_subnets,omitempty"` + // Specifies the port on the host that is being addressed. Many services + // only expose a single port or label ports with the protocols they + // support, in these cases it is not required to explicitly select the + // port. + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // IPv4 or IPv6 ip address of source with optional subnet. E.g., a.b.c.d/xx + // form or just a.b.c.d + // $hide_from_docs + SourceSubnet string `protobuf:"bytes,4,opt,name=source_subnet,json=sourceSubnet,proto3" json:"source_subnet,omitempty"` + // One or more labels that constrain the applicability of a rule to + // workloads with the given labels. If the VirtualService has a list of + // gateways specified at the top, it should include the reserved gateway + // `mesh` in order for this field to be applicable. + SourceLabels map[string]string `protobuf:"bytes,5,rep,name=source_labels,json=sourceLabels,proto3" json:"source_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Names of gateways where the rule should be applied to. Gateway names + // at the top of the VirtualService (if any) are overridden. The gateway + // match is independent of sourceLabels. 
+ Gateways []string `protobuf:"bytes,6,rep,name=gateways,proto3" json:"gateways,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TLSMatchAttributes) Reset() { *m = TLSMatchAttributes{} } +func (m *TLSMatchAttributes) String() string { return proto.CompactTextString(m) } +func (*TLSMatchAttributes) ProtoMessage() {} +func (*TLSMatchAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{10} +} +func (m *TLSMatchAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSMatchAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TLSMatchAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TLSMatchAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSMatchAttributes.Merge(m, src) +} +func (m *TLSMatchAttributes) XXX_Size() int { + return m.Size() +} +func (m *TLSMatchAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_TLSMatchAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSMatchAttributes proto.InternalMessageInfo + +func (m *TLSMatchAttributes) GetSniHosts() []string { + if m != nil { + return m.SniHosts + } + return nil +} + +func (m *TLSMatchAttributes) GetDestinationSubnets() []string { + if m != nil { + return m.DestinationSubnets + } + return nil +} + +func (m *TLSMatchAttributes) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *TLSMatchAttributes) GetSourceSubnet() string { + if m != nil { + return m.SourceSubnet + } + return "" +} + +func (m *TLSMatchAttributes) GetSourceLabels() map[string]string { + if m != nil { + return m.SourceLabels + } + return nil +} + +func (m *TLSMatchAttributes) GetGateways() []string { + if m != nil { + return m.Gateways + } + return nil +} + 
+// HTTPRedirect can be used to send a 301 redirect response to the caller, +// where the Authority/Host and the URI in the response can be swapped with +// the specified values. For example, the following rule redirects +// requests for /v1/getProductRatings API on the ratings service to +// /v1/bookRatings provided by the bookratings service. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - match: +// - uri: +// exact: /v1/getProductRatings +// redirect: +// uri: /v1/bookRatings +// authority: newratings.default.svc.cluster.local +// ... +// ``` +type HTTPRedirect struct { + // On a redirect, overwrite the Path portion of the URL with this + // value. Note that the entire path will be replaced, irrespective of the + // request URI being matched as an exact path or prefix. + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // On a redirect, overwrite the Authority/Host portion of the URL with + // this value. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + // On a redirect, Specifies the HTTP status code to use in the redirect + // response. The default response code is MOVED_PERMANENTLY (301). 
+ RedirectCode uint32 `protobuf:"varint,3,opt,name=redirect_code,json=redirectCode,proto3" json:"redirect_code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRedirect) Reset() { *m = HTTPRedirect{} } +func (m *HTTPRedirect) String() string { return proto.CompactTextString(m) } +func (*HTTPRedirect) ProtoMessage() {} +func (*HTTPRedirect) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{11} +} +func (m *HTTPRedirect) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRedirect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRedirect.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRedirect) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRedirect.Merge(m, src) +} +func (m *HTTPRedirect) XXX_Size() int { + return m.Size() +} +func (m *HTTPRedirect) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRedirect.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRedirect proto.InternalMessageInfo + +func (m *HTTPRedirect) GetUri() string { + if m != nil { + return m.Uri + } + return "" +} + +func (m *HTTPRedirect) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *HTTPRedirect) GetRedirectCode() uint32 { + if m != nil { + return m.RedirectCode + } + return 0 +} + +// HTTPRewrite can be used to rewrite specific parts of a HTTP request +// before forwarding the request to the destination. Rewrite primitive can +// be used only with HTTPRouteDestination. The following example +// demonstrates how to rewrite the URL prefix for api call (/ratings) to +// ratings service before making the actual API call. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - match: +// - uri: +// prefix: /ratings +// rewrite: +// uri: /v1/bookRatings +// route: +// - destination: +// host: ratings.prod.svc.cluster.local +// subset: v1 +// ``` +// +type HTTPRewrite struct { + // rewrite the path (or the prefix) portion of the URI with this + // value. If the original URI was matched based on prefix, the value + // provided in this field will replace the corresponding matched prefix. + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // rewrite the Authority/Host header with this value. + Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRewrite) Reset() { *m = HTTPRewrite{} } +func (m *HTTPRewrite) String() string { return proto.CompactTextString(m) } +func (*HTTPRewrite) ProtoMessage() {} +func (*HTTPRewrite) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{12} +} +func (m *HTTPRewrite) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRewrite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRewrite.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRewrite) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRewrite.Merge(m, src) +} +func (m *HTTPRewrite) XXX_Size() int { + return m.Size() +} +func (m *HTTPRewrite) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRewrite.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRewrite proto.InternalMessageInfo + +func (m *HTTPRewrite) GetUri() string { + if m != nil { + 
return m.Uri + } + return "" +} + +func (m *HTTPRewrite) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +// Describes how to match a given string in HTTP headers. Match is +// case-sensitive. +type StringMatch struct { + // Types that are valid to be assigned to MatchType: + // *StringMatch_Exact + // *StringMatch_Prefix + // *StringMatch_Regex + MatchType isStringMatch_MatchType `protobuf_oneof:"match_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringMatch) Reset() { *m = StringMatch{} } +func (m *StringMatch) String() string { return proto.CompactTextString(m) } +func (*StringMatch) ProtoMessage() {} +func (*StringMatch) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{13} +} +func (m *StringMatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringMatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringMatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringMatch.Merge(m, src) +} +func (m *StringMatch) XXX_Size() int { + return m.Size() +} +func (m *StringMatch) XXX_DiscardUnknown() { + xxx_messageInfo_StringMatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StringMatch proto.InternalMessageInfo + +type isStringMatch_MatchType interface { + isStringMatch_MatchType() + MarshalTo([]byte) (int, error) + Size() int +} + +type StringMatch_Exact struct { + Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"` +} +type StringMatch_Prefix struct { + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"` +} +type StringMatch_Regex struct { + Regex string `protobuf:"bytes,3,opt,name=regex,proto3,oneof"` +} + +func 
(*StringMatch_Exact) isStringMatch_MatchType() {} +func (*StringMatch_Prefix) isStringMatch_MatchType() {} +func (*StringMatch_Regex) isStringMatch_MatchType() {} + +func (m *StringMatch) GetMatchType() isStringMatch_MatchType { + if m != nil { + return m.MatchType + } + return nil +} + +func (m *StringMatch) GetExact() string { + if x, ok := m.GetMatchType().(*StringMatch_Exact); ok { + return x.Exact + } + return "" +} + +func (m *StringMatch) GetPrefix() string { + if x, ok := m.GetMatchType().(*StringMatch_Prefix); ok { + return x.Prefix + } + return "" +} + +func (m *StringMatch) GetRegex() string { + if x, ok := m.GetMatchType().(*StringMatch_Regex); ok { + return x.Regex + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*StringMatch) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*StringMatch_Exact)(nil), + (*StringMatch_Prefix)(nil), + (*StringMatch_Regex)(nil), + } +} + +// Describes the retry policy to use when a HTTP request fails. For +// example, the following rule sets the maximum number of retries to 3 when +// calling ratings:v1 service, with a 2s timeout per retry attempt. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - route: +// - destination: +// host: ratings.prod.svc.cluster.local +// subset: v1 +// retries: +// attempts: 3 +// perTryTimeout: 2s +// retryOn: gateway-error,connect-failure,refused-stream +// ``` +// +type HTTPRetry struct { + // Number of retries for a given request. The interval + // between retries will be determined automatically (25ms+). Actual + // number of retries attempted depends on the httpReqTimeout. + Attempts int32 `protobuf:"varint,1,opt,name=attempts,proto3" json:"attempts,omitempty"` + // Timeout per retry attempt for a given request. format: 1h/1m/1s/1ms. MUST BE >=1ms. 
+ PerTryTimeout *types.Duration `protobuf:"bytes,2,opt,name=per_try_timeout,json=perTryTimeout,proto3" json:"per_try_timeout,omitempty"` + // Specifies the conditions under which retry takes place. + // One or more policies can be specified using a ‘,’ delimited list. + // See the [retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on) + // and [gRPC retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-grpc-on) for more details. + RetryOn string `protobuf:"bytes,3,opt,name=retry_on,json=retryOn,proto3" json:"retry_on,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRetry) Reset() { *m = HTTPRetry{} } +func (m *HTTPRetry) String() string { return proto.CompactTextString(m) } +func (*HTTPRetry) ProtoMessage() {} +func (*HTTPRetry) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{14} +} +func (m *HTTPRetry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPRetry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPRetry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPRetry) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRetry.Merge(m, src) +} +func (m *HTTPRetry) XXX_Size() int { + return m.Size() +} +func (m *HTTPRetry) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRetry.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRetry proto.InternalMessageInfo + +func (m *HTTPRetry) GetAttempts() int32 { + if m != nil { + return m.Attempts + } + return 0 +} + +func (m *HTTPRetry) GetPerTryTimeout() *types.Duration { + if m != nil { + return m.PerTryTimeout + } + return nil +} + +func (m *HTTPRetry) GetRetryOn() 
string { + if m != nil { + return m.RetryOn + } + return "" +} + +// Describes the Cross-Origin Resource Sharing (CORS) policy, for a given +// service. Refer to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS) +// for further details about cross origin resource sharing. For example, +// the following rule restricts cross origin requests to those originating +// from example.com domain using HTTP POST/GET, and sets the +// `Access-Control-Allow-Credentials` header to false. In addition, it only +// exposes `X-Foo-bar` header and sets an expiry period of 1 day. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - route: +// - destination: +// host: ratings.prod.svc.cluster.local +// subset: v1 +// corsPolicy: +// allowOrigin: +// - example.com +// allowMethods: +// - POST +// - GET +// allowCredentials: false +// allowHeaders: +// - X-Foo-Bar +// maxAge: "24h" +// ``` +// +type CorsPolicy struct { + // The list of origins that are allowed to perform CORS requests. The + // content will be serialized into the Access-Control-Allow-Origin + // header. Wildcard * will allow all origins. + AllowOrigin []string `protobuf:"bytes,1,rep,name=allow_origin,json=allowOrigin,proto3" json:"allow_origin,omitempty"` + // List of HTTP methods allowed to access the resource. The content will + // be serialized into the Access-Control-Allow-Methods header. + AllowMethods []string `protobuf:"bytes,2,rep,name=allow_methods,json=allowMethods,proto3" json:"allow_methods,omitempty"` + // List of HTTP headers that can be used when requesting the + // resource. Serialized to Access-Control-Allow-Headers header. + AllowHeaders []string `protobuf:"bytes,3,rep,name=allow_headers,json=allowHeaders,proto3" json:"allow_headers,omitempty"` + // A white list of HTTP headers that the browsers are allowed to + // access. 
Serialized into Access-Control-Expose-Headers header. + ExposeHeaders []string `protobuf:"bytes,4,rep,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` + // Specifies how long the results of a preflight request can be + // cached. Translates to the `Access-Control-Max-Age` header. + MaxAge *types.Duration `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // Indicates whether the caller is allowed to send the actual request + // (not the preflight) using credentials. Translates to + // `Access-Control-Allow-Credentials` header. + AllowCredentials *types.BoolValue `protobuf:"bytes,6,opt,name=allow_credentials,json=allowCredentials,proto3" json:"allow_credentials,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CorsPolicy) Reset() { *m = CorsPolicy{} } +func (m *CorsPolicy) String() string { return proto.CompactTextString(m) } +func (*CorsPolicy) ProtoMessage() {} +func (*CorsPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{15} +} +func (m *CorsPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CorsPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CorsPolicy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CorsPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CorsPolicy.Merge(m, src) +} +func (m *CorsPolicy) XXX_Size() int { + return m.Size() +} +func (m *CorsPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_CorsPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_CorsPolicy proto.InternalMessageInfo + +func (m *CorsPolicy) GetAllowOrigin() []string { + if m != nil { + return m.AllowOrigin + } + return nil +} + +func (m *CorsPolicy) GetAllowMethods() []string { + if 
m != nil { + return m.AllowMethods + } + return nil +} + +func (m *CorsPolicy) GetAllowHeaders() []string { + if m != nil { + return m.AllowHeaders + } + return nil +} + +func (m *CorsPolicy) GetExposeHeaders() []string { + if m != nil { + return m.ExposeHeaders + } + return nil +} + +func (m *CorsPolicy) GetMaxAge() *types.Duration { + if m != nil { + return m.MaxAge + } + return nil +} + +func (m *CorsPolicy) GetAllowCredentials() *types.BoolValue { + if m != nil { + return m.AllowCredentials + } + return nil +} + +// HTTPFaultInjection can be used to specify one or more faults to inject +// while forwarding http requests to the destination specified in a route. +// Fault specification is part of a VirtualService rule. Faults include +// aborting the Http request from downstream service, and/or delaying +// proxying of requests. A fault rule MUST HAVE delay or abort or both. +// +// *Note:* Delay and abort faults are independent of one another, even if +// both are specified simultaneously. +type HTTPFaultInjection struct { + // Delay requests before forwarding, emulating various failures such as + // network issues, overloaded upstream service, etc. + Delay *HTTPFaultInjection_Delay `protobuf:"bytes,1,opt,name=delay,proto3" json:"delay,omitempty"` + // Abort Http request attempts and return error codes back to downstream + // service, giving the impression that the upstream service is faulty. 
+ Abort *HTTPFaultInjection_Abort `protobuf:"bytes,2,opt,name=abort,proto3" json:"abort,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPFaultInjection) Reset() { *m = HTTPFaultInjection{} } +func (m *HTTPFaultInjection) String() string { return proto.CompactTextString(m) } +func (*HTTPFaultInjection) ProtoMessage() {} +func (*HTTPFaultInjection) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{16} +} +func (m *HTTPFaultInjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPFaultInjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPFaultInjection.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPFaultInjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPFaultInjection.Merge(m, src) +} +func (m *HTTPFaultInjection) XXX_Size() int { + return m.Size() +} +func (m *HTTPFaultInjection) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPFaultInjection.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPFaultInjection proto.InternalMessageInfo + +func (m *HTTPFaultInjection) GetDelay() *HTTPFaultInjection_Delay { + if m != nil { + return m.Delay + } + return nil +} + +func (m *HTTPFaultInjection) GetAbort() *HTTPFaultInjection_Abort { + if m != nil { + return m.Abort + } + return nil +} + +// Delay specification is used to inject latency into the request +// forwarding path. 
The following example will introduce a 5 second delay +// in 1 out of every 1000 requests to the "v1" version of the "reviews" +// service from all pods with label env: prod +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - match: +// - sourceLabels: +// env: prod +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// fault: +// delay: +// percentage: +// value: 0.1 +// fixedDelay: 5s +// ``` +// +// The _fixedDelay_ field is used to indicate the amount of delay in seconds. +// The optional _percentage_ field can be used to only delay a certain +// percentage of requests. If left unspecified, all request will be delayed. +type HTTPFaultInjection_Delay struct { + // Percentage of requests on which the delay will be injected (0-100). + // Use of integer `percent` value is deprecated. Use the double `percentage` + // field instead. + Percent int32 `protobuf:"varint,1,opt,name=percent,proto3" json:"percent,omitempty"` // Deprecated: Do not use. + // Types that are valid to be assigned to HttpDelayType: + // *HTTPFaultInjection_Delay_FixedDelay + // *HTTPFaultInjection_Delay_ExponentialDelay + HttpDelayType isHTTPFaultInjection_Delay_HttpDelayType `protobuf_oneof:"http_delay_type"` + // Percentage of requests on which the delay will be injected. 
+ Percentage *Percent `protobuf:"bytes,5,opt,name=percentage,proto3" json:"percentage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPFaultInjection_Delay) Reset() { *m = HTTPFaultInjection_Delay{} } +func (m *HTTPFaultInjection_Delay) String() string { return proto.CompactTextString(m) } +func (*HTTPFaultInjection_Delay) ProtoMessage() {} +func (*HTTPFaultInjection_Delay) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{16, 0} +} +func (m *HTTPFaultInjection_Delay) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPFaultInjection_Delay) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPFaultInjection_Delay.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPFaultInjection_Delay) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPFaultInjection_Delay.Merge(m, src) +} +func (m *HTTPFaultInjection_Delay) XXX_Size() int { + return m.Size() +} +func (m *HTTPFaultInjection_Delay) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPFaultInjection_Delay.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPFaultInjection_Delay proto.InternalMessageInfo + +type isHTTPFaultInjection_Delay_HttpDelayType interface { + isHTTPFaultInjection_Delay_HttpDelayType() + MarshalTo([]byte) (int, error) + Size() int +} + +type HTTPFaultInjection_Delay_FixedDelay struct { + FixedDelay *types.Duration `protobuf:"bytes,2,opt,name=fixed_delay,json=fixedDelay,proto3,oneof"` +} +type HTTPFaultInjection_Delay_ExponentialDelay struct { + ExponentialDelay *types.Duration `protobuf:"bytes,3,opt,name=exponential_delay,json=exponentialDelay,proto3,oneof"` +} + +func (*HTTPFaultInjection_Delay_FixedDelay) isHTTPFaultInjection_Delay_HttpDelayType() {} +func 
(*HTTPFaultInjection_Delay_ExponentialDelay) isHTTPFaultInjection_Delay_HttpDelayType() {} + +func (m *HTTPFaultInjection_Delay) GetHttpDelayType() isHTTPFaultInjection_Delay_HttpDelayType { + if m != nil { + return m.HttpDelayType + } + return nil +} + +// Deprecated: Do not use. +func (m *HTTPFaultInjection_Delay) GetPercent() int32 { + if m != nil { + return m.Percent + } + return 0 +} + +func (m *HTTPFaultInjection_Delay) GetFixedDelay() *types.Duration { + if x, ok := m.GetHttpDelayType().(*HTTPFaultInjection_Delay_FixedDelay); ok { + return x.FixedDelay + } + return nil +} + +func (m *HTTPFaultInjection_Delay) GetExponentialDelay() *types.Duration { + if x, ok := m.GetHttpDelayType().(*HTTPFaultInjection_Delay_ExponentialDelay); ok { + return x.ExponentialDelay + } + return nil +} + +func (m *HTTPFaultInjection_Delay) GetPercentage() *Percent { + if m != nil { + return m.Percentage + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HTTPFaultInjection_Delay) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HTTPFaultInjection_Delay_FixedDelay)(nil), + (*HTTPFaultInjection_Delay_ExponentialDelay)(nil), + } +} + +// Abort specification is used to prematurely abort a request with a +// pre-specified error code. The following example will return an HTTP 400 +// error code for 1 out of every 1000 requests to the "ratings" service "v1". +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: ratings-route +// spec: +// hosts: +// - ratings.prod.svc.cluster.local +// http: +// - route: +// - destination: +// host: ratings.prod.svc.cluster.local +// subset: v1 +// fault: +// abort: +// percentage: +// value: 0.1 +// httpStatus: 400 +// ``` +// +// The _httpStatus_ field is used to indicate the HTTP status code to +// return to the caller. The optional _percentage_ field can be used to only +// abort a certain percentage of requests. 
If not specified, all requests are +// aborted. +type HTTPFaultInjection_Abort struct { + // Percentage of requests to be aborted with the error code provided (0-100). + // Use of integer `percent` value is deprecated. Use the double `percentage` + // field instead. + Percent int32 `protobuf:"varint,1,opt,name=percent,proto3" json:"percent,omitempty"` // Deprecated: Do not use. + // Types that are valid to be assigned to ErrorType: + // *HTTPFaultInjection_Abort_HttpStatus + // *HTTPFaultInjection_Abort_GrpcStatus + // *HTTPFaultInjection_Abort_Http2Error + ErrorType isHTTPFaultInjection_Abort_ErrorType `protobuf_oneof:"error_type"` + // Percentage of requests to be aborted with the error code provided. + Percentage *Percent `protobuf:"bytes,5,opt,name=percentage,proto3" json:"percentage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPFaultInjection_Abort) Reset() { *m = HTTPFaultInjection_Abort{} } +func (m *HTTPFaultInjection_Abort) String() string { return proto.CompactTextString(m) } +func (*HTTPFaultInjection_Abort) ProtoMessage() {} +func (*HTTPFaultInjection_Abort) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{16, 1} +} +func (m *HTTPFaultInjection_Abort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPFaultInjection_Abort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HTTPFaultInjection_Abort.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HTTPFaultInjection_Abort) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPFaultInjection_Abort.Merge(m, src) +} +func (m *HTTPFaultInjection_Abort) XXX_Size() int { + return m.Size() +} +func (m *HTTPFaultInjection_Abort) XXX_DiscardUnknown() { + 
xxx_messageInfo_HTTPFaultInjection_Abort.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPFaultInjection_Abort proto.InternalMessageInfo + +type isHTTPFaultInjection_Abort_ErrorType interface { + isHTTPFaultInjection_Abort_ErrorType() + MarshalTo([]byte) (int, error) + Size() int +} + +type HTTPFaultInjection_Abort_HttpStatus struct { + HttpStatus int32 `protobuf:"varint,2,opt,name=http_status,json=httpStatus,proto3,oneof"` +} +type HTTPFaultInjection_Abort_GrpcStatus struct { + GrpcStatus string `protobuf:"bytes,3,opt,name=grpc_status,json=grpcStatus,proto3,oneof"` +} +type HTTPFaultInjection_Abort_Http2Error struct { + Http2Error string `protobuf:"bytes,4,opt,name=http2_error,json=http2Error,proto3,oneof"` +} + +func (*HTTPFaultInjection_Abort_HttpStatus) isHTTPFaultInjection_Abort_ErrorType() {} +func (*HTTPFaultInjection_Abort_GrpcStatus) isHTTPFaultInjection_Abort_ErrorType() {} +func (*HTTPFaultInjection_Abort_Http2Error) isHTTPFaultInjection_Abort_ErrorType() {} + +func (m *HTTPFaultInjection_Abort) GetErrorType() isHTTPFaultInjection_Abort_ErrorType { + if m != nil { + return m.ErrorType + } + return nil +} + +// Deprecated: Do not use. 
+func (m *HTTPFaultInjection_Abort) GetPercent() int32 { + if m != nil { + return m.Percent + } + return 0 +} + +func (m *HTTPFaultInjection_Abort) GetHttpStatus() int32 { + if x, ok := m.GetErrorType().(*HTTPFaultInjection_Abort_HttpStatus); ok { + return x.HttpStatus + } + return 0 +} + +func (m *HTTPFaultInjection_Abort) GetGrpcStatus() string { + if x, ok := m.GetErrorType().(*HTTPFaultInjection_Abort_GrpcStatus); ok { + return x.GrpcStatus + } + return "" +} + +func (m *HTTPFaultInjection_Abort) GetHttp2Error() string { + if x, ok := m.GetErrorType().(*HTTPFaultInjection_Abort_Http2Error); ok { + return x.Http2Error + } + return "" +} + +func (m *HTTPFaultInjection_Abort) GetPercentage() *Percent { + if m != nil { + return m.Percentage + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HTTPFaultInjection_Abort) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HTTPFaultInjection_Abort_HttpStatus)(nil), + (*HTTPFaultInjection_Abort_GrpcStatus)(nil), + (*HTTPFaultInjection_Abort_Http2Error)(nil), + } +} + +// PortSelector specifies the number of a port to be used for +// matching or selection for final routing. 
+type PortSelector struct { + // Valid port number + Number uint32 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortSelector) Reset() { *m = PortSelector{} } +func (m *PortSelector) String() string { return proto.CompactTextString(m) } +func (*PortSelector) ProtoMessage() {} +func (*PortSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{17} +} +func (m *PortSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortSelector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortSelector.Merge(m, src) +} +func (m *PortSelector) XXX_Size() int { + return m.Size() +} +func (m *PortSelector) XXX_DiscardUnknown() { + xxx_messageInfo_PortSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_PortSelector proto.InternalMessageInfo + +func (m *PortSelector) GetNumber() uint32 { + if m != nil { + return m.Number + } + return 0 +} + +// Percent specifies a percentage in the range of [0.0, 100.0]. 
+type Percent struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Percent) Reset() { *m = Percent{} } +func (m *Percent) String() string { return proto.CompactTextString(m) } +func (*Percent) ProtoMessage() {} +func (*Percent) Descriptor() ([]byte, []int) { + return fileDescriptor_e85a9a4fa9c17a22, []int{18} +} +func (m *Percent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Percent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Percent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Percent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Percent.Merge(m, src) +} +func (m *Percent) XXX_Size() int { + return m.Size() +} +func (m *Percent) XXX_DiscardUnknown() { + xxx_messageInfo_Percent.DiscardUnknown(m) +} + +var xxx_messageInfo_Percent proto.InternalMessageInfo + +func (m *Percent) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*VirtualService)(nil), "istio.networking.v1alpha3.VirtualService") + proto.RegisterType((*Destination)(nil), "istio.networking.v1alpha3.Destination") + proto.RegisterType((*HTTPRoute)(nil), "istio.networking.v1alpha3.HTTPRoute") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPRoute.AppendHeadersEntry") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPRoute.AppendRequestHeadersEntry") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPRoute.AppendResponseHeadersEntry") + proto.RegisterType((*Headers)(nil), "istio.networking.v1alpha3.Headers") + proto.RegisterType((*Headers_HeaderOperations)(nil), 
"istio.networking.v1alpha3.Headers.HeaderOperations") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.Headers.HeaderOperations.AddEntry") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.Headers.HeaderOperations.SetEntry") + proto.RegisterType((*TLSRoute)(nil), "istio.networking.v1alpha3.TLSRoute") + proto.RegisterType((*TCPRoute)(nil), "istio.networking.v1alpha3.TCPRoute") + proto.RegisterType((*HTTPMatchRequest)(nil), "istio.networking.v1alpha3.HTTPMatchRequest") + proto.RegisterMapType((map[string]*StringMatch)(nil), "istio.networking.v1alpha3.HTTPMatchRequest.HeadersEntry") + proto.RegisterMapType((map[string]*StringMatch)(nil), "istio.networking.v1alpha3.HTTPMatchRequest.QueryParamsEntry") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPMatchRequest.SourceLabelsEntry") + proto.RegisterType((*HTTPRouteDestination)(nil), "istio.networking.v1alpha3.HTTPRouteDestination") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPRouteDestination.AppendRequestHeadersEntry") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.HTTPRouteDestination.AppendResponseHeadersEntry") + proto.RegisterType((*RouteDestination)(nil), "istio.networking.v1alpha3.RouteDestination") + proto.RegisterType((*L4MatchAttributes)(nil), "istio.networking.v1alpha3.L4MatchAttributes") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.L4MatchAttributes.SourceLabelsEntry") + proto.RegisterType((*TLSMatchAttributes)(nil), "istio.networking.v1alpha3.TLSMatchAttributes") + proto.RegisterMapType((map[string]string)(nil), "istio.networking.v1alpha3.TLSMatchAttributes.SourceLabelsEntry") + proto.RegisterType((*HTTPRedirect)(nil), "istio.networking.v1alpha3.HTTPRedirect") + proto.RegisterType((*HTTPRewrite)(nil), "istio.networking.v1alpha3.HTTPRewrite") + proto.RegisterType((*StringMatch)(nil), "istio.networking.v1alpha3.StringMatch") + 
proto.RegisterType((*HTTPRetry)(nil), "istio.networking.v1alpha3.HTTPRetry") + proto.RegisterType((*CorsPolicy)(nil), "istio.networking.v1alpha3.CorsPolicy") + proto.RegisterType((*HTTPFaultInjection)(nil), "istio.networking.v1alpha3.HTTPFaultInjection") + proto.RegisterType((*HTTPFaultInjection_Delay)(nil), "istio.networking.v1alpha3.HTTPFaultInjection.Delay") + proto.RegisterType((*HTTPFaultInjection_Abort)(nil), "istio.networking.v1alpha3.HTTPFaultInjection.Abort") + proto.RegisterType((*PortSelector)(nil), "istio.networking.v1alpha3.PortSelector") + proto.RegisterType((*Percent)(nil), "istio.networking.v1alpha3.Percent") +} + +func init() { + proto.RegisterFile("networking/v1alpha3/virtual_service.proto", fileDescriptor_e85a9a4fa9c17a22) +} + +var fileDescriptor_e85a9a4fa9c17a22 = []byte{ + // 1951 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0x8f, 0xfe, 0x8c, 0x25, 0xbd, 0x91, 0x12, 0xb9, 0xc9, 0x26, 0x63, 0x91, 0x4a, 0xbc, 0x0a, + 0x1b, 0x4c, 0x2d, 0x2b, 0x17, 0x36, 0x2c, 0xae, 0x25, 0x9b, 0x5d, 0xd9, 0xc9, 0xae, 0xb2, 0x95, + 0x10, 0xd3, 0x76, 0xf6, 0xc0, 0x65, 0xaa, 0x35, 0xd3, 0x96, 0x86, 0x48, 0xd3, 0x93, 0x9e, 0x1e, + 0x5b, 0xaa, 0x3d, 0x52, 0x45, 0x15, 0x14, 0x17, 0x4e, 0x9c, 0xe0, 0xc4, 0xa7, 0xe0, 0xc2, 0x75, + 0x8f, 0x54, 0xf1, 0x05, 0xb6, 0x42, 0xc1, 0xe7, 0xa0, 0xba, 0x7b, 0x46, 0x1a, 0x4b, 0xb6, 0x46, + 0x32, 0xa1, 0xd8, 0x93, 0xd5, 0xdd, 0xef, 0xf7, 0xde, 0xeb, 0xd7, 0xdd, 0xef, 0xfd, 0xe6, 0x19, + 0x7e, 0xe0, 0x53, 0x71, 0xc6, 0xf8, 0x2b, 0xcf, 0xef, 0x6d, 0x9f, 0xfe, 0x88, 0x0c, 0x82, 0x3e, + 0xd9, 0xdd, 0x3e, 0xf5, 0xb8, 0x88, 0xc8, 0xc0, 0x0e, 0x29, 0x3f, 0xf5, 0x1c, 0xda, 0x0a, 0x38, + 0x13, 0x0c, 0x6d, 0x78, 0xa1, 0xf0, 0x58, 0x6b, 0x0a, 0x68, 0x25, 0x80, 0xc6, 0xbd, 0x1e, 0x63, + 0xbd, 0x01, 0xdd, 0x26, 0x81, 0xb7, 0x7d, 0xe2, 0xd1, 0x81, 0x6b, 0x77, 0x69, 0x9f, 0x9c, 0x7a, + 0x8c, 0x6b, 0x6c, 0xe3, 0x6e, 0x2c, 0xa0, 0x46, 0xdd, 0xe8, 0x64, 
0xdb, 0x8d, 0x38, 0x11, 0x1e, + 0xf3, 0x2f, 0x5b, 0x3f, 0xe3, 0x24, 0x08, 0x28, 0x0f, 0xf5, 0x7a, 0xf3, 0xf7, 0x79, 0xb8, 0xfe, + 0xa5, 0xf6, 0xea, 0x48, 0x3b, 0x85, 0x36, 0xc0, 0xe8, 0xb3, 0x50, 0x84, 0x56, 0x6e, 0xb3, 0xb0, + 0x55, 0xd9, 0x2f, 0x7c, 0xd3, 0xce, 0x63, 0x3d, 0x83, 0x1a, 0x50, 0xee, 0x11, 0x41, 0xcf, 0xc8, + 0x38, 0xb4, 0xf2, 0x72, 0x15, 0x4f, 0xc6, 0x68, 0x0f, 0x8a, 0x7d, 0x21, 0x02, 0xab, 0xb0, 0x59, + 0xd8, 0x32, 0x77, 0xbe, 0xd7, 0xba, 0x74, 0x53, 0xad, 0xce, 0xf1, 0xf1, 0x21, 0x66, 0x91, 0xa0, + 0x58, 0x21, 0xd0, 0x4f, 0xa0, 0x20, 0x06, 0xa1, 0x65, 0x28, 0xe0, 0xfd, 0x05, 0xc0, 0xe3, 0x67, + 0x47, 0x1a, 0x27, 0xe5, 0x15, 0xcc, 0x09, 0xac, 0x62, 0x36, 0xec, 0xe0, 0x30, 0x81, 0x39, 0x01, + 0xfa, 0x2e, 0x54, 0xe8, 0x28, 0x60, 0x5c, 0xd8, 0x82, 0x59, 0x6b, 0x7a, 0x13, 0x7a, 0xe2, 0x98, + 0x35, 0xbf, 0x02, 0xf3, 0x31, 0x0d, 0x85, 0xe7, 0xab, 0x18, 0xa2, 0xdb, 0x50, 0x94, 0x1b, 0xb7, + 0x72, 0x9b, 0xb9, 0x24, 0x12, 0x6a, 0x02, 0xdd, 0x82, 0xb5, 0x30, 0xea, 0x86, 0x54, 0x58, 0x79, + 0xb9, 0x84, 0xe3, 0x11, 0xfa, 0x19, 0x14, 0xa5, 0x26, 0xab, 0xb0, 0x99, 0xdb, 0x32, 0x77, 0xbe, + 0xbf, 0xc0, 0xa9, 0x43, 0xc6, 0xc5, 0x11, 0x1d, 0x50, 0x47, 0x30, 0x8e, 0x15, 0xa8, 0xf9, 0xb5, + 0x09, 0x95, 0x49, 0x6c, 0x10, 0x82, 0xa2, 0x4f, 0x86, 0xd4, 0x5a, 0x57, 0x06, 0xd4, 0x6f, 0xd4, + 0x06, 0x63, 0x48, 0x84, 0xd3, 0x57, 0x47, 0x63, 0xee, 0xbc, 0x9f, 0x11, 0xe4, 0xe7, 0x52, 0x16, + 0xd3, 0xd7, 0x11, 0x0d, 0x05, 0xd6, 0x48, 0xf4, 0x04, 0x0c, 0x2e, 0xf5, 0xab, 0xf3, 0x33, 0x77, + 0xb6, 0x97, 0x39, 0xa7, 0x54, 0x48, 0xb0, 0x46, 0xa3, 0x03, 0x28, 0x73, 0xea, 0x7a, 0x9c, 0x3a, + 0xcb, 0x6c, 0x56, 0x69, 0x8a, 0xc5, 0xf1, 0x04, 0x88, 0x3e, 0x85, 0x12, 0xa7, 0x67, 0xdc, 0x13, + 0xd4, 0x2a, 0x2a, 0x1d, 0x0f, 0x32, 0x75, 0x28, 0x69, 0x9c, 0xc0, 0xd0, 0xfb, 0xb0, 0x7e, 0x46, + 0xbb, 0x21, 0x73, 0x5e, 0x51, 0x61, 0x47, 0x41, 0x8f, 0x13, 0x97, 0x5a, 0xc6, 0x66, 0x6e, 0xab, + 0x8c, 0xeb, 0x93, 0x85, 0x97, 0x7a, 0x1e, 0xed, 0x42, 0x49, 0x78, 0x43, 0xca, 0x22, 0x61, 0xad, + 0x29, 
0x73, 0x1b, 0x2d, 0xfd, 0x3a, 0x5a, 0xc9, 0xeb, 0x68, 0x3d, 0x8e, 0x5f, 0x0f, 0x4e, 0x24, + 0xd1, 0x23, 0xe9, 0xa3, 0xe0, 0x1e, 0x0d, 0xad, 0x92, 0x02, 0x65, 0xde, 0x6c, 0x2a, 0xf8, 0x18, + 0x27, 0x20, 0x74, 0x00, 0xc6, 0x09, 0x89, 0x06, 0xc2, 0x2a, 0x2b, 0xf4, 0x07, 0x19, 0xe8, 0xcf, + 0xa4, 0xec, 0x53, 0xff, 0x57, 0xd4, 0xd1, 0xd1, 0x56, 0x58, 0xf4, 0x08, 0xd6, 0x86, 0x1e, 0xe7, + 0x8c, 0x5b, 0x95, 0xcc, 0x38, 0xa5, 0x0f, 0x2b, 0x46, 0xa1, 0x03, 0xb8, 0xae, 0x7f, 0xd9, 0x01, + 0xe5, 0x0e, 0xf5, 0x85, 0x85, 0x94, 0x9e, 0x3b, 0x73, 0x01, 0x78, 0xf9, 0xd4, 0x17, 0xbb, 0x3b, + 0x5f, 0x92, 0x41, 0x44, 0x71, 0x4d, 0x63, 0x0e, 0x35, 0x04, 0x7d, 0x06, 0xa6, 0xc3, 0x78, 0x68, + 0x07, 0x6c, 0xe0, 0x39, 0x63, 0x0b, 0x94, 0x86, 0xf7, 0x16, 0x78, 0x72, 0xc0, 0x78, 0x78, 0xa8, + 0x84, 0x31, 0x38, 0x93, 0xdf, 0xa8, 0x0b, 0xd7, 0x65, 0x0a, 0xf2, 0x5d, 0xbb, 0x4f, 0x89, 0x4b, + 0x79, 0x68, 0x99, 0xea, 0x2a, 0xfe, 0x74, 0x99, 0xab, 0xd8, 0x6a, 0x2b, 0x68, 0x47, 0x23, 0x9f, + 0xf8, 0x82, 0x8f, 0xf7, 0xf3, 0x56, 0x0e, 0xd7, 0x48, 0x7a, 0x1e, 0x7d, 0x04, 0xb7, 0x39, 0x1d, + 0xb2, 0x53, 0x6a, 0x73, 0x1a, 0x06, 0xcc, 0x0f, 0xe9, 0xc4, 0x58, 0x55, 0x65, 0x35, 0x89, 0x79, + 0x47, 0x8b, 0xe0, 0x58, 0x22, 0xc1, 0x7e, 0x05, 0xb7, 0x63, 0xff, 0xe6, 0xb0, 0x35, 0xe5, 0xe8, + 0x27, 0x2b, 0x38, 0x3a, 0xa3, 0x7c, 0xea, 0xf0, 0x3b, 0xe4, 0xa2, 0x75, 0xb4, 0x07, 0xb7, 0x26, + 0x8e, 0xab, 0x77, 0x3b, 0xb1, 0x7d, 0x7d, 0xe2, 0xf7, 0xcd, 0xc4, 0x6f, 0x25, 0x90, 0x20, 0x47, + 0x70, 0x6b, 0xe2, 0xf6, 0x79, 0xe4, 0x0d, 0xe5, 0xf5, 0xa3, 0x95, 0xbc, 0x4e, 0xab, 0x9e, 0x3a, + 0x7d, 0x93, 0x5c, 0xb0, 0x8c, 0x1e, 0x42, 0x29, 0x31, 0x55, 0x57, 0x97, 0xa2, 0xb9, 0xc8, 0x94, + 0x96, 0xc4, 0x09, 0xa4, 0xf1, 0x29, 0xa0, 0xf9, 0x33, 0x45, 0x75, 0x28, 0xbc, 0xa2, 0x63, 0x9d, + 0x78, 0xb1, 0xfc, 0x89, 0x6e, 0x82, 0x71, 0x2a, 0xaf, 0x65, 0x9c, 0x71, 0xf5, 0xe0, 0xa3, 0xfc, + 0x5e, 0xae, 0xd1, 0x81, 0xc6, 0xe5, 0xc1, 0x5e, 0x49, 0xd3, 0xe7, 0xb0, 0x71, 0x69, 0x00, 0x56, + 0x51, 0xd4, 0xfc, 0x77, 0x01, 0x4a, 0x49, 
0x78, 0x9e, 0xcb, 0x0c, 0xa2, 0xd4, 0x29, 0xac, 0xb9, + 0xb3, 0x9b, 0x1d, 0x9e, 0xf8, 0xef, 0x8b, 0x80, 0xea, 0x7c, 0x14, 0xe2, 0x44, 0x07, 0x7a, 0x21, + 0x33, 0xaf, 0xde, 0xa7, 0xb2, 0x7b, 0x45, 0x7d, 0x13, 0x25, 0x8d, 0xbf, 0xe6, 0xa1, 0x3e, 0xbb, + 0x8c, 0x7e, 0x0e, 0x05, 0x59, 0xdd, 0x74, 0x9d, 0x79, 0x78, 0x05, 0x03, 0xad, 0x23, 0x2a, 0x54, + 0xdc, 0xb0, 0x54, 0x24, 0xf5, 0x11, 0xd7, 0x8d, 0x8b, 0xce, 0x95, 0xf4, 0xb5, 0x5d, 0x37, 0xd6, + 0x47, 0x5c, 0x57, 0x16, 0x60, 0xfd, 0x0a, 0x14, 0xdf, 0xa8, 0xe0, 0x78, 0xd4, 0xf8, 0x10, 0xca, + 0x89, 0xe1, 0x95, 0x4e, 0xfe, 0x43, 0x28, 0x27, 0x06, 0x56, 0x3a, 0xe8, 0x3f, 0xe6, 0xa0, 0x9c, + 0xd0, 0x12, 0xd4, 0x39, 0x5f, 0x9e, 0x3f, 0x58, 0x4c, 0x65, 0x54, 0x75, 0x6e, 0x0b, 0xc1, 0xbd, + 0x6e, 0x24, 0x68, 0x18, 0x13, 0x2d, 0x5d, 0xa5, 0xdb, 0xe7, 0xab, 0xf4, 0xa2, 0x42, 0x7f, 0x49, + 0x85, 0x6e, 0xfe, 0x41, 0x7a, 0x16, 0x33, 0x1f, 0xb4, 0x7f, 0xde, 0xb3, 0x1f, 0x2e, 0xd0, 0xf7, + 0xec, 0xc7, 0x33, 0x8e, 0xbd, 0x45, 0x9f, 0xfe, 0x52, 0x82, 0xfa, 0x2c, 0x31, 0x99, 0x10, 0x1d, + 0x33, 0x45, 0x74, 0xf6, 0xa0, 0x10, 0x71, 0x2f, 0x7e, 0x2f, 0x8b, 0xaa, 0xdd, 0x91, 0xe0, 0x9e, + 0xdf, 0xd3, 0xfa, 0x24, 0x44, 0x96, 0xca, 0xd0, 0xe9, 0xd3, 0x61, 0xf2, 0x38, 0x96, 0x05, 0xc7, + 0x28, 0x55, 0x6a, 0xa9, 0xe8, 0x33, 0x37, 0xa6, 0x35, 0x4b, 0xe3, 0x35, 0x0a, 0x3d, 0x86, 0x0a, + 0x89, 0x44, 0x9f, 0x71, 0x4f, 0x8c, 0x97, 0x60, 0x35, 0x69, 0x15, 0x53, 0x20, 0xc2, 0xd3, 0x94, + 0xaa, 0x69, 0xf1, 0xde, 0x0a, 0x54, 0xaf, 0x95, 0x4e, 0x5b, 0x93, 0x44, 0x2b, 0xe3, 0xac, 0xb8, + 0xa9, 0xe4, 0x3e, 0x35, 0x4d, 0x39, 0x51, 0x17, 0x6a, 0x21, 0x8b, 0xb8, 0x43, 0xed, 0x01, 0xe9, + 0xd2, 0x81, 0xe4, 0x38, 0xd2, 0xda, 0xc7, 0xab, 0x58, 0x3b, 0x52, 0x0a, 0x9e, 0x29, 0xbc, 0x36, + 0x59, 0x0d, 0x53, 0x53, 0xe7, 0x3e, 0x1a, 0xca, 0x33, 0x1f, 0x0d, 0x36, 0x54, 0x5f, 0x47, 0x94, + 0x8f, 0xed, 0x80, 0x70, 0x32, 0x0c, 0xad, 0x4a, 0x76, 0x7e, 0x98, 0x35, 0xff, 0x0b, 0x89, 0x3f, + 0x54, 0x70, 0x6d, 0xdd, 0x7c, 0x3d, 0x9d, 0x41, 0x0f, 0xe0, 0x86, 0xd7, 0xf3, 
0x19, 0xa7, 0x76, + 0xc4, 0x3d, 0xdb, 0x21, 0x21, 0x55, 0xc4, 0xa5, 0x8c, 0x6b, 0x7a, 0xfa, 0x25, 0xf7, 0x0e, 0x48, + 0x48, 0x1b, 0x5d, 0xa8, 0x66, 0x24, 0xfb, 0x87, 0xe9, 0x1c, 0xb0, 0xfc, 0xa1, 0xa6, 0x72, 0xcc, + 0x27, 0xb0, 0x3e, 0x17, 0xab, 0x95, 0x92, 0xd4, 0x09, 0xd4, 0x67, 0x77, 0xfb, 0xbf, 0x70, 0xb4, + 0xf9, 0x0f, 0x03, 0x6e, 0x5e, 0x44, 0xfe, 0xd1, 0x73, 0x30, 0xdd, 0xe9, 0x70, 0x89, 0xe7, 0x99, + 0x02, 0xeb, 0xfc, 0x96, 0xc6, 0xcb, 0x24, 0x7e, 0x46, 0xbd, 0x5e, 0x5f, 0x7f, 0x45, 0x19, 0x38, + 0x1e, 0x2d, 0x62, 0x6f, 0x85, 0x2c, 0xf6, 0xf6, 0x9b, 0xdc, 0xe5, 0xf4, 0x4d, 0x7f, 0x2a, 0x7e, + 0xb1, 0xe2, 0x27, 0xcf, 0x5b, 0x67, 0x72, 0x46, 0x06, 0x93, 0xfb, 0x75, 0xee, 0x52, 0x2a, 0xb7, + 0xa6, 0x76, 0xf0, 0xf4, 0xaa, 0x3b, 0xb8, 0x22, 0xab, 0x2b, 0xad, 0xce, 0xea, 0xbe, 0x85, 0x9c, + 0x6c, 0x0c, 0xf5, 0xff, 0xd3, 0x85, 0x6e, 0xfe, 0x2d, 0x0f, 0xeb, 0x73, 0x75, 0x15, 0x6d, 0xc3, + 0x77, 0x52, 0x60, 0x3b, 0x8c, 0xba, 0x3e, 0x4d, 0xda, 0x2e, 0x18, 0xa5, 0x96, 0x8e, 0xf4, 0xca, + 0x24, 0x83, 0xe7, 0x53, 0x19, 0xfc, 0xfe, 0x24, 0x83, 0x6b, 0xbc, 0x2a, 0x5b, 0x95, 0x24, 0x05, + 0x6b, 0x24, 0x72, 0x66, 0xd3, 0x7c, 0x31, 0xf3, 0x93, 0x60, 0xce, 0xdd, 0x95, 0xf2, 0xbc, 0x71, + 0x3e, 0xcf, 0xff, 0xd7, 0xa9, 0xaf, 0xf9, 0xcf, 0x3c, 0xa0, 0x79, 0xce, 0x84, 0x36, 0xa1, 0x12, + 0xfa, 0x9e, 0x3d, 0xd7, 0xaf, 0x2a, 0x87, 0xbe, 0xd7, 0x51, 0x2d, 0xab, 0x4b, 0x82, 0x9c, 0xcf, + 0x0c, 0x72, 0x61, 0x51, 0x90, 0x8b, 0x17, 0x04, 0xd9, 0x9d, 0x0d, 0xb2, 0x91, 0xf9, 0xb5, 0x38, + 0xbf, 0xa3, 0x95, 0xa2, 0xbc, 0xf6, 0xb6, 0xa3, 0xec, 0x40, 0x35, 0xdd, 0xaa, 0x91, 0xd8, 0x84, + 0x86, 0x55, 0x34, 0xbd, 0xba, 0x93, 0xa6, 0x37, 0x1a, 0x9f, 0xa2, 0x2d, 0xf7, 0xa1, 0x96, 0x34, + 0x77, 0x6c, 0x87, 0xb9, 0x34, 0x0e, 0x62, 0x35, 0x99, 0x3c, 0x60, 0x2e, 0x6d, 0x7e, 0x0c, 0x66, + 0xaa, 0x97, 0xb3, 0xaa, 0x8d, 0x26, 0x05, 0x33, 0x55, 0xb6, 0xd0, 0x2d, 0x30, 0xe8, 0x88, 0x38, + 0x71, 0x8f, 0xae, 0x73, 0x0d, 0xeb, 0x21, 0xb2, 0x60, 0x2d, 0xe0, 0xf4, 0xc4, 0x1b, 0x69, 0x0d, + 0x9d, 0x6b, 0x38, 
0x1e, 0x4b, 0x04, 0xa7, 0x3d, 0x3a, 0xd2, 0x2f, 0x45, 0x22, 0xd4, 0x70, 0xbf, + 0x0a, 0xa0, 0x88, 0xae, 0x2d, 0xc6, 0x01, 0x6d, 0xfe, 0x2e, 0x17, 0x37, 0xe3, 0xa8, 0x0c, 0xe2, + 0x3d, 0x28, 0x13, 0x21, 0xe8, 0x30, 0x50, 0xd7, 0x2c, 0xb7, 0x65, 0xc4, 0xd7, 0x2c, 0x99, 0x44, + 0x6d, 0xb8, 0x11, 0x50, 0x6e, 0x0b, 0x3e, 0xb6, 0x93, 0x1e, 0x53, 0x3e, 0xab, 0xc7, 0x54, 0x0b, + 0x28, 0x3f, 0xe6, 0xe3, 0xe3, 0xb8, 0xd3, 0xb4, 0x21, 0x3f, 0xec, 0xa4, 0x02, 0xe6, 0xc7, 0x8f, + 0x58, 0x35, 0x91, 0xc6, 0x2f, 0xfc, 0xe6, 0x9f, 0xf2, 0x00, 0xd3, 0x6e, 0x0a, 0x7a, 0x17, 0xaa, + 0x64, 0x30, 0x60, 0x67, 0x36, 0xe3, 0x5e, 0xcf, 0xf3, 0xe3, 0x8c, 0x61, 0xaa, 0xb9, 0x17, 0x6a, + 0x4a, 0x9e, 0x84, 0x16, 0xd1, 0xb4, 0x34, 0xb9, 0xf0, 0x1a, 0xf7, 0x5c, 0xcf, 0x4d, 0x85, 0xce, + 0x55, 0xd7, 0x58, 0x28, 0xa9, 0x03, 0xef, 0xc1, 0x75, 0x3a, 0x0a, 0xd8, 0x4c, 0x19, 0xad, 0xe0, + 0x9a, 0x9e, 0x4d, 0xc4, 0x76, 0xa0, 0x34, 0x24, 0x23, 0x9b, 0xf4, 0x74, 0xff, 0x6d, 0xe1, 0xc6, + 0xd7, 0x86, 0x64, 0xd4, 0xee, 0x51, 0xf4, 0x39, 0xac, 0x6b, 0xfb, 0x0e, 0xa7, 0x2e, 0xf5, 0x85, + 0x47, 0x06, 0x61, 0xdc, 0x9a, 0x6b, 0xcc, 0xa1, 0xf7, 0x19, 0x1b, 0xe8, 0xbe, 0x54, 0x5d, 0x81, + 0x0e, 0xa6, 0x98, 0xe6, 0x9f, 0x0d, 0x40, 0xf3, 0xdd, 0x33, 0xf4, 0x14, 0x0c, 0x97, 0x0e, 0xc8, + 0x78, 0x99, 0xef, 0xee, 0x39, 0x74, 0xeb, 0xb1, 0x84, 0x62, 0xad, 0x41, 0xaa, 0x22, 0xdd, 0x24, + 0xf7, 0xae, 0xac, 0xaa, 0x2d, 0xa1, 0x58, 0x6b, 0x68, 0xfc, 0x36, 0x0f, 0x86, 0xd2, 0x8d, 0xee, + 0x40, 0x29, 0xe9, 0xc7, 0xe9, 0x4b, 0x25, 0xab, 0x71, 0x32, 0x85, 0xda, 0x60, 0x9e, 0x78, 0x23, + 0xea, 0xda, 0x7a, 0x0f, 0x59, 0xd7, 0x49, 0xdd, 0xc8, 0xce, 0x35, 0x0c, 0x0a, 0xa4, 0x0d, 0x74, + 0x60, 0x5d, 0x9e, 0x92, 0xaf, 0xe3, 0x14, 0x2b, 0x2a, 0x64, 0x28, 0xea, 0x5c, 0xc3, 0xf5, 0x14, + 0x4a, 0x6b, 0xda, 0x07, 0x88, 0xfd, 0x9a, 0x9e, 0xf0, 0x22, 0x42, 0x10, 0x37, 0x0d, 0x71, 0x0a, + 0xb5, 0xbf, 0x0e, 0x37, 0xfa, 0x42, 0x04, 0xda, 0x0d, 0xf5, 0xca, 0x1a, 0xff, 0xca, 0x81, 0xa1, + 0x82, 0x93, 0x11, 0x8b, 0x07, 0x60, 0x2a, 0x68, 0x28, 
0x88, 0x88, 0x42, 0x5d, 0x5d, 0x27, 0x1b, + 0x96, 0x2b, 0x47, 0x6a, 0x01, 0xbd, 0x0b, 0x66, 0x8f, 0x07, 0x4e, 0x22, 0x97, 0xbc, 0x70, 0x90, + 0x93, 0x53, 0x11, 0x09, 0xd8, 0xb1, 0xa9, 0x6a, 0xa8, 0x16, 0x13, 0x11, 0x35, 0xf9, 0x44, 0xb5, + 0x4b, 0xdf, 0xc6, 0x66, 0xab, 0x00, 0xca, 0x80, 0xce, 0x26, 0x3b, 0x50, 0x4d, 0x37, 0xfc, 0x25, + 0x51, 0xf0, 0xa3, 0x61, 0x97, 0x72, 0xb5, 0xd9, 0x1a, 0x8e, 0x47, 0x5f, 0x14, 0xcb, 0xf9, 0x7a, + 0x41, 0x7f, 0x03, 0x37, 0xef, 0x41, 0x29, 0x69, 0xbd, 0x4e, 0x32, 0xb6, 0x94, 0xce, 0xc5, 0x19, + 0x7b, 0xbf, 0xf5, 0xf5, 0x9b, 0xbb, 0xb9, 0xbf, 0xbf, 0xb9, 0x9b, 0xfb, 0xe6, 0xcd, 0xdd, 0xdc, + 0x2f, 0x37, 0xb5, 0x7f, 0x1e, 0x53, 0xff, 0x2c, 0xba, 0xe0, 0xbf, 0x4f, 0xdd, 0x35, 0x75, 0xd4, + 0xbb, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xe6, 0x9e, 0x31, 0x9b, 0x1a, 0x00, 0x00, +} + +func (m *VirtualService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VirtualService) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VirtualService) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExportTo) > 0 { + for iNdEx := len(m.ExportTo) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExportTo[iNdEx]) + copy(dAtA[i:], m.ExportTo[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.ExportTo[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Tls) > 0 { + for iNdEx := len(m.Tls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Tcp) > 0 { + for iNdEx := len(m.Tcp) - 1; 
iNdEx >= 0; iNdEx-- { + { + size, err := m.Tcp[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Http) > 0 { + for iNdEx := len(m.Http) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Http[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Gateways) > 0 { + for iNdEx := len(m.Gateways) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Gateways[iNdEx]) + copy(dAtA[i:], m.Gateways[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Gateways[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Destination) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Destination) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Destination) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Subset) > 0 { + i -= len(m.Subset) + copy(dAtA[i:], m.Subset) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Subset))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MirrorPercent != nil { + { + size, err := m.MirrorPercent.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.Headers != nil { + { + size, err := m.Headers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.AppendRequestHeaders) > 0 { + for k := range m.AppendRequestHeaders { + v := m.AppendRequestHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + if len(m.RemoveRequestHeaders) > 0 { + for iNdEx := len(m.RemoveRequestHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.RemoveRequestHeaders[iNdEx]) + copy(dAtA[i:], m.RemoveRequestHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.RemoveRequestHeaders[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.AppendResponseHeaders) > 0 { + for k := range m.AppendResponseHeaders { + v := m.AppendResponseHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.RemoveResponseHeaders) > 0 { + for iNdEx := len(m.RemoveResponseHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemoveResponseHeaders[iNdEx]) + copy(dAtA[i:], m.RemoveResponseHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.RemoveResponseHeaders[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.AppendHeaders) > 0 { + for k := range m.AppendHeaders { + v := m.AppendHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.CorsPolicy != nil { + { + size, err := m.CorsPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Mirror != nil { + { + size, err := m.Mirror.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Fault != nil { + { + size, err := m.Fault.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Retries != nil { + { + size, err := m.Retries.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Timeout != nil { + { + size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.WebsocketUpgrade { + i-- + if m.WebsocketUpgrade { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Rewrite != nil { + { + size, err := m.Rewrite.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Redirect != nil { + { + size, err := m.Redirect.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Route) > 0 { + for iNdEx := len(m.Route) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Route[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Match) > 0 { + for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Match[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Headers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Headers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Headers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Headers_HeaderOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Headers_HeaderOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Headers_HeaderOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Remove) > 0 { + for iNdEx := len(m.Remove) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Remove[iNdEx]) + copy(dAtA[i:], m.Remove[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Remove[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Add) > 0 { + for k := range m.Add { + v := m.Add[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + 
if len(m.Set) > 0 { + for k := range m.Set { + v := m.Set[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TLSRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Route) > 0 { + for iNdEx := len(m.Route) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Route[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Match) > 0 { + for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Match[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TCPRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPRoute) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Route) > 0 { + for iNdEx := len(m.Route) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Route[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Match) > 0 { + for iNdEx := len(m.Match) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Match[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HTTPMatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPMatchRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPMatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x5a + } + if m.IgnoreUriCase { + i-- + if m.IgnoreUriCase { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.QueryParams) > 0 { + for k := range m.QueryParams { + v := m.QueryParams[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Gateways) > 0 { + for iNdEx := len(m.Gateways) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Gateways[iNdEx]) + copy(dAtA[i:], m.Gateways[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Gateways[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.SourceLabels) > 0 { + for k := range m.SourceLabels { + v := m.SourceLabels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if m.Port != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x30 + } + if len(m.Headers) > 0 { + for k := range m.Headers { + v := m.Headers[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Authority != nil { + { + size, err := m.Authority.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Method != nil { + { + size, err := m.Method.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Scheme != nil { + { + size, err := m.Scheme.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Uri != nil { + { + size, err := m.Uri.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPRouteDestination) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRouteDestination) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRouteDestination) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Headers != nil { + { + size, err := m.Headers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.AppendRequestHeaders) > 0 { + for k := range m.AppendRequestHeaders { + v := m.AppendRequestHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RemoveRequestHeaders) > 0 { + for iNdEx := len(m.RemoveRequestHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemoveRequestHeaders[iNdEx]) + copy(dAtA[i:], m.RemoveRequestHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.RemoveRequestHeaders[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AppendResponseHeaders) > 0 { + for k := range 
m.AppendResponseHeaders { + v := m.AppendResponseHeaders[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.RemoveResponseHeaders) > 0 { + for iNdEx := len(m.RemoveResponseHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RemoveResponseHeaders[iNdEx]) + copy(dAtA[i:], m.RemoveResponseHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.RemoveResponseHeaders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Weight != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x10 + } + if m.Destination != nil { + { + size, err := m.Destination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RouteDestination) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteDestination) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteDestination) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Weight != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x10 + } + if m.Destination != nil { + { + size, err := m.Destination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa + } + return len(dAtA) - i, nil +} + +func (m *L4MatchAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *L4MatchAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *L4MatchAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Gateways) > 0 { + for iNdEx := len(m.Gateways) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Gateways[iNdEx]) + copy(dAtA[i:], m.Gateways[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Gateways[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.SourceLabels) > 0 { + for k := range m.SourceLabels { + v := m.SourceLabels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.SourceSubnet) > 0 { + i -= len(m.SourceSubnet) + copy(dAtA[i:], m.SourceSubnet) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.SourceSubnet))) + i-- + dAtA[i] = 0x1a + } + if m.Port != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.DestinationSubnets) > 0 { + for iNdEx := len(m.DestinationSubnets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DestinationSubnets[iNdEx]) + copy(dAtA[i:], m.DestinationSubnets[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.DestinationSubnets[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TLSMatchAttributes) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSMatchAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSMatchAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Gateways) > 0 { + for iNdEx := len(m.Gateways) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Gateways[iNdEx]) + copy(dAtA[i:], m.Gateways[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Gateways[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.SourceLabels) > 0 { + for k := range m.SourceLabels { + v := m.SourceLabels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintVirtualService(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintVirtualService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintVirtualService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.SourceSubnet) > 0 { + i -= len(m.SourceSubnet) + copy(dAtA[i:], m.SourceSubnet) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.SourceSubnet))) + i-- + dAtA[i] = 0x22 + } + if m.Port != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.DestinationSubnets) > 0 { + for iNdEx := len(m.DestinationSubnets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DestinationSubnets[iNdEx]) + copy(dAtA[i:], m.DestinationSubnets[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.DestinationSubnets[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.SniHosts) > 0 { + for iNdEx := len(m.SniHosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SniHosts[iNdEx]) + copy(dAtA[i:], m.SniHosts[iNdEx]) + i = 
encodeVarintVirtualService(dAtA, i, uint64(len(m.SniHosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HTTPRedirect) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRedirect) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRedirect) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RedirectCode != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.RedirectCode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], m.Uri) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPRewrite) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRewrite) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRewrite) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0x12 + } + if len(m.Uri) > 0 { + i -= len(m.Uri) + copy(dAtA[i:], 
m.Uri) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Uri))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StringMatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringMatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringMatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.MatchType != nil { + { + size := m.MatchType.Size() + i -= size + if _, err := m.MatchType.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *StringMatch_Exact) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *StringMatch_Exact) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Exact) + copy(dAtA[i:], m.Exact) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Exact))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *StringMatch_Prefix) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *StringMatch_Prefix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *StringMatch_Regex) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *StringMatch_Regex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Regex) + copy(dAtA[i:], m.Regex) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Regex))) + i-- + 
dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HTTPRetry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPRetry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPRetry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RetryOn) > 0 { + i -= len(m.RetryOn) + copy(dAtA[i:], m.RetryOn) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.RetryOn))) + i-- + dAtA[i] = 0x1a + } + if m.PerTryTimeout != nil { + { + size, err := m.PerTryTimeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Attempts != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Attempts)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CorsPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CorsPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CorsPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.AllowCredentials != nil { + { + size, err := m.AllowCredentials.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.MaxAge != nil { + { 
+ size, err := m.MaxAge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ExposeHeaders) > 0 { + for iNdEx := len(m.ExposeHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExposeHeaders[iNdEx]) + copy(dAtA[i:], m.ExposeHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.ExposeHeaders[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.AllowHeaders) > 0 { + for iNdEx := len(m.AllowHeaders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowHeaders[iNdEx]) + copy(dAtA[i:], m.AllowHeaders[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.AllowHeaders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AllowMethods) > 0 { + for iNdEx := len(m.AllowMethods) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowMethods[iNdEx]) + copy(dAtA[i:], m.AllowMethods[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.AllowMethods[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.AllowOrigin) > 0 { + for iNdEx := len(m.AllowOrigin) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowOrigin[iNdEx]) + copy(dAtA[i:], m.AllowOrigin[iNdEx]) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.AllowOrigin[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HTTPFaultInjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFaultInjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPFaultInjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Abort != nil { + { + size, err := 
m.Abort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Delay != nil { + { + size, err := m.Delay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HTTPFaultInjection_Delay) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFaultInjection_Delay) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPFaultInjection_Delay) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Percentage != nil { + { + size, err := m.Percentage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.HttpDelayType != nil { + { + size := m.HttpDelayType.Size() + i -= size + if _, err := m.HttpDelayType.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Percent != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Percent)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPFaultInjection_Delay_FixedDelay) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HTTPFaultInjection_Delay_FixedDelay) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FixedDelay != nil { + { + size, err := m.FixedDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *HTTPFaultInjection_Delay_ExponentialDelay) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HTTPFaultInjection_Delay_ExponentialDelay) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExponentialDelay != nil { + { + size, err := m.ExponentialDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *HTTPFaultInjection_Abort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPFaultInjection_Abort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPFaultInjection_Abort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Percentage != nil { + { + size, err := m.Percentage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintVirtualService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ErrorType != nil { + { + size := m.ErrorType.Size() + i -= size + if _, err := m.ErrorType.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Percent != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Percent)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HTTPFaultInjection_Abort_HttpStatus) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HTTPFaultInjection_Abort_HttpStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintVirtualService(dAtA, i, uint64(m.HttpStatus)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *HTTPFaultInjection_Abort_GrpcStatus) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HTTPFaultInjection_Abort_GrpcStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.GrpcStatus) + copy(dAtA[i:], m.GrpcStatus) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.GrpcStatus))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HTTPFaultInjection_Abort_Http2Error) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HTTPFaultInjection_Abort_Http2Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Http2Error) + copy(dAtA[i:], m.Http2Error) + i = encodeVarintVirtualService(dAtA, i, uint64(len(m.Http2Error))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *PortSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Number != 0 { + i = encodeVarintVirtualService(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Percent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Percent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Percent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func encodeVarintVirtualService(dAtA []byte, offset int, v uint64) int { + offset -= sovVirtualService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *VirtualService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Gateways) > 0 { + for _, s := range m.Gateways { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Http) > 0 { + for _, e := range m.Http { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Tcp) > 0 { + for _, e := range m.Tcp { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Tls) > 0 { + for _, e := range m.Tls { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.ExportTo) > 0 { + for _, s := range m.ExportTo { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Destination) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + l = len(m.Subset) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + 
sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Match) > 0 { + for _, e := range m.Match { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Route) > 0 { + for _, e := range m.Route { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.Redirect != nil { + l = m.Redirect.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Rewrite != nil { + l = m.Rewrite.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.WebsocketUpgrade { + n += 2 + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Retries != nil { + l = m.Retries.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Fault != nil { + l = m.Fault.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Mirror != nil { + l = m.Mirror.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.CorsPolicy != nil { + l = m.CorsPolicy.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if len(m.AppendHeaders) > 0 { + for k, v := range m.AppendHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.RemoveResponseHeaders) > 0 { + for _, s := range m.RemoveResponseHeaders { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.AppendResponseHeaders) > 0 { + for k, v := range m.AppendResponseHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.RemoveRequestHeaders) > 0 { + for _, s := range m.RemoveRequestHeaders { + l = len(s) + n += 1 + l + 
sovVirtualService(uint64(l)) + } + } + if len(m.AppendRequestHeaders) > 0 { + for k, v := range m.AppendRequestHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if m.Headers != nil { + l = m.Headers.Size() + n += 2 + l + sovVirtualService(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 2 + l + sovVirtualService(uint64(l)) + } + if m.MirrorPercent != nil { + l = m.MirrorPercent.Size() + n += 2 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Headers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Headers_HeaderOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Set) > 0 { + for k, v := range m.Set { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.Add) > 0 { + for k, v := range m.Add { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.Remove) > 0 { + for _, s := range m.Remove { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Match) > 0 { + for _, e := 
range m.Match { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Route) > 0 { + for _, e := range m.Route { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TCPRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Match) > 0 { + for _, e := range m.Match { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.Route) > 0 { + for _, e := range m.Route { + l = e.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPMatchRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Uri != nil { + l = m.Uri.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Scheme != nil { + l = m.Scheme.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Method != nil { + l = m.Method.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Authority != nil { + l = m.Authority.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if len(m.Headers) > 0 { + for k, v := range m.Headers { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVirtualService(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if m.Port != 0 { + n += 1 + sovVirtualService(uint64(m.Port)) + } + if len(m.SourceLabels) > 0 { + for k, v := range m.SourceLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.Gateways) > 0 { + for _, s := range m.Gateways { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.QueryParams) > 0 { + for k, v := range m.QueryParams 
{ + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovVirtualService(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + l + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if m.IgnoreUriCase { + n += 2 + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPRouteDestination) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Destination != nil { + l = m.Destination.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovVirtualService(uint64(m.Weight)) + } + if len(m.RemoveResponseHeaders) > 0 { + for _, s := range m.RemoveResponseHeaders { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.AppendResponseHeaders) > 0 { + for k, v := range m.AppendResponseHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.RemoveRequestHeaders) > 0 { + for _, s := range m.RemoveRequestHeaders { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.AppendRequestHeaders) > 0 { + for k, v := range m.AppendRequestHeaders { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if m.Headers != nil { + l = m.Headers.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RouteDestination) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Destination != nil { + l = m.Destination.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Weight != 0 { + n 
+= 1 + sovVirtualService(uint64(m.Weight)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *L4MatchAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DestinationSubnets) > 0 { + for _, s := range m.DestinationSubnets { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.Port != 0 { + n += 1 + sovVirtualService(uint64(m.Port)) + } + l = len(m.SourceSubnet) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if len(m.SourceLabels) > 0 { + for k, v := range m.SourceLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.Gateways) > 0 { + for _, s := range m.Gateways { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSMatchAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SniHosts) > 0 { + for _, s := range m.SniHosts { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.DestinationSubnets) > 0 { + for _, s := range m.DestinationSubnets { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.Port != 0 { + n += 1 + sovVirtualService(uint64(m.Port)) + } + l = len(m.SourceSubnet) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if len(m.SourceLabels) > 0 { + for k, v := range m.SourceLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovVirtualService(uint64(len(k))) + 1 + len(v) + sovVirtualService(uint64(len(v))) + n += mapEntrySize + 1 + sovVirtualService(uint64(mapEntrySize)) + } + } + if len(m.Gateways) > 0 { + for _, s := range m.Gateways { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
return n +} + +func (m *HTTPRedirect) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.RedirectCode != 0 { + n += 1 + sovVirtualService(uint64(m.RedirectCode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPRewrite) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uri) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringMatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MatchType != nil { + n += m.MatchType.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringMatch_Exact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Exact) + n += 1 + l + sovVirtualService(uint64(l)) + return n +} +func (m *StringMatch_Prefix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovVirtualService(uint64(l)) + return n +} +func (m *StringMatch_Regex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Regex) + n += 1 + l + sovVirtualService(uint64(l)) + return n +} +func (m *HTTPRetry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Attempts != 0 { + n += 1 + sovVirtualService(uint64(m.Attempts)) + } + if m.PerTryTimeout != nil { + l = m.PerTryTimeout.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + l = len(m.RetryOn) + if l > 0 { + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CorsPolicy) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowOrigin) > 0 { + for _, s := range m.AllowOrigin { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.AllowMethods) > 0 { + for _, s := range m.AllowMethods { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.AllowHeaders) > 0 { + for _, s := range m.AllowHeaders { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if len(m.ExposeHeaders) > 0 { + for _, s := range m.ExposeHeaders { + l = len(s) + n += 1 + l + sovVirtualService(uint64(l)) + } + } + if m.MaxAge != nil { + l = m.MaxAge.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.AllowCredentials != nil { + l = m.AllowCredentials.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPFaultInjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Delay != nil { + l = m.Delay.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.Abort != nil { + l = m.Abort.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPFaultInjection_Delay) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Percent != 0 { + n += 1 + sovVirtualService(uint64(m.Percent)) + } + if m.HttpDelayType != nil { + n += m.HttpDelayType.Size() + } + if m.Percentage != nil { + l = m.Percentage.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPFaultInjection_Delay_FixedDelay) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FixedDelay != nil { + l = m.FixedDelay.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + return n +} +func (m *HTTPFaultInjection_Delay_ExponentialDelay) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + if m.ExponentialDelay != nil { + l = m.ExponentialDelay.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + return n +} +func (m *HTTPFaultInjection_Abort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Percent != 0 { + n += 1 + sovVirtualService(uint64(m.Percent)) + } + if m.ErrorType != nil { + n += m.ErrorType.Size() + } + if m.Percentage != nil { + l = m.Percentage.Size() + n += 1 + l + sovVirtualService(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HTTPFaultInjection_Abort_HttpStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovVirtualService(uint64(m.HttpStatus)) + return n +} +func (m *HTTPFaultInjection_Abort_GrpcStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GrpcStatus) + n += 1 + l + sovVirtualService(uint64(l)) + return n +} +func (m *HTTPFaultInjection_Abort_Http2Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Http2Error) + n += 1 + l + sovVirtualService(uint64(l)) + return n +} +func (m *PortSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Number != 0 { + n += 1 + sovVirtualService(uint64(m.Number)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Percent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovVirtualService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozVirtualService(x uint64) (n int) { + return sovVirtualService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *VirtualService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VirtualService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateways", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateways = append(m.Gateways, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Http", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Http = append(m.Http, &HTTPRoute{}) + if err := m.Http[len(m.Http)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tcp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tcp = append(m.Tcp, &TCPRoute{}) + if err := m.Tcp[len(m.Tcp)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Tls = append(m.Tls, &TLSRoute{}) + if err := m.Tls[len(m.Tls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportTo = append(m.ExportTo, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Destination) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Destination: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Destination: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subset", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subset = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &PortSelector{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Match = append(m.Match, &HTTPMatchRequest{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Route", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Route = append(m.Route, &HTTPRouteDestination{}) + if err := m.Route[len(m.Route)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Redirect", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Redirect == nil { + m.Redirect = &HTTPRedirect{} + } + if err := m.Redirect.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rewrite", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rewrite == nil { + m.Rewrite = &HTTPRewrite{} + } + if err := m.Rewrite.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WebsocketUpgrade", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WebsocketUpgrade = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &types.Duration{} + } + if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Retries == nil { + m.Retries = &HTTPRetry{} + } + if err := m.Retries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fault", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fault == nil { + m.Fault = &HTTPFaultInjection{} + } + if err := m.Fault.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mirror", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mirror == nil { + m.Mirror = &Destination{} + } + if err := m.Mirror.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CorsPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CorsPolicy == nil { + m.CorsPolicy = &CorsPolicy{} + } + if err := m.CorsPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field AppendHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppendHeaders == nil { + m.AppendHeaders = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AppendHeaders[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveResponseHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoveResponseHeaders = append(m.RemoveResponseHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppendResponseHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppendResponseHeaders == nil { + m.AppendResponseHeaders = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AppendResponseHeaders[mapkey] = mapvalue + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveRequestHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoveRequestHeaders = append(m.RemoveRequestHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppendRequestHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppendRequestHeaders == nil { + m.AppendRequestHeaders = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + m.AppendRequestHeaders[mapkey] = mapvalue + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = &Headers{} + } + if err := m.Headers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MirrorPercent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MirrorPercent == nil { + m.MirrorPercent = &types.UInt32Value{} + } + if err := m.MirrorPercent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Headers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Headers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Headers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &Headers_HeaderOperations{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &Headers_HeaderOperations{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Headers_HeaderOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Set[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Add == nil { + m.Add = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Add[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remove = append(m.Remove, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Match = append(m.Match, &TLSMatchAttributes{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Route", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Route = append(m.Route, &RouteDestination{}) + if err := m.Route[len(m.Route)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Match = append(m.Match, &L4MatchAttributes{}) + if err := m.Match[len(m.Match)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Route", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Route = append(m.Route, &RouteDestination{}) + if err := m.Route[len(m.Route)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPMatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPMatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPMatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Uri == nil { + m.Uri = &StringMatch{} + } + if err := m.Uri.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheme == nil { + m.Scheme = &StringMatch{} + } + if err := m.Scheme.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Method == nil { + m.Method = &StringMatch{} + } + if err := m.Method.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authority == nil { + m.Authority = &StringMatch{} + } + if err := m.Authority.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]*StringMatch) + } + var mapkey string + var mapvalue *StringMatch + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVirtualService + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return 
ErrInvalidLengthVirtualService + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StringMatch{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceLabels == nil { + m.SourceLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum 
== 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SourceLabels[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateways", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateways = append(m.Gateways, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueryParams == nil { + m.QueryParams = make(map[string]*StringMatch) + } + var mapkey string + var mapvalue *StringMatch + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthVirtualService + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StringMatch{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.QueryParams[mapkey] = mapvalue + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreUriCase", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreUriCase = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPRouteDestination) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPRouteDestination: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPRouteDestination: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Destination == nil { + m.Destination = &Destination{} + } + if err := m.Destination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveResponseHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoveResponseHeaders = append(m.RemoveResponseHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppendResponseHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex 
< 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppendResponseHeaders == nil { + m.AppendResponseHeaders = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) 
+ iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AppendResponseHeaders[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveRequestHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoveRequestHeaders = append(m.RemoveRequestHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppendRequestHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppendRequestHeaders == nil { + m.AppendRequestHeaders = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AppendRequestHeaders[mapkey] = mapvalue + iNdEx = 
postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = &Headers{} + } + if err := m.Headers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteDestination) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteDestination: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteDestination: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Destination == nil { + m.Destination = &Destination{} + } + if err := m.Destination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *L4MatchAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: L4MatchAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: L4MatchAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationSubnets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationSubnets = append(m.DestinationSubnets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", 
wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceSubnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceSubnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceLabels == nil { + m.SourceLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SourceLabels[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateways", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateways = append(m.Gateways, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSMatchAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSMatchAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSMatchAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SniHosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SniHosts = append(m.SniHosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationSubnets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationSubnets = append(m.DestinationSubnets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceSubnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceSubnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceLabels == nil { + m.SourceLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + 
} + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthVirtualService + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SourceLabels[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateways", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateways = append(m.Gateways, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPRedirect) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPRedirect: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPRedirect: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uri = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectCode", wireType) + } + m.RedirectCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RedirectCode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPRewrite) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPRewrite: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPRewrite: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uri = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringMatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringMatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringMatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.MatchType = &StringMatch_Exact{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchType = &StringMatch_Prefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchType = &StringMatch_Regex{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPRetry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPRetry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPRetry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attempts", wireType) + } + m.Attempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Attempts |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PerTryTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PerTryTimeout == nil { + m.PerTryTimeout = &types.Duration{} + } + if err := m.PerTryTimeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RetryOn", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RetryOn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CorsPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CorsPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CorsPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowOrigin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowOrigin = append(m.AllowOrigin, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowMethods", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService 
+ } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowMethods = append(m.AllowMethods, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowHeaders = append(m.AllowHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExposeHeaders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExposeHeaders = append(m.ExposeHeaders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxAge == nil { + m.MaxAge = &types.Duration{} + } + if err := m.MaxAge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowCredentials", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllowCredentials == nil { + m.AllowCredentials = &types.BoolValue{} + } + if err := m.AllowCredentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPFaultInjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPFaultInjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPFaultInjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delay == nil { + m.Delay = &HTTPFaultInjection_Delay{} + } + if err := m.Delay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Abort == nil { + m.Abort = &HTTPFaultInjection_Abort{} + } + if err := m.Abort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPFaultInjection_Delay) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Delay: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Delay: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Percent", wireType) + } + m.Percent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Percent |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FixedDelay", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &types.Duration{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.HttpDelayType = &HTTPFaultInjection_Delay_FixedDelay{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExponentialDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &types.Duration{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.HttpDelayType = &HTTPFaultInjection_Delay_ExponentialDelay{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Percentage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
if m.Percentage == nil { + m.Percentage = &Percent{} + } + if err := m.Percentage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPFaultInjection_Abort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Abort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Abort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Percent", wireType) + } + m.Percent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Percent |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpStatus", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ErrorType = &HTTPFaultInjection_Abort_HttpStatus{v} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GrpcStatus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorType = &HTTPFaultInjection_Abort_GrpcStatus{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Http2Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ErrorType = &HTTPFaultInjection_Abort_Http2Error{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Percentage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthVirtualService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthVirtualService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Percentage == nil { + m.Percentage = &Percent{} + } + if err := m.Percentage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Percent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVirtualService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Percent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Percent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipVirtualService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthVirtualService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVirtualService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVirtualService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVirtualService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVirtualService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthVirtualService + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthVirtualService + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVirtualService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipVirtualService(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthVirtualService + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", 
wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthVirtualService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVirtualService = fmt.Errorf("proto: integer overflow") +) diff --git a/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_deepcopy.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_deepcopy.gen.go new file mode 100644 index 0000000000..9f4e61efab --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_deepcopy.gen.go @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/virtual_service.proto + +// Configuration affecting traffic routing. Here are a few terms useful to define +// in the context of traffic routing. +// +// `Service` a unit of application behavior bound to a unique name in a +// service registry. Services consist of multiple network *endpoints* +// implemented by workload instances running on pods, containers, VMs etc. +// +// `Service versions (a.k.a. subsets)` - In a continuous deployment +// scenario, for a given service, there can be distinct subsets of +// instances running different variants of the application binary. These +// variants are not necessarily different API versions. They could be +// iterative changes to the same service, deployed in different +// environments (prod, staging, dev, etc.). Common scenarios where this +// occurs include A/B testing, canary rollouts, etc. The choice of a +// particular version can be decided based on various criterion (headers, +// url, etc.) and/or by weights assigned to each version. Each service has +// a default version consisting of all its instances. +// +// `Source` - A downstream client calling a service. +// +// `Host` - The address used by a client when attempting to connect to a +// service. 
+// +// `Access model` - Applications address only the destination service +// (Host) without knowledge of individual service versions (subsets). The +// actual choice of the version is determined by the proxy/sidecar, enabling the +// application code to decouple itself from the evolution of dependent +// services. +// +// A `VirtualService` defines a set of traffic routing rules to apply when a host is +// addressed. Each routing rule defines matching criteria for traffic of a specific +// protocol. If the traffic is matched, then it is sent to a named destination service +// (or subset/version of it) defined in the registry. +// +// The source of traffic can also be matched in a routing rule. This allows routing +// to be customized for specific client contexts. +// +// The following example on Kubernetes, routes all HTTP traffic by default to +// pods of the reviews service with label "version: v1". In addition, +// HTTP requests with path starting with /wpcatalog/ or /consumercatalog/ will +// be rewritten to /newcatalog and sent to pods with label "version: v2". +// +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - name: "reviews-v2-routes" +// match: +// - uri: +// prefix: "/wpcatalog" +// - uri: +// prefix: "/consumercatalog" +// rewrite: +// uri: "/newcatalog" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v2 +// - name: "reviews-v1-route" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// ``` +// +// A subset/version of a route destination is identified with a reference +// to a named service subset which must be declared in a corresponding +// `DestinationRule`. 
+// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// spec: +// host: reviews.prod.svc.cluster.local +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// ``` +// + +package v1alpha3 + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// DeepCopyInto supports using VirtualService within kubernetes types, where deepcopy-gen is used. +func (in *VirtualService) DeepCopyInto(out *VirtualService) { + p := proto.Clone(in).(*VirtualService) + *out = *p +} diff --git a/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_json.gen.go b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_json.gen.go new file mode 100644 index 0000000000..00435482dc --- /dev/null +++ b/test/vendor/istio.io/api/networking/v1alpha3/virtual_service_json.gen.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networking/v1alpha3/virtual_service.proto + +// Configuration affecting traffic routing. Here are a few terms useful to define +// in the context of traffic routing. +// +// `Service` a unit of application behavior bound to a unique name in a +// service registry. Services consist of multiple network *endpoints* +// implemented by workload instances running on pods, containers, VMs etc. +// +// `Service versions (a.k.a. subsets)` - In a continuous deployment +// scenario, for a given service, there can be distinct subsets of +// instances running different variants of the application binary. These +// variants are not necessarily different API versions. 
They could be +// iterative changes to the same service, deployed in different +// environments (prod, staging, dev, etc.). Common scenarios where this +// occurs include A/B testing, canary rollouts, etc. The choice of a +// particular version can be decided based on various criterion (headers, +// url, etc.) and/or by weights assigned to each version. Each service has +// a default version consisting of all its instances. +// +// `Source` - A downstream client calling a service. +// +// `Host` - The address used by a client when attempting to connect to a +// service. +// +// `Access model` - Applications address only the destination service +// (Host) without knowledge of individual service versions (subsets). The +// actual choice of the version is determined by the proxy/sidecar, enabling the +// application code to decouple itself from the evolution of dependent +// services. +// +// A `VirtualService` defines a set of traffic routing rules to apply when a host is +// addressed. Each routing rule defines matching criteria for traffic of a specific +// protocol. If the traffic is matched, then it is sent to a named destination service +// (or subset/version of it) defined in the registry. +// +// The source of traffic can also be matched in a routing rule. This allows routing +// to be customized for specific client contexts. +// +// The following example on Kubernetes, routes all HTTP traffic by default to +// pods of the reviews service with label "version: v1". In addition, +// HTTP requests with path starting with /wpcatalog/ or /consumercatalog/ will +// be rewritten to /newcatalog and sent to pods with label "version: v2". 
+// +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: reviews-route +// spec: +// hosts: +// - reviews.prod.svc.cluster.local +// http: +// - name: "reviews-v2-routes" +// match: +// - uri: +// prefix: "/wpcatalog" +// - uri: +// prefix: "/consumercatalog" +// rewrite: +// uri: "/newcatalog" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v2 +// - name: "reviews-v1-route" +// route: +// - destination: +// host: reviews.prod.svc.cluster.local +// subset: v1 +// ``` +// +// A subset/version of a route destination is identified with a reference +// to a named service subset which must be declared in a corresponding +// `DestinationRule`. +// +// ```yaml +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-destination +// spec: +// host: reviews.prod.svc.cluster.local +// subsets: +// - name: v1 +// labels: +// version: v1 +// - name: v2 +// labels: +// version: v2 +// ``` +// + +package v1alpha3 + +import ( + bytes "bytes" + fmt "fmt" + github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + _ "istio.io/gogo-genproto/googleapis/google/api" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// MarshalJSON is a custom marshaler for VirtualService +func (this *VirtualService) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for VirtualService +func (this *VirtualService) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Destination +func (this *Destination) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Destination +func (this *Destination) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPRoute +func (this *HTTPRoute) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPRoute +func (this *HTTPRoute) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Headers +func (this *Headers) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Headers +func (this *Headers) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Headers_HeaderOperations +func (this *Headers_HeaderOperations) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Headers_HeaderOperations +func (this *Headers_HeaderOperations) UnmarshalJSON(b []byte) error 
{ + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TLSRoute +func (this *TLSRoute) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TLSRoute +func (this *TLSRoute) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TCPRoute +func (this *TCPRoute) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TCPRoute +func (this *TCPRoute) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPMatchRequest +func (this *HTTPMatchRequest) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPMatchRequest +func (this *HTTPMatchRequest) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPRouteDestination +func (this *HTTPRouteDestination) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPRouteDestination +func (this *HTTPRouteDestination) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for RouteDestination +func (this *RouteDestination) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for RouteDestination +func (this *RouteDestination) 
UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for L4MatchAttributes +func (this *L4MatchAttributes) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for L4MatchAttributes +func (this *L4MatchAttributes) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for TLSMatchAttributes +func (this *TLSMatchAttributes) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for TLSMatchAttributes +func (this *TLSMatchAttributes) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPRedirect +func (this *HTTPRedirect) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPRedirect +func (this *HTTPRedirect) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPRewrite +func (this *HTTPRewrite) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPRewrite +func (this *HTTPRewrite) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for StringMatch +func (this *StringMatch) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for 
StringMatch +func (this *StringMatch) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPRetry +func (this *HTTPRetry) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPRetry +func (this *HTTPRetry) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for CorsPolicy +func (this *CorsPolicy) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for CorsPolicy +func (this *CorsPolicy) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPFaultInjection +func (this *HTTPFaultInjection) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPFaultInjection +func (this *HTTPFaultInjection) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPFaultInjection_Delay +func (this *HTTPFaultInjection_Delay) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPFaultInjection_Delay +func (this *HTTPFaultInjection_Delay) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for HTTPFaultInjection_Abort +func (this *HTTPFaultInjection_Abort) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + 
return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for HTTPFaultInjection_Abort +func (this *HTTPFaultInjection_Abort) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for PortSelector +func (this *PortSelector) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for PortSelector +func (this *PortSelector) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +// MarshalJSON is a custom marshaler for Percent +func (this *Percent) MarshalJSON() ([]byte, error) { + str, err := VirtualServiceMarshaler.MarshalToString(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for Percent +func (this *Percent) UnmarshalJSON(b []byte) error { + return VirtualServiceUnmarshaler.Unmarshal(bytes.NewReader(b), this) +} + +var ( + VirtualServiceMarshaler = &github_com_gogo_protobuf_jsonpb.Marshaler{} + VirtualServiceUnmarshaler = &github_com_gogo_protobuf_jsonpb.Unmarshaler{} +) diff --git a/test/vendor/istio.io/client-go/LICENSE b/test/vendor/istio.io/client-go/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/client-go/common/config/license-lint.yml b/test/vendor/istio.io/client-go/common/config/license-lint.yml new file mode 100644 index 0000000000..fbdf259624 --- /dev/null +++ b/test/vendor/istio.io/client-go/common/config/license-lint.yml @@ -0,0 +1,141 @@ +unrestricted_licenses: + - Apache-2.0 + - ISC + - AFL-2.1 + - AFL-3.0 + - Artistic-1.0 + - Artistic-2.0 + - Apache-1.1 + - BSD-1-Clause + - BSD-2-Clause + - BSD-3-Clause + - FTL + - LPL-1.02 + - MS-PL + - MIT + - NCSA + - OpenSSL + - PHP-3.0 + - TCP-wrappers + - W3C + - Xnet + - Zlib + +reciprocal_licenses: + - CC0-1.0 + - APSL-2.0 + - CDDL-1.0 + - CDDL-1.1 + - CPL-1.0 + - EPL-1.0 + - IPL-1.0 + - MPL-1.0 + - MPL-1.1 + - MPL-2.0 + - Ruby + +restricted_licenses: + - GPL-1.0-only + - GPL-1.0-or-later + - GPL-2.0-only + - GPL-2.0-or-later + - GPL-3.0-only + - GPL-3.0-or-later + - LGPL-2.0-only + - LGPL-2.0-or-later + - LGPL-2.1-only + - LGPL-2.1-or-later + - LGPL-3.0-only + - LGPL-3.0-or-later + - NPL-1.0 + - NPL-1.1 + - OSL-1.0 + - OSL-1.1 + - OSL-2.0 + - OSL-2.1 + - OSL-3.0 + - QPL-1.0 + - Sleepycat + +whitelisted_modules: + - bitbucket.org/ww/goautoneg + - git.apache.org/thrift.git + - github.com/alicebob/gopher-json + - github.com/antlr/antlr4 + - github.com/apache/thrift + - github.com/bazelbuild/buildtools + - github.com/bgentry/speakeasy + - github.com/bmizerany/assert + - github.com/BurntSushi/xgb + - github.com/DATA-DOG/go-sqlmock + - github.com/daviddengcn/go-colortext + - github.com/dchest/siphash + - github.com/dnaeon/go-vcr + - github.com/docker/docker + - github.com/duosecurity/duo_api_golang + - github.com/dustin/go-humanize + - github.com/facebookgo/stack + - github.com/facebookgo/stackerr + - github.com/ghodss/yaml + - github.com/globalsign/mgo + - github.com/gogo/protobuf + - github.com/google/cadvisor + - github.com/google/pprof + - github.com/gophercloud/gophercloud + - github.com/gotestyourself/gotestyourself + - github.com/hashicorp/consul + - 
github.com/hashicorp/serf + - github.com/hashicorp/vault + - github.com/heketi/heketi + - github.com/heketi/utils + - github.com/inconshreveable/mousetrap + - github.com/JeffAshton/win_pdh + - github.com/jmespath/go-jmespath + - github.com/jteeuwen/go-bindata + - github.com/juju/errors + - github.com/juju/loggo + - github.com/juju/testing + - github.com/julienschmidt/httprouter + - github.com/koneu/natend + - github.com/kr/logfmt + - github.com/libopenstorage/openstorage + - github.com/logrusorgru/aurora + - github.com/magiconair/properties + - github.com/Masterminds/semver + - github.com/Masterminds/sprig + - github.com/mesos/mesos-go + - github.com/miekg/dns + - github.com/munnerz/goautoneg + - github.com/Nvveen/Gotty + - github.com/NYTimes/gziphandler + - github.com/opencontainers/runc + - github.com/openshift/origin + - github.com/pascaldekloe/goe + - github.com/pmezard/go-difflib + - github.com/projectcalico/go-yaml + - github.com/projectcalico/go-yaml-wrapper + - github.com/rcrowley/go-metrics + - github.com/russross/blackfriday + - github.com/russross/blackfriday/v2 + - github.com/sean-/seed + - github.com/signalfx/com_signalfx_metrics_protobuf + - github.com/smartystreets/assertions + - github.com/smartystreets/goconvey + - github.com/storageos/go-api + - github.com/technosophos/moniker + - github.com/ulikunitz/xz + - github.com/xeipuuv/gojsonpointer + - github.com/xeipuuv/gojsonreference + - github.com/xi2/xz + - github.com/ziutek/mymysql + - gopkg.in/check.v1 + - gopkg.in/mgo.v2 + - gopkg.in/tomb.v1 + - gopkg.in/yaml.v1 + - gopkg.in/yaml.v3 + - gotest.tools + - istio.io/tools + - k8s.io/helm + - k8s.io/kubernetes + - modernc.org/cc + - sigs.k8s.io/yaml diff --git a/test/vendor/istio.io/client-go/licenses/cloud.google.com/go/LICENSE b/test/vendor/istio.io/client-go/licenses/cloud.google.com/go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/COPYING b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/NYTimes/gziphandler/LICENSE.md b/test/vendor/istio.io/client-go/licenses/github.com/NYTimes/gziphandler/LICENSE.md new file mode 100644 index 0000000000..b7e2ecb63f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/NYTimes/gziphandler/LICENSE.md @@ -0,0 +1,13 @@ +Copyright (c) 2015 The New York Times Company + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this library except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/purell/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 0000000000..4b9986dea7 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/urlesc/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/urlesc/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/PuerkitoBio/urlesc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/client9/misspell/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/client9/misspell/LICENSE new file mode 100644 index 0000000000..423e1f9e0f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/client9/misspell/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/test/vendor/istio.io/client-go/licenses/github.com/davecgh/go-spew/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000000..bc52e96f2b --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/docopt/docopt-go/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/docopt/docopt-go/LICENSE new file mode 100644 index 0000000000..5e51f73e79 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/docopt/docopt-go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Keith Batten +Copyright (c) 2016 David Irvine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/emicklei/go-restful/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/emicklei/go-restful/LICENSE new file mode 100644 index 0000000000..ece7ec61ef --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/emicklei/go-restful/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/test/vendor/istio.io/client-go/licenses/github.com/evanphx/json-patch/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 0000000000..0eb9b72d84 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/ghodss/yaml/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/ghodss/yaml/LICENSE new file mode 100644 index 0000000000..7805d36de7 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/go-logr/logr/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE rename to test/vendor/istio.io/client-go/licenses/github.com/go-logr/logr/LICENSE diff --git a/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonpointer/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonreference/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/spec/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/spec/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/swag/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/swag/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/gogo/protobuf/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..f57de90da8 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/client-go/licenses/github.com/golang/glog/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/golang/glog/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/golang/groupcache/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/golang/groupcache/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/golang/mock/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/golang/mock/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/golang/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/golang/protobuf/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000000..0f646931a4 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/client-go/licenses/github.com/google/btree/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/google/btree/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/google/go-cmp/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/google/go-cmp/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/google/gofuzz/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/google/gofuzz/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/googleapis/gnostic/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 0000000000..6b0b1270ff --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/test/vendor/istio.io/client-go/licenses/github.com/gregjones/httpcache/LICENSE.txt b/test/vendor/istio.io/client-go/licenses/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 0000000000..81316beb0c --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/test/vendor/istio.io/client-go/licenses/github.com/hashicorp/golang-lru/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 0000000000..be2cc4dfb6 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. 
+ +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. 
+ +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. 
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/json-iterator/go/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/json-iterator/go/LICENSE new file mode 100644 index 0000000000..2cf4f5ab28 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/kisielk/errcheck/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/kisielk/errcheck/LICENSE new file mode 100644 index 0000000000..a2b16b5bd9 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/kisielk/errcheck/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/kisielk/gotool/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/kisielk/gotool/LICENSE new file mode 100644 index 0000000000..1cbf651e2f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/kisielk/gotool/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/kr/pretty/License b/test/vendor/istio.io/client-go/licenses/github.com/kr/pretty/License new file mode 100644 index 0000000000..05c783ccf6 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/kr/pty/License b/test/vendor/istio.io/client-go/licenses/github.com/kr/pty/License new file mode 100644 index 0000000000..6b7558b6b4 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/kr/pty/License @@ -0,0 +1,23 @@ +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/kr/text/License b/test/vendor/istio.io/client-go/licenses/github.com/kr/text/License new file mode 100644 index 0000000000..480a328059 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/mailru/easyjson/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/mailru/easyjson/LICENSE new file mode 100644 index 0000000000..fbff658f70 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/modern-go/concurrent/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/modern-go/concurrent/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/modern-go/concurrent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/modern-go/reflect2/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/modern-go/reflect2/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/modern-go/reflect2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/LICENSE new file mode 100644 index 0000000000..9415ee72c1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE new file mode 100644 index 0000000000..65dc692b6b --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/onsi/gomega/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/onsi/gomega/LICENSE new file mode 100644 index 0000000000..9415ee72c1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/onsi/gomega/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/peterbourgon/diskv/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 0000000000..41ce7f16e1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/pkg/errors/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/pmezard/go-difflib/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 0000000000..c67dad612a --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/spf13/pflag/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/spf13/pflag/LICENSE new file mode 100644 index 0000000000..63ed1cfea1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/github.com/stretchr/objx/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/stretchr/objx/LICENSE new file mode 100644 index 0000000000..44d4d9d5a7 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. 
+Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/github.com/stretchr/testify/LICENSE b/test/vendor/istio.io/client-go/licenses/github.com/stretchr/testify/LICENSE new file mode 100644 index 0000000000..f38ec5956b --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/github.com/stretchr/testify/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/crypto/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/exp/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/exp/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/lint/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/lint/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/net/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/net/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/oauth2/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/sync/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/sys/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/sys/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/text/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/text/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/time/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/time/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/cmd/getgo/LICENSE b/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/cmd/getgo/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/golang.org/x/tools/cmd/getgo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/google.golang.org/appengine/LICENSE b/test/vendor/istio.io/client-go/licenses/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/google.golang.org/genproto/LICENSE b/test/vendor/istio.io/client-go/licenses/google.golang.org/genproto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/google.golang.org/grpc/LICENSE b/test/vendor/istio.io/client-go/licenses/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/gopkg.in/check.v1/LICENSE b/test/vendor/istio.io/client-go/licenses/gopkg.in/check.v1/LICENSE new file mode 100644 index 0000000000..545cf2d331 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/gopkg.in/check.v1/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/gopkg.in/inf.v0/LICENSE b/test/vendor/istio.io/client-go/licenses/gopkg.in/inf.v0/LICENSE new file mode 100644 index 0000000000..87a5cede33 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/gopkg.in/inf.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/licenses/gopkg.in/yaml.v2/LICENSE b/test/vendor/istio.io/client-go/licenses/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/LICENSE b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/LICENSE new file mode 100644 index 0000000000..dfd0314546 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/gcsizes/LICENSE b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/gcsizes/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/gcsizes/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/lint/LICENSE b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/lint/LICENSE new file mode 100644 index 0000000000..796130a123 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/lint/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/ssa/LICENSE b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/ssa/LICENSE new file mode 100644 index 0000000000..aee48041e1 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/honnef.co/go/tools/ssa/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/client-go/licenses/istio.io/api/LICENSE b/test/vendor/istio.io/client-go/licenses/istio.io/api/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/istio.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/istio.io/gogo-genproto/LICENSE b/test/vendor/istio.io/client-go/licenses/istio.io/gogo-genproto/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/istio.io/gogo-genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/api/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/api/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/apimachinery/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/apimachinery/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/client-go/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/client-go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/gengo/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/gengo/LICENSE new file mode 100644 index 0000000000..00b2401109 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/gengo/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/klog/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/klog/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/klog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/k8s.io/kube-openapi/LICENSE b/test/vendor/istio.io/client-go/licenses/k8s.io/kube-openapi/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/structured-merge-diff/LICENSE b/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/structured-merge-diff/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/structured-merge-diff/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/yaml/LICENSE b/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/yaml/LICENSE new file mode 100644 index 0000000000..7805d36de7 --- /dev/null +++ b/test/vendor/istio.io/client-go/licenses/sigs.k8s.io/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/doc.go b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/doc.go new file mode 100644 index 0000000000..a347b27ff9 --- /dev/null +++ b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/doc.go @@ -0,0 +1,21 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by kubetype-gen. DO NOT EDIT. + +// Package has auto-generated kube type wrappers for raw types. 
+// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +groupName=networking.istio.io +package v1alpha3 diff --git a/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/register.gen.go b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/register.gen.go new file mode 100644 index 0000000000..43324097b2 --- /dev/null +++ b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/register.gen.go @@ -0,0 +1,59 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by kubetype-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // Package-wide variables from generator "register". + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +const ( + // Package-wide consts from generator "register". 
+ GroupName = "networking.istio.io" +) + +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DestinationRule{}, + &DestinationRuleList{}, + &EnvoyFilter{}, + &EnvoyFilterList{}, + &Gateway{}, + &GatewayList{}, + &ServiceEntry{}, + &ServiceEntryList{}, + &Sidecar{}, + &SidecarList{}, + &VirtualService{}, + &VirtualServiceList{}, + ) + v1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/types.gen.go b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/types.gen.go new file mode 100644 index 0000000000..cfe30a81a2 --- /dev/null +++ b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/types.gen.go @@ -0,0 +1,214 @@ +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by kubetype-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + networkingv1alpha3 "istio.io/api/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DestinationRule defines policies that apply to traffic intended for a service +// after routing has occurred. 
+// +// +type DestinationRule struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.DestinationRule `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DestinationRuleList is a collection of DestinationRules. +type DestinationRuleList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []DestinationRule `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EnvoyFilter provides a mechanism to customize the Envoy configuration +// generated by Istio Pilot. +// +// +type EnvoyFilter struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.EnvoyFilter `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EnvoyFilterList is a collection of EnvoyFilters. +type EnvoyFilterList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []EnvoyFilter `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Gateway describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. 
+// +// +type Gateway struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.Gateway `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GatewayList is a collection of Gateways. +type GatewayList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []Gateway `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceEntry enables adding additional entries into Istio's internal +// service registry. +// +// +type ServiceEntry struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.ServiceEntry `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceEntryList is a collection of ServiceEntries. +type ServiceEntryList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []ServiceEntry `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// `Sidecar` describes the configuration of the sidecar proxy that mediates +// inbound and outbound communication of the workload instance to which it is +// attached. 
+// +// +type Sidecar struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.Sidecar `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SidecarList is a collection of Sidecars. +type SidecarList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []Sidecar `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// please upgrade the proto package +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Configuration affecting traffic routing. +// +// +type VirtualService struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the implementation of this definition. + // +optional + Spec networkingv1alpha3.VirtualService `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VirtualServiceList is a collection of VirtualServices. 
+type VirtualServiceList struct { + v1.TypeMeta `json:",inline"` + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []VirtualService `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/zz_generated.deepcopy.gen.go b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/zz_generated.deepcopy.gen.go new file mode 100644 index 0000000000..b2a789d5a3 --- /dev/null +++ b/test/vendor/istio.io/client-go/pkg/apis/networking/v1alpha3/zz_generated.deepcopy.gen.go @@ -0,0 +1,383 @@ +// +build !ignore_autogenerated + +// Copyright Istio Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRule) DeepCopyInto(out *DestinationRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRule. 
+func (in *DestinationRule) DeepCopy() *DestinationRule { + if in == nil { + return nil + } + out := new(DestinationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRuleList) DeepCopyInto(out *DestinationRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DestinationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleList. +func (in *DestinationRuleList) DeepCopy() *DestinationRuleList { + if in == nil { + return nil + } + out := new(DestinationRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvoyFilter) DeepCopyInto(out *EnvoyFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyFilter. 
+func (in *EnvoyFilter) DeepCopy() *EnvoyFilter { + if in == nil { + return nil + } + out := new(EnvoyFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvoyFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvoyFilterList) DeepCopyInto(out *EnvoyFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EnvoyFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyFilterList. +func (in *EnvoyFilterList) DeepCopy() *EnvoyFilterList { + if in == nil { + return nil + } + out := new(EnvoyFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EnvoyFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. +func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceEntry) DeepCopyInto(out *ServiceEntry) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceEntry. +func (in *ServiceEntry) DeepCopy() *ServiceEntry { + if in == nil { + return nil + } + out := new(ServiceEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceEntry) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceEntryList) DeepCopyInto(out *ServiceEntryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceEntryList. +func (in *ServiceEntryList) DeepCopy() *ServiceEntryList { + if in == nil { + return nil + } + out := new(ServiceEntryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceEntryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Sidecar) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SidecarList) DeepCopyInto(out *SidecarList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Sidecar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarList. +func (in *SidecarList) DeepCopy() *SidecarList { + if in == nil { + return nil + } + out := new(SidecarList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SidecarList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualService) DeepCopyInto(out *VirtualService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService. +func (in *VirtualService) DeepCopy() *VirtualService { + if in == nil { + return nil + } + out := new(VirtualService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList. +func (in *VirtualServiceList) DeepCopy() *VirtualServiceList { + if in == nil { + return nil + } + out := new(VirtualServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/test/vendor/istio.io/gogo-genproto/LICENSE b/test/vendor/istio.io/gogo-genproto/LICENSE new file mode 100644 index 0000000000..139182e271 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/gogo-genproto/common/config/license-lint.yml b/test/vendor/istio.io/gogo-genproto/common/config/license-lint.yml new file mode 100644 index 0000000000..fbdf259624 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/common/config/license-lint.yml @@ -0,0 +1,141 @@ +unrestricted_licenses: + - Apache-2.0 + - ISC + - AFL-2.1 + - AFL-3.0 + - Artistic-1.0 + - Artistic-2.0 + - Apache-1.1 + - BSD-1-Clause + - BSD-2-Clause + - BSD-3-Clause + - FTL + - LPL-1.02 + - MS-PL + - MIT + - NCSA + - OpenSSL + - PHP-3.0 + - TCP-wrappers + - W3C + - Xnet + - Zlib + +reciprocal_licenses: + - CC0-1.0 + - APSL-2.0 + - CDDL-1.0 + - CDDL-1.1 + - CPL-1.0 + - EPL-1.0 + - IPL-1.0 + - MPL-1.0 + - MPL-1.1 + - MPL-2.0 + - Ruby + +restricted_licenses: + - GPL-1.0-only + - GPL-1.0-or-later + - GPL-2.0-only + - GPL-2.0-or-later + - GPL-3.0-only + - GPL-3.0-or-later + - LGPL-2.0-only + - LGPL-2.0-or-later + - LGPL-2.1-only + - LGPL-2.1-or-later + - LGPL-3.0-only + - LGPL-3.0-or-later + - NPL-1.0 + - NPL-1.1 + - OSL-1.0 + - OSL-1.1 + - OSL-2.0 + - OSL-2.1 + - OSL-3.0 + - QPL-1.0 + - Sleepycat + +whitelisted_modules: + - bitbucket.org/ww/goautoneg + - git.apache.org/thrift.git + - github.com/alicebob/gopher-json + - github.com/antlr/antlr4 + - github.com/apache/thrift + - github.com/bazelbuild/buildtools + - github.com/bgentry/speakeasy + - github.com/bmizerany/assert + - 
github.com/BurntSushi/xgb + - github.com/DATA-DOG/go-sqlmock + - github.com/daviddengcn/go-colortext + - github.com/dchest/siphash + - github.com/dnaeon/go-vcr + - github.com/docker/docker + - github.com/duosecurity/duo_api_golang + - github.com/dustin/go-humanize + - github.com/facebookgo/stack + - github.com/facebookgo/stackerr + - github.com/ghodss/yaml + - github.com/globalsign/mgo + - github.com/gogo/protobuf + - github.com/google/cadvisor + - github.com/google/pprof + - github.com/gophercloud/gophercloud + - github.com/gotestyourself/gotestyourself + - github.com/hashicorp/consul + - github.com/hashicorp/serf + - github.com/hashicorp/vault + - github.com/heketi/heketi + - github.com/heketi/utils + - github.com/inconshreveable/mousetrap + - github.com/JeffAshton/win_pdh + - github.com/jmespath/go-jmespath + - github.com/jteeuwen/go-bindata + - github.com/juju/errors + - github.com/juju/loggo + - github.com/juju/testing + - github.com/julienschmidt/httprouter + - github.com/koneu/natend + - github.com/kr/logfmt + - github.com/libopenstorage/openstorage + - github.com/logrusorgru/aurora + - github.com/magiconair/properties + - github.com/Masterminds/semver + - github.com/Masterminds/sprig + - github.com/mesos/mesos-go + - github.com/miekg/dns + - github.com/munnerz/goautoneg + - github.com/Nvveen/Gotty + - github.com/NYTimes/gziphandler + - github.com/opencontainers/runc + - github.com/openshift/origin + - github.com/pascaldekloe/goe + - github.com/pmezard/go-difflib + - github.com/projectcalico/go-yaml + - github.com/projectcalico/go-yaml-wrapper + - github.com/rcrowley/go-metrics + - github.com/russross/blackfriday + - github.com/russross/blackfriday/v2 + - github.com/sean-/seed + - github.com/signalfx/com_signalfx_metrics_protobuf + - github.com/smartystreets/assertions + - github.com/smartystreets/goconvey + - github.com/storageos/go-api + - github.com/technosophos/moniker + - github.com/ulikunitz/xz + - github.com/xeipuuv/gojsonpointer + - 
github.com/xeipuuv/gojsonreference + - github.com/xi2/xz + - github.com/ziutek/mymysql + - gopkg.in/check.v1 + - gopkg.in/mgo.v2 + - gopkg.in/tomb.v1 + - gopkg.in/yaml.v1 + - gopkg.in/yaml.v3 + - gotest.tools + - istio.io/tools + - k8s.io/helm + - k8s.io/kubernetes + - modernc.org/cc + - sigs.k8s.io/yaml diff --git a/test/vendor/istio.io/gogo-genproto/googleapis/google/api/annotations.pb.go b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/annotations.pb.go new file mode 100644 index 0000000000..bf55829c0a --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/annotations.pb.go @@ -0,0 +1,56 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/api/annotations.proto + +package google_api + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab) } + +var fileDescriptor_c591c5aa9fb79aab = []byte{ + // 232 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0xf8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, + 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, + 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x5c, 0x7c, 0xc9, 0xf9, + 0xb9, 0x48, 0x86, 0x3a, 0x09, 0x38, 0x22, 0xbc, 0x1a, 0x00, 0x72, 0x4d, 0x00, 0xe3, 0x22, 0x26, + 0x16, 0x77, 0xc7, 0x00, 0xcf, 0x24, 0x36, 0xb0, 0xeb, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x5b, 0x05, 0x38, 0x7d, 0x19, 0x01, 
0x00, 0x00, +} diff --git a/test/vendor/istio.io/gogo-genproto/googleapis/google/api/field_behavior.pb.go b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/field_behavior.pb.go new file mode 100644 index 0000000000..e632077282 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/field_behavior.pb.go @@ -0,0 +1,128 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/api/field_behavior.proto + +package google_api + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" + strconv "strconv" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +type FieldBehavior int32 + +const ( + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0 + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL FieldBehavior = 1 + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). 
+ REQUIRED FieldBehavior = 2 + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY FieldBehavior = 3 + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY FieldBehavior = 4 + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE FieldBehavior = 5 +) + +var FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", +} + +var FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, +} + +func (FieldBehavior) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4648f18fd5079967, []int{0} +} + +var E_FieldBehavior = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", +} + +func init() { + proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) + proto.RegisterExtension(E_FieldBehavior) +} + +func init() { proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_4648f18fd5079967) } + +var fileDescriptor_4648f18fd5079967 = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcb, 0x4c, 0xcd, 
0x49, 0x89, 0x4f, 0x4a, 0xcd, + 0x48, 0x2c, 0xcb, 0xcc, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x28, 0xd0, + 0x4b, 0x2c, 0xc8, 0x94, 0x52, 0x80, 0x2a, 0x06, 0xcb, 0x24, 0x95, 0xa6, 0xe9, 0xa7, 0xa4, 0x16, + 0x27, 0x17, 0x65, 0x16, 0x94, 0xc0, 0x54, 0x6b, 0x55, 0x73, 0xf1, 0xba, 0x81, 0x4c, 0x71, 0x82, + 0x1a, 0x22, 0x24, 0xc7, 0x25, 0xe5, 0xe6, 0xe9, 0xea, 0xe3, 0x12, 0xef, 0xe4, 0xea, 0xe1, 0x18, + 0xe6, 0xe9, 0x1f, 0x14, 0x1f, 0xea, 0x17, 0x1c, 0xe0, 0xea, 0xec, 0xe9, 0xe6, 0xe9, 0xea, 0x22, + 0xc0, 0x20, 0xc4, 0xc3, 0xc5, 0xe1, 0x1f, 0x10, 0xe2, 0xe9, 0xef, 0xe7, 0xe8, 0x23, 0xc0, 0x08, + 0xe2, 0x05, 0xb9, 0x06, 0x86, 0x7a, 0x06, 0xb9, 0xba, 0x08, 0x30, 0x09, 0xf1, 0x73, 0x71, 0xfb, + 0x87, 0x86, 0x04, 0x84, 0x86, 0xc4, 0xfb, 0xfb, 0xf9, 0x44, 0x0a, 0x30, 0x0b, 0xf1, 0x71, 0x71, + 0x79, 0xfa, 0xc1, 0xf9, 0x2c, 0x42, 0xbc, 0x5c, 0x9c, 0x9e, 0xbe, 0xbe, 0xa1, 0x21, 0x8e, 0x4e, + 0x3e, 0xae, 0x02, 0xac, 0x56, 0x09, 0x5c, 0x7c, 0xa8, 0x5e, 0x10, 0x92, 0xd5, 0x83, 0xba, 0x1e, + 0xe6, 0x62, 0x3d, 0xb0, 0xeb, 0xfc, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0x8a, 0x25, 0xe6, 0x70, 0x28, + 0x30, 0x6b, 0xf0, 0x19, 0x49, 0xea, 0x21, 0xfc, 0xa8, 0x87, 0xe2, 0xfc, 0x20, 0xde, 0x34, 0x64, + 0xae, 0x53, 0xe2, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, + 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, + 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, + 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x06, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, + 0x2b, 0x9c, 0x84, 0x50, 0xec, 0x08, 0x00, 0x39, 0x2d, 0x80, 0x71, 0x11, 0x13, 0x8b, 0xbb, 0x63, + 0x80, 0x67, 0x12, 0x1b, 0xd8, 0xa9, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x72, 0x37, + 0xaf, 0x99, 0x01, 0x00, 0x00, +} + +func (x FieldBehavior) String() string { + s, ok := FieldBehavior_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} diff --git 
a/test/vendor/istio.io/gogo-genproto/googleapis/google/api/http.pb.go b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/http.pb.go new file mode 100644 index 0000000000..436af02d4e --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/googleapis/google/api/http.pb.go @@ -0,0 +1,2197 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/api/http.proto + +package google_api + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +type Http struct { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. 
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` +} + +func (m *Http) Reset() { *m = Http{} } +func (*Http) ProtoMessage() {} +func (*Http) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{0} +} +func (m *Http) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Http.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Http) XXX_Merge(src proto.Message) { + xxx_messageInfo_Http.Merge(m, src) +} +func (m *Http) XXX_Size() int { + return m.Size() +} +func (m *Http) XXX_DiscardUnknown() { + xxx_messageInfo_Http.DiscardUnknown(m) +} + +var xxx_messageInfo_Http proto.InternalMessageInfo + +func (m *Http) GetRules() []*HttpRule { + if m != nil { + return m.Rules + } + return nil +} + +func (m *Http) GetFullyDecodeReservedExpansion() bool { + if m != nil { + return m.FullyDecodeReservedExpansion + } + return false +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. 
The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. 
This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. 
+// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. 
+// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. 
The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +type HttpRule struct { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. 
+ // + // NOTE: the referred field must be present at the top-level of the request + // message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{1} +} +func (m *HttpRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HttpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpRule.Merge(m, src) +} +func (m *HttpRule) XXX_Size() int { + return m.Size() +} +func (m *HttpRule) XXX_DiscardUnknown() { + xxx_messageInfo_HttpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpRule proto.InternalMessageInfo + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type HttpRule_Get struct { + Get string 
`protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} +func (*HttpRule_Put) isHttpRule_Pattern() {} +func (*HttpRule_Post) isHttpRule_Pattern() {} +func (*HttpRule_Delete) isHttpRule_Pattern() {} +func (*HttpRule_Patch) isHttpRule_Pattern() {} +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetResponseBody() string { + if m != nil { + return 
m.ResponseBody + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HttpRule) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{2} +} +func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CustomHttpPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomHttpPattern.Merge(m, src) +} +func (m *CustomHttpPattern) XXX_Size() int { + return m.Size() +} +func (m *CustomHttpPattern) XXX_DiscardUnknown() { + xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func 
init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9) } + +var fileDescriptor_ff9994be407cdcc9 = []byte{ + // 446 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xb1, 0x8e, 0xd3, 0x40, + 0x10, 0x86, 0xbd, 0x89, 0x93, 0x4b, 0x26, 0x07, 0x12, 0xcb, 0x81, 0x56, 0x08, 0x96, 0x28, 0x34, + 0x11, 0x45, 0x4e, 0x3a, 0x0a, 0x0a, 0x2a, 0x0c, 0x11, 0x47, 0x17, 0xb9, 0xa5, 0xb0, 0x1c, 0x7b, + 0x48, 0x2c, 0x7c, 0xde, 0x95, 0x77, 0x8c, 0x48, 0xc7, 0x23, 0xf0, 0x0c, 0x54, 0x3c, 0x0a, 0x65, + 0xca, 0x13, 0x15, 0x71, 0x1a, 0xca, 0x2b, 0x29, 0xd1, 0xae, 0x1d, 0xee, 0x24, 0x24, 0xba, 0xf9, + 0xff, 0xf9, 0x3c, 0xfe, 0x3d, 0x1e, 0xb8, 0xb7, 0x52, 0x6a, 0x95, 0xe3, 0x69, 0xac, 0xb3, 0xd3, + 0x35, 0x91, 0x9e, 0xe9, 0x52, 0x91, 0xe2, 0xd0, 0xd8, 0xb3, 0x58, 0x67, 0x93, 0x0d, 0xf8, 0xe7, + 0x44, 0x9a, 0x3f, 0x85, 0x5e, 0x59, 0xe5, 0x68, 0x04, 0x1b, 0x77, 0xa7, 0xa3, 0xb3, 0x93, 0xd9, + 0x35, 0x33, 0xb3, 0x40, 0x58, 0xe5, 0x18, 0x36, 0x08, 0x9f, 0xc3, 0xe3, 0xf7, 0x55, 0x9e, 0x6f, + 0xa2, 0x14, 0x13, 0x95, 0x62, 0x54, 0xa2, 0xc1, 0xf2, 0x23, 0xa6, 0x11, 0x7e, 0xd2, 0x71, 0x61, + 0x32, 0x55, 0x88, 0xce, 0x98, 0x4d, 0x07, 0xe1, 0x43, 0x87, 0xbd, 0x76, 0x54, 0xd8, 0x42, 0xf3, + 0x03, 0x33, 0xf9, 0xd1, 0x81, 0xc1, 0x61, 0x34, 0x7f, 0x00, 0x03, 0x83, 0x39, 0x26, 0xa4, 0x4a, + 0xc1, 0xc6, 0x6c, 0x3a, 0x0c, 0xff, 0x6a, 0xce, 0xa1, 0xbb, 0x42, 0x72, 0x33, 0x87, 0xe7, 0x5e, + 0x68, 0x85, 0xf5, 0x74, 0x45, 0xa2, 0x7b, 0xf0, 0x74, 0x45, 0xfc, 0x04, 0x7c, 0xad, 0x0c, 0x09, + 0xbf, 0x35, 0x9d, 0xe2, 0x02, 0xfa, 0x29, 0xe6, 0x48, 0x28, 0x7a, 0xad, 0xdf, 0x6a, 0x7e, 0x1f, + 0x7a, 0x3a, 0xa6, 0x64, 0x2d, 0xfa, 0x6d, 0xa3, 0x91, 0xfc, 0x39, 0xf4, 0x93, 0xca, 0x90, 0xba, + 0x10, 0x83, 0x31, 0x9b, 
0x8e, 0xce, 0x1e, 0xdd, 0x5c, 0xc6, 0x2b, 0xd7, 0xb1, 0xb9, 0x17, 0x31, + 0x11, 0x96, 0x85, 0x1d, 0xd8, 0xe0, 0x9c, 0x83, 0xbf, 0x54, 0xe9, 0x46, 0x1c, 0xb9, 0x0f, 0x70, + 0x35, 0x7f, 0x02, 0xb7, 0x4a, 0x34, 0x5a, 0x15, 0x06, 0x23, 0xd7, 0x3c, 0x76, 0xcd, 0xe3, 0x83, + 0x19, 0x58, 0x68, 0x0e, 0x77, 0xe3, 0x34, 0xcd, 0x28, 0x53, 0x45, 0x9c, 0x47, 0xcb, 0xac, 0x48, + 0xb3, 0x62, 0x65, 0xc4, 0xe8, 0x3f, 0xff, 0x82, 0x5f, 0x3f, 0x10, 0xb4, 0x7c, 0x30, 0x84, 0x23, + 0xdd, 0x84, 0x9a, 0xbc, 0x80, 0x3b, 0xff, 0x24, 0xb5, 0xf9, 0x3e, 0x64, 0x45, 0xda, 0x2e, 0xd8, + 0xd5, 0xd6, 0xd3, 0x31, 0xad, 0x9b, 0xed, 0x86, 0xae, 0x0e, 0xde, 0x6d, 0x77, 0xd2, 0xbb, 0xdc, + 0x49, 0xef, 0x6a, 0x27, 0xd9, 0xe7, 0x5a, 0xb2, 0x6f, 0xb5, 0x64, 0xdf, 0x6b, 0xc9, 0xb6, 0xb5, + 0x64, 0x3f, 0x6b, 0xc9, 0x7e, 0xd5, 0xd2, 0xbb, 0xaa, 0x25, 0xfb, 0xb2, 0x97, 0xde, 0x76, 0x2f, + 0xbd, 0xcb, 0xbd, 0xf4, 0xe0, 0x76, 0xa2, 0x2e, 0x6e, 0x44, 0x0d, 0x86, 0xee, 0xd5, 0xf6, 0xe2, + 0x16, 0xec, 0x37, 0x63, 0x5f, 0x3b, 0xfe, 0x9b, 0x97, 0x8b, 0xb7, 0xcb, 0xbe, 0x3b, 0xc2, 0x67, + 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x85, 0x78, 0xc8, 0xab, 0x9d, 0x02, 0x00, 0x00, +} + +func (this *Http) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Http) + if !ok { + that2, ok := that.(Http) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Rules) != len(that1.Rules) { + return false + } + for i := range this.Rules { + if !this.Rules[i].Equal(that1.Rules[i]) { + return false + } + } + if this.FullyDecodeReservedExpansion != that1.FullyDecodeReservedExpansion { + return false + } + return true +} +func (this *HttpRule) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule) + if !ok { + that2, ok := that.(HttpRule) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == 
nil + } else if this == nil { + return false + } + if this.Selector != that1.Selector { + return false + } + if that1.Pattern == nil { + if this.Pattern != nil { + return false + } + } else if this.Pattern == nil { + return false + } else if !this.Pattern.Equal(that1.Pattern) { + return false + } + if this.Body != that1.Body { + return false + } + if this.ResponseBody != that1.ResponseBody { + return false + } + if len(this.AdditionalBindings) != len(that1.AdditionalBindings) { + return false + } + for i := range this.AdditionalBindings { + if !this.AdditionalBindings[i].Equal(that1.AdditionalBindings[i]) { + return false + } + } + return true +} +func (this *HttpRule_Get) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Get) + if !ok { + that2, ok := that.(HttpRule_Get) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Get != that1.Get { + return false + } + return true +} +func (this *HttpRule_Put) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Put) + if !ok { + that2, ok := that.(HttpRule_Put) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Put != that1.Put { + return false + } + return true +} +func (this *HttpRule_Post) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Post) + if !ok { + that2, ok := that.(HttpRule_Post) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Post != that1.Post { + return false + } + return true +} +func (this *HttpRule_Delete) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Delete) 
+ if !ok { + that2, ok := that.(HttpRule_Delete) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Delete != that1.Delete { + return false + } + return true +} +func (this *HttpRule_Patch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Patch) + if !ok { + that2, ok := that.(HttpRule_Patch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Patch != that1.Patch { + return false + } + return true +} +func (this *HttpRule_Custom) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HttpRule_Custom) + if !ok { + that2, ok := that.(HttpRule_Custom) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Custom.Equal(that1.Custom) { + return false + } + return true +} +func (this *CustomHttpPattern) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CustomHttpPattern) + if !ok { + that2, ok := that.(CustomHttpPattern) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Path != that1.Path { + return false + } + return true +} +func (this *Http) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&google_api.Http{") + if this.Rules != nil { + s = append(s, "Rules: "+fmt.Sprintf("%#v", this.Rules)+",\n") + } + s = append(s, "FullyDecodeReservedExpansion: "+fmt.Sprintf("%#v", this.FullyDecodeReservedExpansion)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HttpRule) GoString() string { 
+ if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&google_api.HttpRule{") + s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") + if this.Pattern != nil { + s = append(s, "Pattern: "+fmt.Sprintf("%#v", this.Pattern)+",\n") + } + s = append(s, "Body: "+fmt.Sprintf("%#v", this.Body)+",\n") + s = append(s, "ResponseBody: "+fmt.Sprintf("%#v", this.ResponseBody)+",\n") + if this.AdditionalBindings != nil { + s = append(s, "AdditionalBindings: "+fmt.Sprintf("%#v", this.AdditionalBindings)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *HttpRule_Get) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Get{` + + `Get:` + fmt.Sprintf("%#v", this.Get) + `}`}, ", ") + return s +} +func (this *HttpRule_Put) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Put{` + + `Put:` + fmt.Sprintf("%#v", this.Put) + `}`}, ", ") + return s +} +func (this *HttpRule_Post) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Post{` + + `Post:` + fmt.Sprintf("%#v", this.Post) + `}`}, ", ") + return s +} +func (this *HttpRule_Delete) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Delete{` + + `Delete:` + fmt.Sprintf("%#v", this.Delete) + `}`}, ", ") + return s +} +func (this *HttpRule_Patch) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Patch{` + + `Patch:` + fmt.Sprintf("%#v", this.Patch) + `}`}, ", ") + return s +} +func (this *HttpRule_Custom) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&google_api.HttpRule_Custom{` + + `Custom:` + fmt.Sprintf("%#v", this.Custom) + `}`}, ", ") + return s +} +func (this *CustomHttpPattern) GoString() string { + if this == nil { + return "nil" + } 
+ s := make([]string, 0, 6) + s = append(s, "&google_api.CustomHttpPattern{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringHttp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Http) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Http) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Http) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FullyDecodeReservedExpansion { + i-- + if m.FullyDecodeReservedExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHttp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HttpRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HttpRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResponseBody) > 0 { + i -= len(m.ResponseBody) + copy(dAtA[i:], m.ResponseBody) + i = encodeVarintHttp(dAtA, i, 
uint64(len(m.ResponseBody))) + i-- + dAtA[i] = 0x62 + } + if len(m.AdditionalBindings) > 0 { + for iNdEx := len(m.AdditionalBindings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalBindings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHttp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Pattern != nil { + { + size := m.Pattern.Size() + i -= size + if _, err := m.Pattern.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x3a + } + if len(m.Selector) > 0 { + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Selector))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpRule_Get) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Get) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Get) + copy(dAtA[i:], m.Get) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Get))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *HttpRule_Put) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Put) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Put) + copy(dAtA[i:], m.Put) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Put))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *HttpRule_Post) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Post) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Post) + copy(dAtA[i:], m.Post) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Post))) + i-- + dAtA[i] = 0x22 + return len(dAtA) - i, nil +} +func (m *HttpRule_Delete) MarshalTo(dAtA []byte) (int, error) { + 
return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Delete) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Delete) + copy(dAtA[i:], m.Delete) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Delete))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} +func (m *HttpRule_Patch) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x32 + return len(dAtA) - i, nil +} +func (m *HttpRule_Custom) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *HttpRule_Custom) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Custom != nil { + { + size, err := m.Custom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintHttp(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *CustomHttpPattern) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomHttpPattern) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomHttpPattern) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Kind) > 0 { + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintHttp(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintHttp(dAtA []byte, offset int, v uint64) int { + 
offset -= sovHttp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Http) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovHttp(uint64(l)) + } + } + if m.FullyDecodeReservedExpansion { + n += 2 + } + return n +} + +func (m *HttpRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovHttp(uint64(l)) + } + if m.Pattern != nil { + n += m.Pattern.Size() + } + l = len(m.Body) + if l > 0 { + n += 1 + l + sovHttp(uint64(l)) + } + if len(m.AdditionalBindings) > 0 { + for _, e := range m.AdditionalBindings { + l = e.Size() + n += 1 + l + sovHttp(uint64(l)) + } + } + l = len(m.ResponseBody) + if l > 0 { + n += 1 + l + sovHttp(uint64(l)) + } + return n +} + +func (m *HttpRule_Get) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Get) + n += 1 + l + sovHttp(uint64(l)) + return n +} +func (m *HttpRule_Put) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Put) + n += 1 + l + sovHttp(uint64(l)) + return n +} +func (m *HttpRule_Post) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Post) + n += 1 + l + sovHttp(uint64(l)) + return n +} +func (m *HttpRule_Delete) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Delete) + n += 1 + l + sovHttp(uint64(l)) + return n +} +func (m *HttpRule_Patch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Patch) + n += 1 + l + sovHttp(uint64(l)) + return n +} +func (m *HttpRule_Custom) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Custom != nil { + l = m.Custom.Size() + n += 1 + l + sovHttp(uint64(l)) + } + return n +} +func (m *CustomHttpPattern) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovHttp(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovHttp(uint64(l)) + } + return n +} + +func sovHttp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozHttp(x uint64) (n int) { + return sovHttp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Http) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]*HttpRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(f.String(), "HttpRule", "HttpRule", 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Http{`, + `Rules:` + repeatedStringForRules + `,`, + `FullyDecodeReservedExpansion:` + fmt.Sprintf("%v", this.FullyDecodeReservedExpansion) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalBindings := "[]*HttpRule{" + for _, f := range this.AdditionalBindings { + repeatedStringForAdditionalBindings += strings.Replace(f.String(), "HttpRule", "HttpRule", 1) + "," + } + repeatedStringForAdditionalBindings += "}" + s := strings.Join([]string{`&HttpRule{`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, + `Body:` + fmt.Sprintf("%v", this.Body) + `,`, + `AdditionalBindings:` + repeatedStringForAdditionalBindings + `,`, + `ResponseBody:` + fmt.Sprintf("%v", this.ResponseBody) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Get) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Get{`, + `Get:` + fmt.Sprintf("%v", this.Get) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Put) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Put{`, + `Put:` + fmt.Sprintf("%v", this.Put) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Post) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Post{`, + `Post:` + fmt.Sprintf("%v", this.Post) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Delete) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Delete{`, + `Delete:` + fmt.Sprintf("%v", this.Delete) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Patch{`, + `Patch:` + fmt.Sprintf("%v", this.Patch) + `,`, + `}`, + }, "") + return s +} +func (this *HttpRule_Custom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HttpRule_Custom{`, + `Custom:` + strings.Replace(fmt.Sprintf("%v", this.Custom), "CustomHttpPattern", "CustomHttpPattern", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomHttpPattern) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomHttpPattern{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func valueToStringHttp(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Http) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Http: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Http: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &HttpRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyDecodeReservedExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FullyDecodeReservedExpansion = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipHttp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HttpRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HttpRule: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HttpRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Get", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = &HttpRule_Get{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = &HttpRule_Put{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = &HttpRule_Post{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = &HttpRule_Delete{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pattern = &HttpRule_Patch{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Body = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CustomHttpPattern{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Pattern = &HttpRule_Custom{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalBindings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalBindings = append(m.AdditionalBindings, &HttpRule{}) + if err := m.AdditionalBindings[len(m.AdditionalBindings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseBody", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseBody = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHttp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomHttpPattern) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CustomHttpPattern: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomHttpPattern: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttp + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttp + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHttp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthHttp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHttp(dAtA []byte) (n int, err error) { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthHttp + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthHttp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHttp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthHttp + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHttp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttp = fmt.Errorf("proto: integer overflow") +) diff --git 
a/test/vendor/istio.io/gogo-genproto/licenses/cloud.google.com/go/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/cloud.google.com/go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/COPYING b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/client9/misspell/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/client9/misspell/LICENSE new file mode 100644 index 0000000000..423e1f9e0f --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/client9/misspell/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2017 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/gogo/protobuf/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..f57de90da8 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. 
All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/glog/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/glog/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/mock/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/mock/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/mock/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/protobuf/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000000..0f646931a4 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/google/go-cmp/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/google/go-cmp/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/errcheck/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/errcheck/LICENSE new file mode 100644 index 0000000000..a2b16b5bd9 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/errcheck/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/gotool/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/gotool/LICENSE new file mode 100644 index 0000000000..1cbf651e2f --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/github.com/kisielk/gotool/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2013 Kamil Kisiel + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/crypto/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/lint/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/lint/LICENSE new file mode 100644 index 0000000000..65d761bc9f --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/net/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/net/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/oauth2/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sync/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sys/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sys/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/text/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/text/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/cmd/getgo/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/cmd/getgo/LICENSE new file mode 100644 index 0000000000..32017f8fa1 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/golang.org/x/tools/cmd/getgo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/appengine/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/genproto/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/genproto/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/grpc/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/grpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/LICENSE new file mode 100644 index 0000000000..dfd0314546 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 Dominik Honnef + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/gcsizes/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/gcsizes/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/gcsizes/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/lint/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/lint/LICENSE new file mode 100644 index 0000000000..796130a123 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/lint/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/ssa/LICENSE b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/ssa/LICENSE new file mode 100644 index 0000000000..aee48041e1 --- /dev/null +++ b/test/vendor/istio.io/gogo-genproto/licenses/honnef.co/go/tools/ssa/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016 Dominik Honnef. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/test/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/test/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go new file mode 100644 index 0000000000..e44c2adabb --- /dev/null +++ b/test/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -0,0 +1,383 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcp + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "os/exec" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/yaml" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/util/jsonpath" + "k8s.io/klog" +) + +func init() { + if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { + klog.Fatalf("Failed to register gcp auth plugin: %v", err) + } +} + +var ( + // Stubbable for testing + execCommand = exec.Command + + // defaultScopes: + // - cloud-platform is the base scope to authenticate to GCP. + // - userinfo.email is used to authenticate to GKE APIs with gserviceaccount + // email instead of numeric uniqueID. + defaultScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email"} +) + +// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide +// tokens for kubectl to authenticate itself to the apiserver. A sample json config +// is provided below with all recognized options described. +// +// { +// 'auth-provider': { +// # Required +// "name": "gcp", +// +// 'config': { +// # Authentication options +// # These options are used while getting a token. +// +// # comma-separated list of GCP API scopes. default value of this field +// # is "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email". +// # to override the API scopes, specify this field explicitly. 
+// "scopes": "https://www.googleapis.com/auth/cloud-platform" +// +// # Caching options +// +// # Raw string data representing cached access token. +// "access-token": "ya29.CjWdA4GiBPTt", +// # RFC3339Nano expiration timestamp for cached access token. +// "expiry": "2016-10-31 22:31:9.123", +// +// # Command execution options +// # These options direct the plugin to execute a specified command and parse +// # token and expiry time from the output of the command. +// +// # Command to execute for access token. Command output will be parsed as JSON. +// # If "cmd-args" is not present, this value will be split on whitespace, with +// # the first element interpreted as the command, remaining elements as args. +// "cmd-path": "/usr/bin/gcloud", +// +// # Arguments to pass to command to execute for access token. +// "cmd-args": "config config-helper --output=json" +// +// # JSONPath to the string field that represents the access token in +// # command output. If omitted, defaults to "{.access_token}". +// "token-key": "{.credential.access_token}", +// +// # JSONPath to the string field that represents expiration timestamp +// # of the access token in the command output. If omitted, defaults to +// # "{.token_expiry}" +// "expiry-key": ""{.credential.token_expiry}", +// +// # golang reference time in the format that the expiration timestamp uses. 
+// # If omitted, defaults to time.RFC3339Nano +// "time-fmt": "2006-01-02 15:04:05.999999999" +// } +// } +// } +// +type gcpAuthProvider struct { + tokenSource oauth2.TokenSource + persister restclient.AuthProviderConfigPersister +} + +func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { + ts, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig) + if err != nil { + return nil, err + } + cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig) + if err != nil { + return nil, err + } + return &gcpAuthProvider{cts, persister}, nil +} + +func isCmdTokenSource(gcpConfig map[string]string) bool { + _, ok := gcpConfig["cmd-path"] + return ok +} + +func tokenSource(isCmd bool, gcpConfig map[string]string) (oauth2.TokenSource, error) { + // Command-based token source + if isCmd { + cmd := gcpConfig["cmd-path"] + if len(cmd) == 0 { + return nil, fmt.Errorf("missing access token cmd") + } + if gcpConfig["scopes"] != "" { + return nil, fmt.Errorf("scopes can only be used when kubectl is using a gcp service account key") + } + var args []string + if cmdArgs, ok := gcpConfig["cmd-args"]; ok { + args = strings.Fields(cmdArgs) + } else { + fields := strings.Fields(cmd) + cmd = fields[0] + args = fields[1:] + } + return newCmdTokenSource(cmd, args, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]), nil + } + + // Google Application Credentials-based token source + scopes := parseScopes(gcpConfig) + ts, err := google.DefaultTokenSource(context.Background(), scopes...) + if err != nil { + return nil, fmt.Errorf("cannot construct google default token source: %v", err) + } + return ts, nil +} + +// parseScopes constructs a list of scopes that should be included in token source +// from the config map. 
+func parseScopes(gcpConfig map[string]string) []string { + scopes, ok := gcpConfig["scopes"] + if !ok { + return defaultScopes + } + if scopes == "" { + return []string{} + } + return strings.Split(gcpConfig["scopes"], ",") +} + +func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { + var resetCache map[string]string + if cts, ok := g.tokenSource.(*cachedTokenSource); ok { + resetCache = cts.baseCache() + } else { + resetCache = make(map[string]string) + } + return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister, resetCache} +} + +func (g *gcpAuthProvider) Login() error { return nil } + +type cachedTokenSource struct { + lk sync.Mutex + source oauth2.TokenSource + accessToken string + expiry time.Time + persister restclient.AuthProviderConfigPersister + cache map[string]string +} + +func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) { + var expiryTime time.Time + if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil { + expiryTime = parsedTime + } + if cache == nil { + cache = make(map[string]string) + } + return &cachedTokenSource{ + source: ts, + accessToken: accessToken, + expiry: expiryTime, + persister: persister, + cache: cache, + }, nil +} + +func (t *cachedTokenSource) Token() (*oauth2.Token, error) { + tok := t.cachedToken() + if tok.Valid() && !tok.Expiry.IsZero() { + return tok, nil + } + tok, err := t.source.Token() + if err != nil { + return nil, err + } + cache := t.update(tok) + if t.persister != nil { + if err := t.persister.Persist(cache); err != nil { + klog.V(4).Infof("Failed to persist token: %v", err) + } + } + return tok, nil +} + +func (t *cachedTokenSource) cachedToken() *oauth2.Token { + t.lk.Lock() + defer t.lk.Unlock() + return &oauth2.Token{ + AccessToken: t.accessToken, + TokenType: "Bearer", + Expiry: t.expiry, + } +} + +func 
(t *cachedTokenSource) update(tok *oauth2.Token) map[string]string { + t.lk.Lock() + defer t.lk.Unlock() + t.accessToken = tok.AccessToken + t.expiry = tok.Expiry + ret := map[string]string{} + for k, v := range t.cache { + ret[k] = v + } + ret["access-token"] = t.accessToken + ret["expiry"] = t.expiry.Format(time.RFC3339Nano) + return ret +} + +// baseCache is the base configuration value for this TokenSource, without any cached ephemeral tokens. +func (t *cachedTokenSource) baseCache() map[string]string { + t.lk.Lock() + defer t.lk.Unlock() + ret := map[string]string{} + for k, v := range t.cache { + ret[k] = v + } + delete(ret, "access-token") + delete(ret, "expiry") + return ret +} + +type commandTokenSource struct { + cmd string + args []string + tokenKey string + expiryKey string + timeFmt string +} + +func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource { + if len(timeFmt) == 0 { + timeFmt = time.RFC3339Nano + } + if len(tokenKey) == 0 { + tokenKey = "{.access_token}" + } + if len(expiryKey) == 0 { + expiryKey = "{.token_expiry}" + } + return &commandTokenSource{ + cmd: cmd, + args: args, + tokenKey: tokenKey, + expiryKey: expiryKey, + timeFmt: timeFmt, + } +} + +func (c *commandTokenSource) Token() (*oauth2.Token, error) { + fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ") + cmd := execCommand(c.cmd, c.args...) 
+ var stderr bytes.Buffer + cmd.Stderr = &stderr + output, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, string(stderr.Bytes())) + } + token, err := c.parseTokenCmdOutput(output) + if err != nil { + return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err) + } + return token, nil +} + +func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) { + output, err := yaml.ToJSON(output) + if err != nil { + return nil, err + } + var data interface{} + if err := json.Unmarshal(output, &data); err != nil { + return nil, err + } + + accessToken, err := parseJSONPath(data, "token-key", c.tokenKey) + if err != nil { + return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(output), err) + } + expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey) + if err != nil { + return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(output), err) + } + var expiry time.Time + if t, err := time.Parse(c.timeFmt, expiryStr); err != nil { + klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) + } else { + expiry = t + } + + return &oauth2.Token{ + AccessToken: accessToken, + TokenType: "Bearer", + Expiry: expiry, + }, nil +} + +func parseJSONPath(input interface{}, name, template string) (string, error) { + j := jsonpath.New(name) + buf := new(bytes.Buffer) + if err := j.Parse(template); err != nil { + return "", err + } + if err := j.Execute(buf, input); err != nil { + return "", err + } + return buf.String(), nil +} + +type conditionalTransport struct { + oauthTransport *oauth2.Transport + persister restclient.AuthProviderConfigPersister + resetCache map[string]string +} + +var _ net.RoundTripperWrapper = &conditionalTransport{} + +func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, 
error) { + if len(req.Header.Get("Authorization")) != 0 { + return t.oauthTransport.Base.RoundTrip(req) + } + + res, err := t.oauthTransport.RoundTrip(req) + + if err != nil { + return nil, err + } + + if res.StatusCode == 401 { + klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") + t.persister.Persist(t.resetCache) + } + + return res, nil +} + +func (t *conditionalTransport) WrappedRoundTripper() http.RoundTripper { return t.oauthTransport.Base } diff --git a/test/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/test/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go new file mode 100644 index 0000000000..739fd3509c --- /dev/null +++ b/test/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go @@ -0,0 +1,94 @@ +//This package is copied from Go library text/template. +//The original private functions indirect and printableValue +//are exported as public functions. +package template + +import ( + "fmt" + "reflect" +) + +var Indirect = indirect +var PrintableValue = printableValue + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. 
+ } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. 
+ default: + return + } + return truth, true +} diff --git a/test/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/test/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go new file mode 100644 index 0000000000..27a008b0a7 --- /dev/null +++ b/test/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go @@ -0,0 +1,599 @@ +//This package is copied from Go library text/template. +//The original private functions eq, ge, gt, le, lt, and ne +//are exported as public functions. +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var Equal = eq +var GreaterEqual = ge +var Greater = gt +var LessEqual = le +var Less = lt +var NotEqual = ne + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. 
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// AddFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string) (reflect.Value, bool) { + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. 
+func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. 
+func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. 
+func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
+ switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. +func lt(arg1, arg2 interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + v2 := reflect.ValueOf(arg2) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind, complexKind: + return false, errBadComparisonType + case floatKind: + truth = v1.Float() < v2.Float() + case intKind: + truth = v1.Int() < v2.Int() + case stringKind: + truth = v1.String() < v2.String() + case uintKind: + truth = v1.Uint() < v2.Uint() + default: + panic("invalid kind") + } + } + return truth, nil +} + +// le evaluates the comparison <= b. +func le(arg1, arg2 interface{}) (bool, error) { + // <= is < or ==. 
+ lessThan, err := lt(arg1, arg2) + if lessThan || err != nil { + return lessThan, err + } + return eq(arg1, arg2) +} + +// gt evaluates the comparison a > b. +func gt(arg1, arg2 interface{}) (bool, error) { + // > is the inverse of <=. + lessOrEqual, err := le(arg1, arg2) + if err != nil { + return false, err + } + return !lessOrEqual, nil +} + +// ge evaluates the comparison a >= b. +func ge(arg1, arg2 interface{}) (bool, error) { + // >= is the inverse of <. + lessThan, err := lt(arg1, arg2) + if err != nil { + return false, err + } + return !lessThan, nil +} + +// HTML escaping. + +var ( + htmlQuot = []byte(""") // shorter than """ + htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 + htmlAmp = []byte("&") + htmlLt = []byte("<") + htmlGt = []byte(">") +) + +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. +func HTMLEscape(w io.Writer, b []byte) { + last := 0 + for i, c := range b { + var html []byte + switch c { + case '"': + html = htmlQuot + case '\'': + html = htmlApos + case '&': + html = htmlAmp + case '<': + html = htmlLt + case '>': + html = htmlGt + default: + continue + } + w.Write(b[last:i]) + w.Write(html) + last = i + 1 + } + w.Write(b[last:]) +} + +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. +func HTMLEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexAny(s, `'"&<>`) < 0 { + return s + } + var b bytes.Buffer + HTMLEscape(&b, []byte(s)) + return b.String() +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return HTMLEscapeString(evalArgs(args)) +} + +// JavaScript escaping. 
+ +var ( + jsLowUni = []byte(`\u00`) + hex = []byte("0123456789ABCDEF") + + jsBackslash = []byte(`\\`) + jsApos = []byte(`\'`) + jsQuot = []byte(`\"`) + jsLt = []byte(`\x3C`) + jsGt = []byte(`\x3E`) +) + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. +func JSEscape(w io.Writer, b []byte) { + last := 0 + for i := 0; i < len(b); i++ { + c := b[i] + + if !jsIsSpecial(rune(c)) { + // fast path: nothing to do + continue + } + w.Write(b[last:i]) + + if c < utf8.RuneSelf { + // Quotes, slashes and angle brackets get quoted. + // Control characters get written as \u00XX. + switch c { + case '\\': + w.Write(jsBackslash) + case '\'': + w.Write(jsApos) + case '"': + w.Write(jsQuot) + case '<': + w.Write(jsLt) + case '>': + w.Write(jsGt) + default: + w.Write(jsLowUni) + t, b := c>>4, c&0x0f + w.Write(hex[t : t+1]) + w.Write(hex[b : b+1]) + } + } else { + // Unicode rune. + r, size := utf8.DecodeRune(b[i:]) + if unicode.IsPrint(r) { + w.Write(b[i : i+size]) + } else { + fmt.Fprintf(w, "\\u%04X", r) + } + i += size - 1 + } + last = i + 1 + } + w.Write(b[last:]) +} + +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. +func JSEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexFunc(s, jsIsSpecial) < 0 { + return s + } + var b bytes.Buffer + JSEscape(&b, []byte(s)) + return b.String() +} + +func jsIsSpecial(r rune) bool { + switch r { + case '\\', '\'', '"', '<', '>': + return true + } + return r < ' ' || utf8.RuneSelf <= r +} + +// JSEscaper returns the escaped JavaScript equivalent of the textual +// representation of its arguments. +func JSEscaper(args ...interface{}) string { + return JSEscapeString(evalArgs(args)) +} + +// URLQueryEscaper returns the escaped value of the textual representation of +// its arguments in a form suitable for embedding in a URL query. 
+func URLQueryEscaper(args ...interface{}) string { + return url.QueryEscape(evalArgs(args)) +} + +// evalArgs formats the list of arguments into a string. It is therefore equivalent to +// fmt.Sprint(args...) +// except that each argument is indirected (if a pointer), as required, +// using the same rules as the default string evaluation during template +// execution. +func evalArgs(args []interface{}) string { + ok := false + var s string + // Fast path for simple common case. + if len(args) == 1 { + s, ok = args[0].(string) + } + if !ok { + for i, arg := range args { + a, ok := printableValue(reflect.ValueOf(arg)) + if ok { + args[i] = a + } // else left fmt do its thing + } + s = fmt.Sprint(args...) + } + return s +} diff --git a/test/vendor/k8s.io/client-go/util/jsonpath/doc.go b/test/vendor/k8s.io/client-go/util/jsonpath/doc.go new file mode 100644 index 0000000000..0effb15c41 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/jsonpath/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package jsonpath is a template engine using jsonpath syntax, +// which can be seen at http://goessner.net/articles/JsonPath/. +// In addition, it has {range} {end} function to iterate list and slice. 
+package jsonpath // import "k8s.io/client-go/util/jsonpath" diff --git a/test/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/test/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go new file mode 100644 index 0000000000..78b6b678f7 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go @@ -0,0 +1,525 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + + "k8s.io/client-go/third_party/forked/golang/template" +) + +type JSONPath struct { + name string + parser *Parser + stack [][]reflect.Value // push and pop values in different scopes + cur []reflect.Value // current scope values + beginRange int + inRange int + endRange int + + allowMissingKeys bool +} + +// New creates a new JSONPath with the given name. +func New(name string) *JSONPath { + return &JSONPath{ + name: name, + beginRange: 0, + inRange: 0, + endRange: 0, + } +} + +// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key +// cannot be located, or simply an empty result. The receiver is returned for chaining. +func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath { + j.allowMissingKeys = allow + return j +} + +// Parse parses the given template and returns an error. 
+func (j *JSONPath) Parse(text string) error { + var err error + j.parser, err = Parse(j.name, text) + return err +} + +// Execute bounds data into template and writes the result. +func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { + fullResults, err := j.FindResults(data) + if err != nil { + return err + } + for ix := range fullResults { + if err := j.PrintResults(wr, fullResults[ix]); err != nil { + return err + } + } + return nil +} + +func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { + if j.parser == nil { + return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) + } + + j.cur = []reflect.Value{reflect.ValueOf(data)} + nodes := j.parser.Root.Nodes + fullResult := [][]reflect.Value{} + for i := 0; i < len(nodes); i++ { + node := nodes[i] + results, err := j.walk(j.cur, node) + if err != nil { + return nil, err + } + + // encounter an end node, break the current block + if j.endRange > 0 && j.endRange <= j.inRange { + j.endRange-- + break + } + // encounter a range node, start a range loop + if j.beginRange > 0 { + j.beginRange-- + j.inRange++ + for k, value := range results { + j.parser.Root.Nodes = nodes[i+1:] + if k == len(results)-1 { + j.inRange-- + } + nextResults, err := j.FindResults(value.Interface()) + if err != nil { + return nil, err + } + fullResult = append(fullResult, nextResults...) 
+ } + break + } + fullResult = append(fullResult, results) + } + return fullResult, nil +} + +// PrintResults writes the results into writer +func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { + for i, r := range results { + text, err := j.evalToText(r) + if err != nil { + return err + } + if i != len(results)-1 { + text = append(text, ' ') + } + if _, err = wr.Write(text); err != nil { + return err + } + } + return nil +} + +// walk visits tree rooted at the given node in DFS order +func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { + switch node := node.(type) { + case *ListNode: + return j.evalList(value, node) + case *TextNode: + return []reflect.Value{reflect.ValueOf(node.Text)}, nil + case *FieldNode: + return j.evalField(value, node) + case *ArrayNode: + return j.evalArray(value, node) + case *FilterNode: + return j.evalFilter(value, node) + case *IntNode: + return j.evalInt(value, node) + case *BoolNode: + return j.evalBool(value, node) + case *FloatNode: + return j.evalFloat(value, node) + case *WildcardNode: + return j.evalWildcard(value, node) + case *RecursiveNode: + return j.evalRecursive(value, node) + case *UnionNode: + return j.evalUnion(value, node) + case *IdentifierNode: + return j.evalIdentifier(value, node) + default: + return value, fmt.Errorf("unexpected Node %v", node) + } +} + +// evalInt evaluates IntNode +func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalFloat evaluates FloatNode +func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalBool evaluates BoolNode +func (j *JSONPath) evalBool(input 
[]reflect.Value, node *BoolNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalList evaluates ListNode +func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { + var err error + curValue := value + for _, node := range node.Nodes { + curValue, err = j.walk(curValue, node) + if err != nil { + return curValue, err + } + } + return curValue, nil +} + +// evalIdentifier evaluates IdentifierNode +func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { + results := []reflect.Value{} + switch node.Name { + case "range": + j.stack = append(j.stack, j.cur) + j.beginRange++ + results = input + case "end": + if j.endRange < j.inRange { // inside a loop, break the current block + j.endRange++ + break + } + // the loop is about to end, pop value and continue the following execution + if len(j.stack) > 0 { + j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] + } else { + return results, fmt.Errorf("not in range, nothing to end") + } + default: + return input, fmt.Errorf("unrecognized identifier %v", node.Name) + } + return results, nil +} + +// evalArray evaluates ArrayNode +func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + + value, isNil := template.Indirect(value) + if isNil { + continue + } + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice", value.Type()) + } + params := node.Params + if !params[0].Known { + params[0].Value = 0 + } + if params[0].Value < 0 { + params[0].Value += value.Len() + } + if !params[1].Known { + params[1].Value = value.Len() + } + + if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) { + params[1].Value += value.Len() + } 
+ sliceLength := value.Len() + if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. + if params[0].Value >= sliceLength || params[0].Value < 0 { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) + } + if params[1].Value > sliceLength || params[1].Value < 0 { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) + } + if params[0].Value > params[1].Value { + return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value) + } + } else { + return result, nil + } + + value = value.Slice(params[0].Value, params[1].Value) + + step := 1 + if params[2].Known { + if params[2].Value <= 0 { + return input, fmt.Errorf("step must be > 0") + } + step = params[2].Value + } + for i := 0; i < value.Len(); i += step { + result = append(result, value.Index(i)) + } + } + return result, nil +} + +// evalUnion evaluates UnionNode +func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, listNode := range node.Nodes { + temp, err := j.evalList(input, listNode) + if err != nil { + return input, err + } + result = append(result, temp...) 
+ } + return result, nil +} + +func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { + t := value.Type() + var inlineValue *reflect.Value + for ix := 0; ix < t.NumField(); ix++ { + f := t.Field(ix) + jsonTag := f.Tag.Get("json") + parts := strings.Split(jsonTag, ",") + if len(parts) == 0 { + continue + } + if parts[0] == node.Value { + return value.Field(ix), nil + } + if len(parts[0]) == 0 { + val := value.Field(ix) + inlineValue = &val + } + } + if inlineValue != nil { + if inlineValue.Kind() == reflect.Struct { + // handle 'inline' + match, err := j.findFieldInValue(inlineValue, node) + if err != nil { + return reflect.Value{}, err + } + if match.IsValid() { + return match, nil + } + } + } + return value.FieldByName(node.Value), nil +} + +// evalField evaluates field of struct or key of map. +func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { + results := []reflect.Value{} + // If there's no input, there's no output + if len(input) == 0 { + return results, nil + } + for _, value := range input { + var result reflect.Value + value, isNil := template.Indirect(value) + if isNil { + continue + } + + if value.Kind() == reflect.Struct { + var err error + if result, err = j.findFieldInValue(&value, node); err != nil { + return nil, err + } + } else if value.Kind() == reflect.Map { + mapKeyType := value.Type().Key() + nodeValue := reflect.ValueOf(node.Value) + // node value type must be convertible to map key type + if !nodeValue.Type().ConvertibleTo(mapKeyType) { + return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) + } + result = value.MapIndex(nodeValue.Convert(mapKeyType)) + } + if result.IsValid() { + results = append(results, result) + } + } + if len(results) == 0 { + if j.allowMissingKeys { + return results, nil + } + return results, fmt.Errorf("%s is not found", node.Value) + } + return results, nil +} + +// evalWildcard extracts all contents 
of the given value +func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalRecursive visits the given value recursively and pushes all of them to result +func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + results := []reflect.Value{} + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + if len(results) != 0 { + result = append(result, value) + output, err := j.evalRecursive(results, node) + if err != nil { + return result, err + } + result = append(result, output...) 
+ } + } + return result, nil +} + +// evalFilter filters array according to FilterNode +func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, _ = template.Indirect(value) + + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) + } + for i := 0; i < value.Len(); i++ { + temp := []reflect.Value{value.Index(i)} + lefts, err := j.evalList(temp, node.Left) + + //case exists + if node.Operator == "exists" { + if len(lefts) > 0 { + results = append(results, value.Index(i)) + } + continue + } + + if err != nil { + return input, err + } + + var left, right interface{} + switch { + case len(lefts) == 0: + continue + case len(lefts) > 1: + return input, fmt.Errorf("can only compare one element at a time") + } + left = lefts[0].Interface() + + rights, err := j.evalList(temp, node.Right) + if err != nil { + return input, err + } + switch { + case len(rights) == 0: + continue + case len(rights) > 1: + return input, fmt.Errorf("can only compare one element at a time") + } + right = rights[0].Interface() + + pass := false + switch node.Operator { + case "<": + pass, err = template.Less(left, right) + case ">": + pass, err = template.Greater(left, right) + case "==": + pass, err = template.Equal(left, right) + case "!=": + pass, err = template.NotEqual(left, right) + case "<=": + pass, err = template.LessEqual(left, right) + case ">=": + pass, err = template.GreaterEqual(left, right) + default: + return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) + } + if err != nil { + return results, err + } + if pass { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalToText translates reflect value to corresponding text +func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { + iface, ok := 
template.PrintableValue(v) + if !ok { + return nil, fmt.Errorf("can't print type %s", v.Type()) + } + var buffer bytes.Buffer + fmt.Fprint(&buffer, iface) + return buffer.Bytes(), nil +} diff --git a/test/vendor/k8s.io/client-go/util/jsonpath/node.go b/test/vendor/k8s.io/client-go/util/jsonpath/node.go new file mode 100644 index 0000000000..83abe8b037 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/jsonpath/node.go @@ -0,0 +1,256 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import "fmt" + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Type returns itself and provides an easy default implementation +func (t NodeType) Type() NodeType { + return t +} + +func (t NodeType) String() string { + return NodeTypeName[t] +} + +const ( + NodeText NodeType = iota + NodeArray + NodeList + NodeField + NodeIdentifier + NodeFilter + NodeInt + NodeFloat + NodeWildcard + NodeRecursive + NodeUnion + NodeBool +) + +var NodeTypeName = map[NodeType]string{ + NodeText: "NodeText", + NodeArray: "NodeArray", + NodeList: "NodeList", + NodeField: "NodeField", + NodeIdentifier: "NodeIdentifier", + NodeFilter: "NodeFilter", + NodeInt: "NodeInt", + NodeFloat: "NodeFloat", + NodeWildcard: "NodeWildcard", + NodeRecursive: "NodeRecursive", + NodeUnion: "NodeUnion", + NodeBool: "NodeBool", +} + +type Node interface { + Type() NodeType + String() string +} + +// ListNode holds a sequence of nodes. 
+type ListNode struct { + NodeType + Nodes []Node // The element nodes in lexical order. +} + +func newList() *ListNode { + return &ListNode{NodeType: NodeList} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) String() string { + return l.Type().String() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Text string // The text; may span newlines. +} + +func newText(text string) *TextNode { + return &TextNode{NodeType: NodeText, Text: text} +} + +func (t *TextNode) String() string { + return fmt.Sprintf("%s: %s", t.Type(), t.Text) +} + +// FieldNode holds field of struct +type FieldNode struct { + NodeType + Value string +} + +func newField(value string) *FieldNode { + return &FieldNode{NodeType: NodeField, Value: value} +} + +func (f *FieldNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Value) +} + +// IdentifierNode holds an identifier +type IdentifierNode struct { + NodeType + Name string +} + +func newIdentifier(value string) *IdentifierNode { + return &IdentifierNode{ + NodeType: NodeIdentifier, + Name: value, + } +} + +func (f *IdentifierNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Name) +} + +// ParamsEntry holds param information for ArrayNode +type ParamsEntry struct { + Value int + Known bool // whether the value is known when parse it + Derived bool +} + +// ArrayNode holds start, end, step information for array index selection +type ArrayNode struct { + NodeType + Params [3]ParamsEntry // start, end, step +} + +func newArray(params [3]ParamsEntry) *ArrayNode { + return &ArrayNode{ + NodeType: NodeArray, + Params: params, + } +} + +func (a *ArrayNode) String() string { + return fmt.Sprintf("%s: %v", a.Type(), a.Params) +} + +// FilterNode holds operand and operator information for filter +type FilterNode struct { + NodeType + Left *ListNode + Right *ListNode + Operator string +} + +func newFilter(left, right *ListNode, operator string) 
*FilterNode { + return &FilterNode{ + NodeType: NodeFilter, + Left: left, + Right: right, + Operator: operator, + } +} + +func (f *FilterNode) String() string { + return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) +} + +// IntNode holds integer value +type IntNode struct { + NodeType + Value int +} + +func newInt(num int) *IntNode { + return &IntNode{NodeType: NodeInt, Value: num} +} + +func (i *IntNode) String() string { + return fmt.Sprintf("%s: %d", i.Type(), i.Value) +} + +// FloatNode holds float value +type FloatNode struct { + NodeType + Value float64 +} + +func newFloat(num float64) *FloatNode { + return &FloatNode{NodeType: NodeFloat, Value: num} +} + +func (i *FloatNode) String() string { + return fmt.Sprintf("%s: %f", i.Type(), i.Value) +} + +// WildcardNode means a wildcard +type WildcardNode struct { + NodeType +} + +func newWildcard() *WildcardNode { + return &WildcardNode{NodeType: NodeWildcard} +} + +func (i *WildcardNode) String() string { + return i.Type().String() +} + +// RecursiveNode means a recursive descent operator +type RecursiveNode struct { + NodeType +} + +func newRecursive() *RecursiveNode { + return &RecursiveNode{NodeType: NodeRecursive} +} + +func (r *RecursiveNode) String() string { + return r.Type().String() +} + +// UnionNode is union of ListNode +type UnionNode struct { + NodeType + Nodes []*ListNode +} + +func newUnion(nodes []*ListNode) *UnionNode { + return &UnionNode{NodeType: NodeUnion, Nodes: nodes} +} + +func (u *UnionNode) String() string { + return u.Type().String() +} + +// BoolNode holds bool value +type BoolNode struct { + NodeType + Value bool +} + +func newBool(value bool) *BoolNode { + return &BoolNode{NodeType: NodeBool, Value: value} +} + +func (b *BoolNode) String() string { + return fmt.Sprintf("%s: %t", b.Type(), b.Value) +} diff --git a/test/vendor/k8s.io/client-go/util/jsonpath/parser.go b/test/vendor/k8s.io/client-go/util/jsonpath/parser.go new file mode 100644 index 
0000000000..1af8f269f7 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/jsonpath/parser.go @@ -0,0 +1,526 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +const eof = -1 + +const ( + leftDelim = "{" + rightDelim = "}" +) + +type Parser struct { + Name string + Root *ListNode + input string + cur *ListNode + pos int + start int + width int +} + +var ( + ErrSyntax = errors.New("invalid syntax") + dictKeyRex = regexp.MustCompile(`^'([^']*)'$`) + sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`) +) + +// Parse parsed the given text and return a node Parser. 
+// If an error is encountered, parsing stops and an empty +// Parser is returned with the error +func Parse(name, text string) (*Parser, error) { + p := NewParser(name) + err := p.Parse(text) + if err != nil { + p = nil + } + return p, err +} + +func NewParser(name string) *Parser { + return &Parser{ + Name: name, + } +} + +// parseAction parsed the expression inside delimiter +func parseAction(name, text string) (*Parser, error) { + p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) + // when error happens, p will be nil, so we need to return here + if err != nil { + return p, err + } + p.Root = p.Root.Nodes[0].(*ListNode) + return p, nil +} + +func (p *Parser) Parse(text string) error { + p.input = text + p.Root = newList() + p.pos = 0 + return p.parseText(p.Root) +} + +// consumeText return the parsed text since last cosumeText +func (p *Parser) consumeText() string { + value := p.input[p.start:p.pos] + p.start = p.pos + return value +} + +// next returns the next rune in the input. +func (p *Parser) next() rune { + if p.pos >= len(p.input) { + p.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(p.input[p.pos:]) + p.width = w + p.pos += p.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (p *Parser) peek() rune { + r := p.next() + p.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (p *Parser) backup() { + p.pos -= p.width +} + +func (p *Parser) parseText(cur *ListNode) error { + for { + if strings.HasPrefix(p.input[p.pos:], leftDelim) { + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return p.parseLeftDelim(cur) + } + if p.next() == eof { + break + } + } + // Correctly reached EOF. + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return nil +} + +// parseLeftDelim scans the left delimiter, which is known to be present. 
+func (p *Parser) parseLeftDelim(cur *ListNode) error { + p.pos += len(leftDelim) + p.consumeText() + newNode := newList() + cur.append(newNode) + cur = newNode + return p.parseInsideAction(cur) +} + +func (p *Parser) parseInsideAction(cur *ListNode) error { + prefixMap := map[string]func(*ListNode) error{ + rightDelim: p.parseRightDelim, + "[?(": p.parseFilter, + "..": p.parseRecursive, + } + for prefix, parseFunc := range prefixMap { + if strings.HasPrefix(p.input[p.pos:], prefix) { + return parseFunc(cur) + } + } + + switch r := p.next(); { + case r == eof || isEndOfLine(r): + return fmt.Errorf("unclosed action") + case r == ' ': + p.consumeText() + case r == '@' || r == '$': //the current object, just pass it + p.consumeText() + case r == '[': + return p.parseArray(cur) + case r == '"' || r == '\'': + return p.parseQuote(cur, r) + case r == '.': + return p.parseField(cur) + case r == '+' || r == '-' || unicode.IsDigit(r): + p.backup() + return p.parseNumber(cur) + case isAlphaNumeric(r): + p.backup() + return p.parseIdentifier(cur) + default: + return fmt.Errorf("unrecognized character in action: %#U", r) + } + return p.parseInsideAction(cur) +} + +// parseRightDelim scans the right delimiter, which is known to be present. +func (p *Parser) parseRightDelim(cur *ListNode) error { + p.pos += len(rightDelim) + p.consumeText() + cur = p.Root + return p.parseText(cur) +} + +// parseIdentifier scans build-in keywords, like "range" "end" +func (p *Parser) parseIdentifier(cur *ListNode) error { + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + + if isBool(value) { + v, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("can not parse bool '%s': %s", value, err.Error()) + } + + cur.append(newBool(v)) + } else { + cur.append(newIdentifier(value)) + } + + return p.parseInsideAction(cur) +} + +// parseRecursive scans the recursive desent operator .. 
+func (p *Parser) parseRecursive(cur *ListNode) error { + p.pos += len("..") + p.consumeText() + cur.append(newRecursive()) + if r := p.peek(); isAlphaNumeric(r) { + return p.parseField(cur) + } + return p.parseInsideAction(cur) +} + +// parseNumber scans number +func (p *Parser) parseNumber(cur *ListNode) error { + r := p.peek() + if r == '+' || r == '-' { + r = p.next() + } + for { + r = p.next() + if r != '.' && !unicode.IsDigit(r) { + p.backup() + break + } + } + value := p.consumeText() + i, err := strconv.Atoi(value) + if err == nil { + cur.append(newInt(i)) + return p.parseInsideAction(cur) + } + d, err := strconv.ParseFloat(value, 64) + if err == nil { + cur.append(newFloat(d)) + return p.parseInsideAction(cur) + } + return fmt.Errorf("cannot parse number %s", value) +} + +// parseArray scans array index selection +func (p *Parser) parseArray(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated array") + case ']': + break Loop + } + } + text := p.consumeText() + text = text[1 : len(text)-1] + if text == "*" { + text = ":" + } + + //union operator + strs := strings.Split(text, ",") + if len(strs) > 1 { + union := []*ListNode{} + for _, str := range strs { + parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) + if err != nil { + return err + } + union = append(union, parser.Root) + } + cur.append(newUnion(union)) + return p.parseInsideAction(cur) + } + + // dict key + value := dictKeyRex.FindStringSubmatch(text) + if value != nil { + parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) + if err != nil { + return err + } + for _, node := range parser.Root.Nodes { + cur.append(node) + } + return p.parseInsideAction(cur) + } + + //slice operator + value = sliceOperatorRex.FindStringSubmatch(text) + if value == nil { + return fmt.Errorf("invalid array index %s", text) + } + value = value[1:] + params := [3]ParamsEntry{} + for i := 0; i < 3; i++ { + if value[i] 
!= "" { + if i > 0 { + value[i] = value[i][1:] + } + if i > 0 && value[i] == "" { + params[i].Known = false + } else { + var err error + params[i].Known = true + params[i].Value, err = strconv.Atoi(value[i]) + if err != nil { + return fmt.Errorf("array index %s is not a number", value[i]) + } + } + } else { + if i == 1 { + params[i].Known = true + params[i].Value = params[0].Value + 1 + params[i].Derived = true + } else { + params[i].Known = false + params[i].Value = 0 + } + } + } + cur.append(newArray(params)) + return p.parseInsideAction(cur) +} + +// parseFilter scans filter inside array selection +func (p *Parser) parseFilter(cur *ListNode) error { + p.pos += len("[?(") + p.consumeText() + begin := false + end := false + var pair rune + +Loop: + for { + r := p.next() + switch r { + case eof, '\n': + return fmt.Errorf("unterminated filter") + case '"', '\'': + if begin == false { + //save the paired rune + begin = true + pair = r + continue + } + //only add when met paired rune + if p.input[p.pos-2] != '\\' && r == pair { + end = true + } + case ')': + //in rightParser below quotes only appear zero or once + //and must be paired at the beginning and end + if begin == end { + break Loop + } + } + } + if p.next() != ']' { + return fmt.Errorf("unclosed array expect ]") + } + reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) + text := p.consumeText() + text = text[:len(text)-2] + value := reg.FindStringSubmatch(text) + if value == nil { + parser, err := parseAction("text", text) + if err != nil { + return err + } + cur.append(newFilter(parser.Root, newList(), "exists")) + } else { + leftParser, err := parseAction("left", value[1]) + if err != nil { + return err + } + rightParser, err := parseAction("right", value[3]) + if err != nil { + return err + } + cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) + } + return p.parseInsideAction(cur) +} + +// parseQuote unquotes string inside double or single quote +func (p *Parser) parseQuote(cur 
*ListNode, end rune) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated quoted string") + case end: + //if it's not escape break the Loop + if p.input[p.pos-2] != '\\' { + break Loop + } + } + } + value := p.consumeText() + s, err := UnquoteExtend(value) + if err != nil { + return fmt.Errorf("unquote string %s error %v", value, err) + } + cur.append(newText(s)) + return p.parseInsideAction(cur) +} + +// parseField scans a field until a terminator +func (p *Parser) parseField(cur *ListNode) error { + p.consumeText() + for p.advance() { + } + value := p.consumeText() + if value == "*" { + cur.append(newWildcard()) + } else { + cur.append(newField(strings.Replace(value, "\\", "", -1))) + } + return p.parseInsideAction(cur) +} + +// advance scans until next non-escaped terminator +func (p *Parser) advance() bool { + r := p.next() + if r == '\\' { + p.next() + } else if isTerminator(r) { + p.backup() + return false + } + return true +} + +// isTerminator reports whether the input is at valid termination character to appear after an identifier. +func isTerminator(r rune) bool { + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '[', ']', '$', '@', '{', '}': + return true + } + return false +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +// isBool reports whether s is a boolean value. 
+func isBool(s string) bool { + return s == "true" || s == "false" +} + +//UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string +func UnquoteExtend(s string) (string, error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' && quote != '\'' { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + return s, nil + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := strconv.UnquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + return string(buf), nil +} + +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/test/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go new file mode 100644 index 0000000000..71bb6322e0 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -0,0 +1,211 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workqueue + +import ( + "math" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} + +// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +func DefaultControllerRateLimiter() RateLimiter { + return NewMaxOfRateLimiter( + NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) + &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) +} + +// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type BucketRateLimiter struct { + *rate.Limiter +} + +var _ RateLimiter = &BucketRateLimiter{} + +func (r *BucketRateLimiter) When(item interface{}) time.Duration { + return r.Limiter.Reserve().Delay() +} + +func (r *BucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *BucketRateLimiter) Forget(item interface{}) { +} + +// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// dealing with max failures and expiration are up to the caller +type ItemExponentialFailureRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + baseDelay time.Duration + maxDelay time.Duration +} + +var _ RateLimiter = &ItemExponentialFailureRateLimiter{} + +func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { + return 
&ItemExponentialFailureRateLimiter{ + failures: map[interface{}]int{}, + baseDelay: baseDelay, + maxDelay: maxDelay, + } +} + +func DefaultItemBasedRateLimiter() RateLimiter { + return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) +} + +func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + exp := r.failures[item] + r.failures[item] = r.failures[item] + 1 + + // The backoff is capped such that 'calculated' value never overflows. + backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) + if backoff > math.MaxInt64 { + return r.maxDelay + } + + calculated := time.Duration(backoff) + if calculated > r.maxDelay { + return r.maxDelay + } + + return calculated +} + +func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type ItemFastSlowRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + maxFastAttempts int + fastDelay time.Duration + slowDelay time.Duration +} + +var _ RateLimiter = &ItemFastSlowRateLimiter{} + +func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { + return &ItemFastSlowRateLimiter{ + failures: map[interface{}]int{}, + fastDelay: fastDelay, + slowDelay: slowDelay, + maxFastAttempts: maxFastAttempts, + } +} + +func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + r.failures[item] = r.failures[item] + 1 + + if r.failures[item] <= r.maxFastAttempts { + return r.fastDelay + } + + return 
r.slowDelay +} + +func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// MaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type MaxOfRateLimiter struct { + limiters []RateLimiter +} + +func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { + ret := time.Duration(0) + for _, limiter := range r.limiters { + curr := limiter.When(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { + return &MaxOfRateLimiter{limiters: limiters} +} + +func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { + ret := 0 + for _, limiter := range r.limiters { + curr := limiter.NumRequeues(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func (r *MaxOfRateLimiter) Forget(item interface{}) { + for _, limiter := range r.limiters { + limiter.Forget(item) + } +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/test/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go new file mode 100644 index 0000000000..6c9e944715 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -0,0 +1,264 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "container/heap" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type DelayingInterface interface { + Interface + // AddAfter adds an item to the workqueue after the indicated duration has passed + AddAfter(item interface{}, duration time.Duration) +} + +// NewDelayingQueue constructs a new workqueue with delayed queuing ability +func NewDelayingQueue() DelayingInterface { + return newDelayingQueue(clock.RealClock{}, "") +} + +func NewNamedDelayingQueue(name string) DelayingInterface { + return newDelayingQueue(clock.RealClock{}, name) +} + +func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { + ret := &delayingType{ + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), + deprecatedMetrics: newDeprecatedRetryMetrics(name), + } + + go ret.waitingLoop() + + return ret +} + +// delayingType wraps an Interface and provides delayed re-enquing +type delayingType struct { + Interface + + // clock tracks time for delayed firing + clock clock.Clock + + // stopCh lets us signal a shutdown to the waiting loop + stopCh chan struct{} + // stopOnce guarantees we only signal shutdown a single time + stopOnce sync.Once + + // heartbeat ensures we wait no more 
than maxWait before firing + heartbeat clock.Ticker + + // waitingForAddCh is a buffered channel that feeds waitingForAdd + waitingForAddCh chan *waitFor + + // metrics counts the number of retries + metrics retryMetrics + deprecatedMetrics retryMetrics +} + +// waitFor holds the data to add and the time it should be added +type waitFor struct { + data t + readyAt time.Time + // index in the priority queue (heap) + index int +} + +// waitForPriorityQueue implements a priority queue for waitFor items. +// +// waitForPriorityQueue implements heap.Interface. The item occurring next in +// time (i.e., the item with the smallest readyAt) is at the root (index 0). +// Peek returns this minimum item at index 0. Pop returns the minimum item after +// it has been removed from the queue and placed at index Len()-1 by +// container/heap. Push adds an item at index Len(), and container/heap +// percolates it into the correct location. +type waitForPriorityQueue []*waitFor + +func (pq waitForPriorityQueue) Len() int { + return len(pq) +} +func (pq waitForPriorityQueue) Less(i, j int) bool { + return pq[i].readyAt.Before(pq[j].readyAt) +} +func (pq waitForPriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the queue. Push should not be called directly; instead, +// use `heap.Push`. +func (pq *waitForPriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*waitFor) + item.index = n + *pq = append(*pq, item) +} + +// Pop removes an item from the queue. Pop should not be called directly; +// instead, use `heap.Pop`. +func (pq *waitForPriorityQueue) Pop() interface{} { + n := len(*pq) + item := (*pq)[n-1] + item.index = -1 + *pq = (*pq)[0:(n - 1)] + return item +} + +// Peek returns the item at the beginning of the queue, without removing the +// item or otherwise mutating the queue. It is safe to call directly. 
+func (pq waitForPriorityQueue) Peek() interface{} { + return pq[0] +} + +// ShutDown stops the queue. After the queue drains, the returned shutdown bool +// on Get() will be true. This method may be invoked more than once. +func (q *delayingType) ShutDown() { + q.stopOnce.Do(func() { + q.Interface.ShutDown() + close(q.stopCh) + q.heartbeat.Stop() + }) +} + +// AddAfter adds the given item to the work queue after the given delay +func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { + // don't add if we're already shutting down + if q.ShuttingDown() { + return + } + + q.metrics.retry() + q.deprecatedMetrics.retry() + + // immediately add things with no delay + if duration <= 0 { + q.Add(item) + return + } + + select { + case <-q.stopCh: + // unblock if ShutDown() is called + case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + } +} + +// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. +// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an +// expired item sitting for more than 10 seconds. +const maxWait = 10 * time.Second + +// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
+func (q *delayingType) waitingLoop() { + defer utilruntime.HandleCrash() + + // Make a placeholder channel to use when there are no items in our list + never := make(<-chan time.Time) + + waitingForQueue := &waitForPriorityQueue{} + heap.Init(waitingForQueue) + + waitingEntryByData := map[t]*waitFor{} + + for { + if q.Interface.ShuttingDown() { + return + } + + now := q.clock.Now() + + // Add ready entries + for waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + if entry.readyAt.After(now) { + break + } + + entry = heap.Pop(waitingForQueue).(*waitFor) + q.Add(entry.data) + delete(waitingEntryByData, entry.data) + } + + // Set up a wait for the first item's readyAt (if one exists) + nextReadyAt := never + if waitingForQueue.Len() > 0 { + entry := waitingForQueue.Peek().(*waitFor) + nextReadyAt = q.clock.After(entry.readyAt.Sub(now)) + } + + select { + case <-q.stopCh: + return + + case <-q.heartbeat.C(): + // continue the loop, which will add ready items + + case <-nextReadyAt: + // continue the loop, which will add ready items + + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + + drained := false + for !drained { + select { + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + default: + drained = true + } + } + } + } +} + +// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue +func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { + // if the entry already exists, update the time only if it would cause the item to be queued sooner + existing, exists := knownEntries[entry.data] + if exists { + if existing.readyAt.After(entry.readyAt) { + existing.readyAt = entry.readyAt + heap.Fix(q, existing.index) + 
} + + return + } + + heap.Push(q, entry) + knownEntries[entry.data] = entry +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/doc.go b/test/vendor/k8s.io/client-go/util/workqueue/doc.go new file mode 100644 index 0000000000..2a00c74ac5 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package workqueue provides a simple queue that supports the following +// features: +// * Fair: items processed in the order in which they are added. +// * Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// * Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// * Shutdown notifications. +package workqueue diff --git a/test/vendor/k8s.io/client-go/util/workqueue/metrics.go b/test/vendor/k8s.io/client-go/util/workqueue/metrics.go new file mode 100644 index 0000000000..be23ddd05f --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -0,0 +1,334 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type queueMetrics interface { + add(item t) + get(item t) + done(item t) + updateUnfinishedWork() +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Inc() + Dec() +} + +// SettableGaugeMetric represents a single numerical value that can arbitrarily go up +// and down. (Separate from GaugeMetric to preserve backwards compatibility.) +type SettableGaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +// HistogramMetric counts individual observations. +type HistogramMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Observe(float64) {} + +// defaultQueueMetrics expects the caller to lock before setting any metrics. 
+type defaultQueueMetrics struct { + clock clock.Clock + + // current depth of a workqueue + depth GaugeMetric + // total number of adds handled by a workqueue + adds CounterMetric + // how long an item stays in a workqueue + latency HistogramMetric + // how long processing an item from a workqueue takes + workDuration HistogramMetric + addTimes map[t]time.Time + processingStartTimes map[t]time.Time + + // how long have current threads been working? + unfinishedWorkSeconds SettableGaugeMetric + longestRunningProcessor SettableGaugeMetric + + // TODO(danielqsj): Remove the following metrics, they are deprecated + deprecatedDepth GaugeMetric + deprecatedAdds CounterMetric + deprecatedLatency SummaryMetric + deprecatedWorkDuration SummaryMetric + deprecatedUnfinishedWorkSeconds SettableGaugeMetric + deprecatedLongestRunningProcessor SettableGaugeMetric +} + +func (m *defaultQueueMetrics) add(item t) { + if m == nil { + return + } + + m.adds.Inc() + m.deprecatedAdds.Inc() + m.depth.Inc() + m.deprecatedDepth.Inc() + if _, exists := m.addTimes[item]; !exists { + m.addTimes[item] = m.clock.Now() + } +} + +func (m *defaultQueueMetrics) get(item t) { + if m == nil { + return + } + + m.depth.Dec() + m.deprecatedDepth.Dec() + m.processingStartTimes[item] = m.clock.Now() + if startTime, exists := m.addTimes[item]; exists { + m.latency.Observe(m.sinceInSeconds(startTime)) + m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime)) + delete(m.addTimes, item) + } +} + +func (m *defaultQueueMetrics) done(item t) { + if m == nil { + return + } + + if startTime, exists := m.processingStartTimes[item]; exists { + m.workDuration.Observe(m.sinceInSeconds(startTime)) + m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime)) + delete(m.processingStartTimes, item) + } +} + +func (m *defaultQueueMetrics) updateUnfinishedWork() { + // Note that a summary metric would be better for this, but prometheus + // doesn't seem to have non-hacky ways to reset the summary metrics. 
+ var total float64 + var oldest float64 + for _, t := range m.processingStartTimes { + age := m.sinceInMicroseconds(t) + total += age + if age > oldest { + oldest = age + } + } + // Convert to seconds; microseconds is unhelpfully granular for this. + total /= 1000000 + m.unfinishedWorkSeconds.Set(total) + m.deprecatedUnfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest / 1000000) + m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds. +} + +type noMetrics struct{} + +func (noMetrics) add(item t) {} +func (noMetrics) get(item t) {} +func (noMetrics) done(item t) {} +func (noMetrics) updateUnfinishedWork() {} + +// Gets the time since the specified start in microseconds. +func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 { + return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} + +// Gets the time since the specified start in seconds. +func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { + return m.clock.Since(start).Seconds() +} + +type retryMetrics interface { + retry() +} + +type defaultRetryMetrics struct { + retries CounterMetric +} + +func (m *defaultRetryMetrics) retry() { + if m == nil { + return + } + + m.retries.Inc() +} + +// MetricsProvider generates various metrics used by the queue. 
+type MetricsProvider interface { + NewDepthMetric(name string) GaugeMetric + NewAddsMetric(name string) CounterMetric + NewLatencyMetric(name string) HistogramMetric + NewWorkDurationMetric(name string) HistogramMetric + NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric + NewRetriesMetric(name string) CounterMetric + NewDeprecatedDepthMetric(name string) GaugeMetric + NewDeprecatedAddsMetric(name string) CounterMetric + NewDeprecatedLatencyMetric(name string) SummaryMetric + NewDeprecatedWorkDurationMetric(name string) SummaryMetric + NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric + NewDeprecatedRetriesMetric(name string) CounterMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric { + return noopMetric{} +} + 
+func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + +var globalMetricsFactory = queueMetricsFactory{ + metricsProvider: noopMetricsProvider{}, +} + +type queueMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { + mp := f.metricsProvider + if len(name) == 0 || mp == (noopMetricsProvider{}) { + return noMetrics{} + } + return &defaultQueueMetrics{ + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + deprecatedDepth: mp.NewDeprecatedDepthMetric(name), + deprecatedAdds: mp.NewDeprecatedAddsMetric(name), + deprecatedLatency: mp.NewDeprecatedLatencyMetric(name), + deprecatedWorkDuration: mp.NewDeprecatedWorkDurationMetric(name), + deprecatedUnfinishedWorkSeconds: mp.NewDeprecatedUnfinishedWorkSecondsMetric(name), + deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, + } +} + +func newRetryMetrics(name string) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + 
return &defaultRetryMetrics{ + retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + } +} + +func newDeprecatedRetryMetrics(name string) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + return &defaultRetryMetrics{ + retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name), + } +} + +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. +func SetProvider(metricsProvider MetricsProvider) { + globalMetricsFactory.setProvider(metricsProvider) +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/test/vendor/k8s.io/client-go/util/workqueue/parallelizer.go new file mode 100644 index 0000000000..5928a0c5b7 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "context" + "sync" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +type DoWorkPieceFunc func(piece int) + +// ParallelizeUntil is a framework that allows for parallelizing N +// independent pieces of work until done or the context is canceled. 
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) { + var stop <-chan struct{} + if ctx != nil { + stop = ctx.Done() + } + + toProcess := make(chan int, pieces) + for i := 0; i < pieces; i++ { + toProcess <- i + } + close(toProcess) + + if pieces < workers { + workers = pieces + } + + wg := sync.WaitGroup{} + wg.Add(workers) + for i := 0; i < workers; i++ { + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + for piece := range toProcess { + select { + case <-stop: + return + default: + doWorkPiece(piece) + } + } + }() + } + wg.Wait() +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/queue.go b/test/vendor/k8s.io/client-go/util/workqueue/queue.go new file mode 100644 index 0000000000..39009b8e79 --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -0,0 +1,212 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" +) + +type Interface interface { + Add(item interface{}) + Len() int + Get() (item interface{}, shutdown bool) + Done(item interface{}) + ShutDown() + ShuttingDown() bool +} + +// New constructs a new work queue (see the package comment). 
+func New() *Type { + return NewNamed("") +} + +func NewNamed(name string) *Type { + rc := clock.RealClock{} + return newQueue( + rc, + globalMetricsFactory.newQueueMetrics(name, rc), + defaultUnfinishedWorkUpdatePeriod, + ) +} + +func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { + t := &Type{ + clock: c, + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: metrics, + unfinishedWorkUpdatePeriod: updatePeriod, + } + go t.updateUnfinishedWorkLoop() + return t +} + +const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond + +// Type is a work queue (see the package comment). +type Type struct { + // queue defines the order in which we will work on items. Every + // element of queue should be in the dirty set and not in the + // processing set. + queue []t + + // dirty defines all of the items that need to be processed. + dirty set + + // Things that are currently being processed are in the processing set. + // These things may be simultaneously in the dirty set. When we finish + // processing something and remove it from this set, we'll check if + // it's in the dirty set, and if so, add it to the queue. + processing set + + cond *sync.Cond + + shuttingDown bool + + metrics queueMetrics + + unfinishedWorkUpdatePeriod time.Duration + clock clock.Clock +} + +type empty struct{} +type t interface{} +type set map[t]empty + +func (s set) has(item t) bool { + _, exists := s[item] + return exists +} + +func (s set) insert(item t) { + s[item] = empty{} +} + +func (s set) delete(item t) { + delete(s, item) +} + +// Add marks item as needing processing. 
+func (q *Type) Add(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if q.shuttingDown { + return + } + if q.dirty.has(item) { + return + } + + q.metrics.add(item) + + q.dirty.insert(item) + if q.processing.has(item) { + return + } + + q.queue = append(q.queue, item) + q.cond.Signal() +} + +// Len returns the current queue length, for informational purposes only. You +// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular +// value, that can't be synchronized properly. +func (q *Type) Len() int { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return len(q.queue) +} + +// Get blocks until it can return an item to be processed. If shutdown = true, +// the caller should end their goroutine. You must call Done with item when you +// have finished processing it. +func (q *Type) Get() (item interface{}, shutdown bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + for len(q.queue) == 0 && !q.shuttingDown { + q.cond.Wait() + } + if len(q.queue) == 0 { + // We must be shutting down. + return nil, true + } + + item, q.queue = q.queue[0], q.queue[1:] + + q.metrics.get(item) + + q.processing.insert(item) + q.dirty.delete(item) + + return item, false +} + +// Done marks item as done processing, and if it has been marked as dirty again +// while it was being processed, it will be re-added to the queue for +// re-processing. +func (q *Type) Done(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.metrics.done(item) + + q.processing.delete(item) + if q.dirty.has(item) { + q.queue = append(q.queue, item) + q.cond.Signal() + } +} + +// ShutDown will cause q to ignore all new items added to it. As soon as the +// worker goroutines have drained the existing items in the queue, they will be +// instructed to exit. 
+func (q *Type) ShutDown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.shuttingDown = true + q.cond.Broadcast() +} + +func (q *Type) ShuttingDown() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + return q.shuttingDown +} + +func (q *Type) updateUnfinishedWorkLoop() { + t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) + defer t.Stop() + for range t.C() { + if !func() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if !q.shuttingDown { + q.metrics.updateUnfinishedWork() + return true + } + return false + + }() { + return + } + } +} diff --git a/test/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/test/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go new file mode 100644 index 0000000000..8321876acf --- /dev/null +++ b/test/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +// RateLimitingInterface is an interface that rate limits items being added to the queue. +type RateLimitingInterface interface { + DelayingInterface + + // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok + AddRateLimited(item interface{}) + + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing + // or for success, we'll stop the rate limiter from tracking it. 
This only clears the `rateLimiter`, you + // still have to call `Done` on the queue. + Forget(item interface{}) + + // NumRequeues returns back how many times the item was requeued + NumRequeues(item interface{}) int +} + +// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewDelayingQueue(), + rateLimiter: rateLimiter, + } +} + +func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewNamedDelayingQueue(name), + rateLimiter: rateLimiter, + } +} + +// rateLimitingType wraps an Interface and provides rateLimited re-enquing +type rateLimitingType struct { + DelayingInterface + + rateLimiter RateLimiter +} + +// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok +func (q *rateLimitingType) AddRateLimited(item interface{}) { + q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +} + +func (q *rateLimitingType) NumRequeues(item interface{}) int { + return q.rateLimiter.NumRequeues(item) +} + +func (q *rateLimitingType) Forget(item interface{}) { + q.rateLimiter.Forget(item) +} diff --git a/test/vendor/knative.dev/pkg/apis/condition_set.go b/test/vendor/knative.dev/pkg/apis/condition_set.go index 7f1917e89c..eba01e94b8 100644 --- a/test/vendor/knative.dev/pkg/apis/condition_set.go +++ b/test/vendor/knative.dev/pkg/apis/condition_set.go @@ -52,6 +52,9 @@ type ConditionManager interface { // set to true. IsHappy() bool + // GetTopLevelCondition finds and returns the top level Condition (happy Condition). + GetTopLevelCondition() *Condition + // GetCondition finds and returns the Condition that matches the ConditionType // previously set on Conditions. 
GetCondition(t ConditionType) *Condition @@ -139,13 +142,15 @@ func (r ConditionSet) Manage(status ConditionsAccessor) ConditionManager { } } -// IsHappy looks at the happy condition and returns true if that condition is +// IsHappy looks at the top level Condition (happy Condition) and returns true if that condition is // set to true. func (r conditionsImpl) IsHappy() bool { - if c := r.GetCondition(r.happy); c == nil || !c.IsTrue() { - return false - } - return true + return r.GetTopLevelCondition().IsTrue() +} + +// GetTopLevelCondition finds and returns the top level Condition (happy Condition). +func (r conditionsImpl) GetTopLevelCondition() *Condition { + return r.GetCondition(r.happy) } // GetCondition finds and returns the Condition that matches the ConditionType diff --git a/test/vendor/knative.dev/pkg/apis/condition_types.go b/test/vendor/knative.dev/pkg/apis/condition_types.go index c90438648b..a3d3f2b2f6 100644 --- a/test/vendor/knative.dev/pkg/apis/condition_types.go +++ b/test/vendor/knative.dev/pkg/apis/condition_types.go @@ -55,7 +55,6 @@ const ( // Conditions defines a readiness condition for a Knative resource. // See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties // +k8s:deepcopy-gen=true -// +k8s:openapi-gen=true type Condition struct { // Type of condition. 
// +required diff --git a/test/vendor/github.com/knative/pkg/apis/duck/cached.go b/test/vendor/knative.dev/pkg/apis/duck/cached.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/cached.go rename to test/vendor/knative.dev/pkg/apis/duck/cached.go diff --git a/test/vendor/knative.dev/pkg/apis/duck/const.go b/test/vendor/knative.dev/pkg/apis/duck/const.go new file mode 100644 index 0000000000..a64576ae06 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/const.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package duck + +const ( + // BindingExcludeLabel is a label that is placed on namespaces and + // resources to exclude them from consideration when binding things. + // It is critical that bindings dealing with Deployments label their + // controller Deployment (or enclosing namespace). If you do not + // specify this label, they are considered for binding (i.e. you opt-in + // to getting everything considered for bindings). This is the default. + BindingExcludeLabel = "bindings.knative.dev/exclude" + + // BindingIncludeLabel is a label that is placed on namespaces and + // resources to include them in consideration when binding things. + // This means that you have to explicitly label the namespaces/resources + // for consideration for bindings. 
+ BindingIncludeLabel = "bindings.knative.dev/include" +) diff --git a/test/vendor/github.com/knative/pkg/apis/duck/doc.go b/test/vendor/knative.dev/pkg/apis/duck/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/doc.go rename to test/vendor/knative.dev/pkg/apis/duck/doc.go diff --git a/test/vendor/github.com/knative/pkg/apis/duck/enqueue.go b/test/vendor/knative.dev/pkg/apis/duck/enqueue.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/enqueue.go rename to test/vendor/knative.dev/pkg/apis/duck/enqueue.go diff --git a/test/vendor/knative.dev/pkg/apis/duck/interface.go b/test/vendor/knative.dev/pkg/apis/duck/interface.go new file mode 100644 index 0000000000..99cf4ac29c --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/interface.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package duck + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/tracker" +) + +// InformerFactory is used to create Informer/Lister pairs for a schema.GroupVersionResource +type InformerFactory interface { + // Get returns a synced Informer/Lister pair for the provided schema.GroupVersionResource. 
+ Get(schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) +} + +// OneOfOurs is the union of our Accessor interface and the OwnerRefable interface +// that is implemented by our resources that implement the kmeta.Accessor. +type OneOfOurs interface { + kmeta.Accessor + kmeta.OwnerRefable +} + +// BindableStatus is the interface that the .status of Bindable resources must +// implement to work smoothly with our BaseReconciler. +type BindableStatus interface { + // InitializeConditions seeds the resource's status.conditions field + // with all of the conditions that this Binding surfaces. + InitializeConditions() + + // MarkBindingAvailable notes that this Binding has been properly + // configured. + MarkBindingAvailable() + + // MarkBindingUnavailable notes the provided reason for why the Binding + // has failed. + MarkBindingUnavailable(reason string, message string) + + // SetObservedGeneration updates the .status.observedGeneration to the + // provided generation value. + SetObservedGeneration(int64) +} + +// Bindable may be implemented by Binding resources to use shared libraries. +type Bindable interface { + OneOfOurs + + // GetSubject returns the standard Binding duck's "Subject" field. + GetSubject() tracker.Reference + + // GetBindingStatus returns the status of the Binding, which must + // implement BindableStatus. 
+ GetBindingStatus() BindableStatus +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/patch.go b/test/vendor/knative.dev/pkg/apis/duck/patch.go similarity index 75% rename from test/vendor/github.com/knative/pkg/apis/duck/patch.go rename to test/vendor/knative.dev/pkg/apis/duck/patch.go index 386aa1f32e..d4a01d1aab 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/patch.go +++ b/test/vendor/knative.dev/pkg/apis/duck/patch.go @@ -37,6 +37,8 @@ func marshallBeforeAfter(before, after interface{}) ([]byte, []byte, error) { return rawBefore, rawAfter, nil } +// CreateMergePatch creates a json merge patch as specified in +// http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 func CreateMergePatch(before, after interface{}) ([]byte, error) { rawBefore, rawAfter, err := marshallBeforeAfter(before, after) if err != nil { @@ -45,6 +47,17 @@ func CreateMergePatch(before, after interface{}) ([]byte, error) { return jsonmergepatch.CreateMergePatch(rawBefore, rawAfter) } +// CreateBytePatch is a helper function that creates the same content as +// CreatePatch, but returns in []byte format instead of JSONPatch. 
+func CreateBytePatch(before, after interface{}) ([]byte, error) { + patch, err := CreatePatch(before, after) + if err != nil { + return nil, err + } + return patch.MarshalJSON() +} + +// CreatePatch creates a patch as specified in http://jsonpatch.com/ func CreatePatch(before, after interface{}) (JSONPatch, error) { rawBefore, rawAfter, err := marshallBeforeAfter(before, after) if err != nil { diff --git a/test/vendor/github.com/knative/pkg/apis/duck/proxy.go b/test/vendor/knative.dev/pkg/apis/duck/proxy.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/proxy.go rename to test/vendor/knative.dev/pkg/apis/duck/proxy.go diff --git a/test/vendor/github.com/knative/pkg/apis/duck/register.go b/test/vendor/knative.dev/pkg/apis/duck/register.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/register.go rename to test/vendor/knative.dev/pkg/apis/duck/register.go diff --git a/test/vendor/github.com/knative/pkg/apis/duck/typed.go b/test/vendor/knative.dev/pkg/apis/duck/typed.go similarity index 92% rename from test/vendor/github.com/knative/pkg/apis/duck/typed.go rename to test/vendor/knative.dev/pkg/apis/duck/typed.go index 397f3a8749..5e1644a939 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/typed.go +++ b/test/vendor/knative.dev/pkg/apis/duck/typed.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/cache" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // TypedInformerFactory implements InformerFactory such that the elements @@ -46,6 +46,12 @@ var _ InformerFactory = (*TypedInformerFactory)(nil) // Get implements InformerFactory. func (dif *TypedInformerFactory) Get(gvr schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) { + // Avoid error cases, like the GVR does not exist. + // It is not a full check. Some RBACs might sneak by, but the window is very small. 
+ if _, err := dif.Client.Resource(gvr).List(metav1.ListOptions{}); err != nil { + return nil, nil, err + } + listObj := dif.Type.GetListType() lw := &cache.ListWatch{ ListFunc: asStructuredLister(dif.Client.Resource(gvr).List, listObj), @@ -60,7 +66,7 @@ func (dif *TypedInformerFactory) Get(gvr schema.GroupVersionResource) (cache.Sha go inf.Run(dif.StopChannel) if ok := cache.WaitForCacheSync(dif.StopChannel, inf.HasSynced); !ok { - return nil, nil, fmt.Errorf("Failed starting shared index informer for %v with type %T", gvr, dif.Type) + return nil, nil, fmt.Errorf("failed starting shared index informer for %v with type %T", gvr, dif.Type) } return inf, lister, nil diff --git a/test/vendor/knative.dev/pkg/apis/duck/unstructured.go b/test/vendor/knative.dev/pkg/apis/duck/unstructured.go new file mode 100644 index 0000000000..3a80390ddb --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/unstructured.go @@ -0,0 +1,62 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package duck + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ToUnstructured takes an instance of a OneOfOurs compatible type and +// converts it to unstructured.Unstructured. 
We take OneOfOurs in place +// or runtime.Object because sometimes we get resources that do not have their +// TypeMeta populated but that is required for unstructured.Unstructured to +// deserialize things, so we leverage our content-agnostic GroupVersionKind() +// method to populate this as-needed (in a copy, so that we don't modify the +// informer's copy, if that is what we are passed). +func ToUnstructured(desired OneOfOurs) (*unstructured.Unstructured, error) { + // If the TypeMeta is not populated, then unmarshalling will fail, so ensure + // the TypeMeta is populated. See also EnsureTypeMeta. + if gvk := desired.GroupVersionKind(); gvk.Version == "" || gvk.Kind == "" { + gvk = desired.GetGroupVersionKind() + desired = desired.DeepCopyObject().(OneOfOurs) + desired.SetGroupVersionKind(gvk) + } + + // Convert desired to unstructured.Unstructured + b, err := json.Marshal(desired) + if err != nil { + return nil, err + } + ud := &unstructured.Unstructured{} + if err := json.Unmarshal(b, ud); err != nil { + return nil, err + } + return ud, nil +} + +// FromUnstructured takes unstructured object from (say from client-go/dynamic) and +// converts it into our duck types. 
+func FromUnstructured(obj json.Marshaler, target interface{}) error { + // Use the unstructured marshaller to ensure it's proper JSON + raw, err := obj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(raw, &target) +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/addressable_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go similarity index 79% rename from test/vendor/github.com/knative/pkg/apis/duck/v1beta1/addressable_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go index 379098e7f2..e5955aeb69 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/addressable_types.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go @@ -14,19 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( + "context" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) +// +genduck + // Addressable provides a generic mechanism for a custom resource // definition to indicate a destination for message delivery. - +// // Addressable is the schema for the destination information. This is // typically stored in the object's `status`, as this information may // be generated by the controller. @@ -34,10 +39,13 @@ type Addressable struct { URL *apis.URL `json:"url,omitempty"` } -// Addressable is an Implementable "duck type". -var _ duck.Implementable = (*Addressable)(nil) +var ( + // Addressable is an Implementable "duck type". + _ duck.Implementable = (*Addressable)(nil) + // Addressable is a Convertible type. 
+ _ apis.Convertible = (*Addressable)(nil) +) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddressableType is a skeleton type wrapping Addressable in the manner we expect @@ -68,6 +76,16 @@ func (*Addressable) GetFullType() duck.Populatable { return &AddressableType{} } +// ConvertUp implements apis.Convertible +func (a *Addressable) ConvertUp(ctx context.Context, to apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", to) +} + +// ConvertDown implements apis.Convertible +func (a *Addressable) ConvertDown(ctx context.Context, from apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", from) +} + // Populate implements duck.Populatable func (t *AddressableType) Populate() { t.Status = AddressStatus{ diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/destination.go b/test/vendor/knative.dev/pkg/apis/duck/v1/destination.go new file mode 100644 index 0000000000..136e757aea --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/destination.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" +) + +// Destination represents a target of an invocation over HTTP. +type Destination struct { + // Ref points to an Addressable. 
+ // +optional + Ref *corev1.ObjectReference `json:"ref,omitempty"` + + // URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + // +optional + URI *apis.URL `json:"uri,omitempty"` +} + +func (dest *Destination) Validate(ctx context.Context) *apis.FieldError { + if dest == nil { + return nil + } + return ValidateDestination(*dest).ViaField(apis.CurrentField) +} + +// ValidateDestination validates Destination. +func ValidateDestination(dest Destination) *apis.FieldError { + var ref *corev1.ObjectReference + if dest.Ref != nil { + ref = dest.Ref + } + if ref == nil && dest.URI == nil { + return apis.ErrGeneric("expected at least one, got none", "ref", "uri") + } + + if ref != nil && dest.URI != nil && dest.URI.URL().IsAbs() { + return apis.ErrGeneric("Absolute URI is not allowed when Ref or [apiVersion, kind, name] is present", "[apiVersion, kind, name]", "ref", "uri") + } + // IsAbs() check whether the URL has a non-empty scheme. Besides the non-empty scheme, we also require dest.URI has a non-empty host + if ref == nil && dest.URI != nil && (!dest.URI.URL().IsAbs() || dest.URI.Host == "") { + return apis.ErrInvalidValue("Relative URI is not allowed when Ref and [apiVersion, kind, name] is absent", "uri") + } + if ref != nil && dest.URI == nil { + if dest.Ref != nil { + return validateDestinationRef(*ref).ViaField("ref") + } + } + return nil +} + +// GetRef gets the ObjectReference from this Destination, if one is present. If no ref is present, +// then nil is returned. +func (dest *Destination) GetRef() *corev1.ObjectReference { + if dest == nil { + return nil + } + return dest.Ref +} + +func validateDestinationRef(ref corev1.ObjectReference) *apis.FieldError { + // Check the object. 
+ var errs *apis.FieldError + // Required Fields + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) + } + if ref.APIVersion == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")) + } + if ref.Kind == "" { + errs = errs.Also(apis.ErrMissingField("kind")) + } + + return errs +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/doc.go b/test/vendor/knative.dev/pkg/apis/duck/v1/doc.go new file mode 100644 index 0000000000..161005b080 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Api versions allow the api contract for a resource to be changed while keeping +// backward compatibility by support multiple concurrent versions +// of the same resource + +// +k8s:deepcopy-gen=package +// +groupName=duck.knative.dev +package v1 diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/podspec_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1/podspec_types.go new file mode 100644 index 0000000000..0dd9ec3386 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/podspec_types.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// +genduck + +// PodSpecable is implemented by types containing a PodTemplateSpec +// in the manner of ReplicaSet, Deployment, DaemonSet, StatefulSet. +type PodSpecable corev1.PodTemplateSpec + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithPod is the shell that demonstrates how PodSpecable types wrap +// a PodSpec. +type WithPod struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WithPodSpec `json:"spec,omitempty"` +} + +// WithPodSpec is the shell around the PodSpecable within WithPod. +type WithPodSpec struct { + Template PodSpecable `json:"template,omitempty"` +} + +// Assert that we implement the interfaces necessary to +// use duck.VerifyType. 
+var ( + _ duck.Populatable = (*WithPod)(nil) + _ duck.Implementable = (*PodSpecable)(nil) + _ apis.Listable = (*WithPod)(nil) +) + +// GetFullType implements duck.Implementable +func (*PodSpecable) GetFullType() duck.Populatable { + return &WithPod{} +} + +// Populate implements duck.Populatable +func (t *WithPod) Populate() { + t.Spec.Template = PodSpecable{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "container-name", + Image: "container-image:latest", + }}, + }, + } +} + +// GetListType implements apis.Listable +func (*WithPod) GetListType() runtime.Object { + return &WithPodList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WithPodList is a list of WithPod resources +type WithPodList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []WithPod `json:"items"` +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/register.go b/test/vendor/knative.dev/pkg/apis/duck/v1/register.go new file mode 100644 index 0000000000..e3af46d6f8 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: duck.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes( + SchemeGroupVersion, + &KResource{}, + (&KResource{}).GetListType(), + &AddressableType{}, + (&AddressableType{}).GetListType(), + &Source{}, + (&Source{}).GetListType(), + &WithPod{}, + (&WithPod{}).GetListType(), + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/source_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1/source_types.go new file mode 100644 index 0000000000..321156d731 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/source_types.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// Source is an Implementable "duck type". +var _ duck.Implementable = (*Source)(nil) + +// +genduck +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Source is the minimum resource shape to adhere to the Source Specification. +// This duck type is intended to allow implementors of Sources and +// Importers to verify their own resources meet the expectations. +// This is not a real resource. +// NOTE: The Source Specification is in progress and the shape and names could +// be modified until it has been accepted. +type Source struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SourceSpec `json:"spec"` + Status SourceStatus `json:"status"` +} + +type SourceSpec struct { + // Sink is a reference to an object that will resolve to a domain name or a + // URI directly to use as the sink. + Sink Destination `json:"sink,omitempty"` + + // CloudEventOverrides defines overrides to control the output format and + // modifications of the event sent to the sink. + // +optional + CloudEventOverrides *CloudEventOverrides `json:"ceOverrides,omitempty"` +} + +// CloudEventOverrides defines arguments for a Source that control the output +// format of the CloudEvents produced by the Source. 
+type CloudEventOverrides struct { + // Extensions specify what attribute are added or overridden on the + // outbound event. Each `Extensions` key-value pair are set on the event as + // an attribute extension independently. + // +optional + Extensions map[string]string `json:"extensions,omitempty"` +} + +// SourceStatus shows how we expect folks to embed Addressable in +// their Status field. +type SourceStatus struct { + // inherits duck/v1beta1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last + // processed by the controller. + // * Conditions - the latest available observations of a resource's current + // state. + Status `json:",inline"` + + // SinkURI is the current active sink URI that has been configured for the + // Source. + // +optional + SinkURI *apis.URL `json:"sinkUri,omitempty"` +} + +// IsReady returns true if the resource is ready overall. +func (ss *SourceStatus) IsReady() bool { + for _, c := range ss.Conditions { + switch c.Type { + // Look for the "happy" condition, which is the only condition that + // we can reliably understand to be the overall state of the resource. + case apis.ConditionReady, apis.ConditionSucceeded: + return c.IsTrue() + } + } + return false +} + +var ( + // Verify Source resources meet duck contracts. + _ duck.Populatable = (*Source)(nil) + _ apis.Listable = (*Source)(nil) +) + +const ( + // SourceConditionSinkProvided has status True when the Source + // has been configured with a sink target that is resolvable. 
+ SourceConditionSinkProvided apis.ConditionType = "SinkProvided" +) + +// GetFullType implements duck.Implementable +func (*Source) GetFullType() duck.Populatable { + return &Source{} +} + +// Populate implements duck.Populatable +func (s *Source) Populate() { + s.Spec.Sink = Destination{ + URI: &apis.URL{ + Scheme: "https", + Host: "tableflip.dev", + RawQuery: "flip=mattmoor", + }, + } + s.Spec.CloudEventOverrides = &CloudEventOverrides{ + Extensions: map[string]string{"boosh": "kakow"}, + } + s.Status.ObservedGeneration = 42 + s.Status.Conditions = Conditions{{ + // Populate ALL fields + Type: SourceConditionSinkProvided, + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + }} + s.Status.SinkURI = &apis.URL{ + Scheme: "https", + Host: "tableflip.dev", + RawQuery: "flip=mattmoor", + } +} + +// GetListType implements apis.Listable +func (*Source) GetListType() runtime.Object { + return &SourceList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SourceList is a list of Source resources +type SourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Source `json:"items"` +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/status_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1/status_types.go new file mode 100644 index 0000000000..c2cb989a19 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/status_types.go @@ -0,0 +1,141 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// +genduck + +// Conditions is a simple wrapper around apis.Conditions to implement duck.Implementable. +type Conditions apis.Conditions + +// Conditions is an Implementable "duck type". +var _ duck.Implementable = (*Conditions)(nil) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KResource is a skeleton type wrapping Conditions in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Conditions ObjectReferences and +// access the Conditions data. This is not a real resource. +type KResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status Status `json:"status"` +} + +// Status shows how we expect folks to embed Conditions in +// their Status field. +// WARNING: Adding fields to this struct will add them to all Knative resources. +type Status struct { + // ObservedGeneration is the 'Generation' of the Service that + // was last processed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions the latest available observations of a resource's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions Conditions `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +var _ apis.ConditionsAccessor = (*Status)(nil) + +// GetConditions implements apis.ConditionsAccessor +func (s *Status) GetConditions() apis.Conditions { + return apis.Conditions(s.Conditions) +} + +// SetConditions implements apis.ConditionsAccessor +func (s *Status) SetConditions(c apis.Conditions) { + s.Conditions = Conditions(c) +} + +// In order for Conditions to be Implementable, KResource must be Populatable. +var _ duck.Populatable = (*KResource)(nil) + +// Ensure KResource satisfies apis.Listable +var _ apis.Listable = (*KResource)(nil) + +// GetFullType implements duck.Implementable +func (*Conditions) GetFullType() duck.Populatable { + return &KResource{} +} + +// GetCondition fetches the condition of the specified type. +func (s *Status) GetCondition(t apis.ConditionType) *apis.Condition { + for _, cond := range s.Conditions { + if cond.Type == t { + return &cond + } + } + return nil +} + +// ConvertTo helps implement apis.Convertible for types embedding this Status. +func (source *Status) ConvertTo(ctx context.Context, sink *Status) { + sink.ObservedGeneration = source.ObservedGeneration + for _, c := range source.Conditions { + switch c.Type { + // Copy over the "happy" condition, which is the only condition that + // we can reliably transfer. 
+ case apis.ConditionReady, apis.ConditionSucceeded: + sink.SetConditions(apis.Conditions{c}) + return + } + } +} + +// Populate implements duck.Populatable +func (t *KResource) Populate() { + t.Status.ObservedGeneration = 42 + t.Status.Conditions = Conditions{{ + // Populate ALL fields + Type: "Birthday", + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + Reason: "Celebrate", + Message: "n3wScott, find your party hat :tada:", + }} +} + +// GetListType implements apis.Listable +func (*KResource) GetListType() runtime.Object { + return &KResourceList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KResourceList is a list of KResource resources +type KResourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []KResource `json:"items"` +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..fd54ec6038 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -0,0 +1,483 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { + *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(Addressable) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressStatus. +func (in *AddressStatus) DeepCopy() *AddressStatus { + if in == nil { + return nil + } + out := new(AddressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Addressable) DeepCopyInto(out *Addressable) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Addressable. +func (in *Addressable) DeepCopy() *Addressable { + if in == nil { + return nil + } + out := new(Addressable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressableType) DeepCopyInto(out *AddressableType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableType. +func (in *AddressableType) DeepCopy() *AddressableType { + if in == nil { + return nil + } + out := new(AddressableType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AddressableType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AddressableType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressableTypeList. +func (in *AddressableTypeList) DeepCopy() *AddressableTypeList { + if in == nil { + return nil + } + out := new(AddressableTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AddressableTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudEventOverrides) DeepCopyInto(out *CloudEventOverrides) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventOverrides. +func (in *CloudEventOverrides) DeepCopy() *CloudEventOverrides { + if in == nil { + return nil + } + out := new(CloudEventOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + *out = new(corev1.ObjectReference) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KResource) DeepCopyInto(out *KResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResource. +func (in *KResource) DeepCopy() *KResource { + if in == nil { + return nil + } + out := new(KResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *KResourceList) DeepCopyInto(out *KResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KResourceList. +func (in *KResourceList) DeepCopy() *KResourceList { + if in == nil { + return nil + } + out := new(KResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpecable) DeepCopyInto(out *PodSpecable) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpecable. +func (in *PodSpecable) DeepCopy() *PodSpecable { + if in == nil { + return nil + } + out := new(PodSpecable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Source) DeepCopyInto(out *Source) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Source. 
+func (in *Source) DeepCopy() *Source { + if in == nil { + return nil + } + out := new(Source) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Source) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceList) DeepCopyInto(out *SourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Source, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceList. +func (in *SourceList) DeepCopy() *SourceList { + if in == nil { + return nil + } + out := new(SourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { + *out = *in + in.Sink.DeepCopyInto(&out.Sink) + if in.CloudEventOverrides != nil { + in, out := &in.CloudEventOverrides, &out.CloudEventOverrides + *out = new(CloudEventOverrides) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. 
+func (in *SourceSpec) DeepCopy() *SourceSpec { + if in == nil { + return nil + } + out := new(SourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStatus) DeepCopyInto(out *SourceStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.SinkURI != nil { + in, out := &in.SinkURI, &out.SinkURI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStatus. +func (in *SourceStatus) DeepCopy() *SourceStatus { + if in == nil { + return nil + } + out := new(SourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithPod) DeepCopyInto(out *WithPod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPod. 
+func (in *WithPod) DeepCopy() *WithPod { + if in == nil { + return nil + } + out := new(WithPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WithPod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithPodList) DeepCopyInto(out *WithPodList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]WithPod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPodList. +func (in *WithPodList) DeepCopy() *WithPodList { + if in == nil { + return nil + } + out := new(WithPodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WithPodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WithPodSpec) DeepCopyInto(out *WithPodSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WithPodSpec. 
+func (in *WithPodSpec) DeepCopy() *WithPodSpec { + if in == nil { + return nil + } + out := new(WithPodSpec) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go similarity index 72% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go index 75ab2f573e..05a8d91ae7 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/addressable_types.go @@ -17,17 +17,23 @@ limitations under the License. package v1alpha1 import ( + "context" + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" - "github.com/knative/pkg/apis/duck/v1beta1" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + v1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/apis/duck/v1beta1" ) +// +genduck + // Addressable provides a generic mechanism for a custom resource // definition to indicate a destination for message delivery. - +// // Addressable is the schema for the destination information. This is // typically stored in the object's `status`, as this information may // be generated by the controller. @@ -37,10 +43,13 @@ type Addressable struct { Hostname string `json:"hostname,omitempty"` } -// Addressable is an Implementable "duck type". -var _ duck.Implementable = (*Addressable)(nil) +var ( + // Addressable is an Implementable "duck type". + _ duck.Implementable = (*Addressable)(nil) + // Addressable is a Convertible type. 
+ _ apis.Convertible = (*Addressable)(nil) +) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AddressableType is a skeleton type wrapping Addressable in the manner we expect @@ -71,6 +80,35 @@ func (*Addressable) GetFullType() duck.Populatable { return &AddressableType{} } +// ConvertUp implements apis.Convertible +func (a *Addressable) ConvertUp(ctx context.Context, to apis.Convertible) error { + url := a.GetURL() + switch sink := to.(type) { + case *v1.Addressable: + sink.URL = url.DeepCopy() + return nil + case *v1beta1.Addressable: + sink.URL = url.DeepCopy() + return nil + default: + return fmt.Errorf("unknown version, got: %T", to) + } +} + +// ConvertDown implements apis.Convertible +func (a *Addressable) ConvertDown(ctx context.Context, from apis.Convertible) error { + switch source := from.(type) { + case *v1.Addressable: + a.URL = source.URL.DeepCopy() + return nil + case *v1beta1.Addressable: + a.URL = source.URL.DeepCopy() + return nil + default: + return fmt.Errorf("unknown version, got: %T", from) + } +} + // Populate implements duck.Populatable func (t *AddressableType) Populate() { t.Status = AddressStatus{ @@ -87,6 +125,7 @@ func (t *AddressableType) Populate() { } } +// GetURL returns the URL type for the Addressable. func (a Addressable) GetURL() apis.URL { if a.URL != nil { return *a.URL diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go new file mode 100644 index 0000000000..168439686c --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/binding_types.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/tracker" +) + +// +genduck +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding is a duck type that specifies the partial schema to which all +// Binding implementations should adhere. +type Binding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BindingSpec `json:"spec"` +} + +// Verify that Binding implements the appropriate interfaces. +var ( + _ duck.Implementable = (*Binding)(nil) + _ duck.Populatable = (*Binding)(nil) + _ apis.Listable = (*Binding)(nil) +) + +// BindingSpec specifies the spec portion of the Binding partial-schema. +type BindingSpec struct { + // Subject references the resource(s) whose "runtime contract" should be + // augmented by Binding implementations. + Subject tracker.Reference `json:"subject"` +} + +// GetFullType implements duck.Implementable +func (*Binding) GetFullType() duck.Populatable { + return &Binding{} +} + +// Populate implements duck.Populatable +func (t *Binding) Populate() { + t.Spec = BindingSpec{ + Subject: tracker.Reference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "default", + // Name and Selector are mutually exclusive, + // but we fill them both in for this test. 
+ Name: "bazinga", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + }, + }, + } +} + +// GetListType implements apis.Listable +func (*Binding) GetListType() runtime.Object { + return &BindingList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BindingList is a list of Binding resources +type BindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Binding `json:"items"` +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/condition_set.go similarity index 99% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/condition_set.go index 72d4bf605d..04c1c7317d 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/condition_set.go @@ -23,9 +23,9 @@ import ( "fmt" - "github.com/knative/pkg/apis" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" ) // Conditions is the interface for a Resource that implements the getter and @@ -218,11 +218,7 @@ func (r conditionsImpl) isTerminal(t ConditionType) bool { } } - if t == r.happy { - return true - } - - return false + return t == r.happy } func (r conditionsImpl) severity(t ConditionType) ConditionSeverity { diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/conditions_types.go similarity index 97% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/conditions_types.go index b82de3c4c2..5093898c93 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/conditions_types.go +++ 
b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/conditions_types.go @@ -23,10 +23,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) +// +genduck + // Conditions is the schema for the conditions portion of the payload type Conditions []Condition @@ -60,7 +62,7 @@ const ( ) // Conditions defines a readiness condition for a Knative resource. -// See: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#typical-status-properties +// See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties // +k8s:deepcopy-gen=true type Condition struct { // Type of condition. @@ -118,7 +120,6 @@ func (c *Condition) IsUnknown() bool { // Conditions is an Implementable "duck type". var _ duck.Implementable = (*Conditions)(nil) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KResource is a skeleton type wrapping Conditions in the manner we expect diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/doc.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/doc.go diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go similarity index 97% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go index 5e4d6f2e38..c8a32a7350 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/legacy_targetable_types.go +++ 
b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/legacy_targetable_types.go @@ -20,10 +20,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) +// +genduck + // LegacyTargetable left around until we migrate to Addressable in the // dependent resources. Addressable has more structure in the way it // defines the fields. LegacyTargetable only assumed a single string @@ -32,7 +34,7 @@ import ( // This is to support existing resources until they migrate. // // Do not use this for anything new, use Addressable - +// // LegacyTargetable is the old schema for the addressable portion // of the payload // @@ -44,7 +46,6 @@ type LegacyTargetable struct { // LegacyTargetable is an Implementable "duck type". var _ duck.Implementable = (*LegacyTargetable)(nil) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LegacyTarget is a skeleton type wrapping LegacyTargetable in the manner we diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go similarity index 98% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go index 4bb344f2ac..a66a2c38d4 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/register.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/register.go @@ -17,10 +17,10 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis/duck" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" ) // SchemeGroupVersion is group version used to register these objects diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go similarity index 97% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go index 0e91aef8ac..d8afb324a4 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/retired_targetable_types.go @@ -20,10 +20,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) +// +genduck + // Targetable is an earlier version of the Callable interface. // Callable is a higher-level interface which implements Addressable // but further promises that the destination may synchronously return @@ -31,7 +33,7 @@ import ( // // Targetable implementations should instead implement Addressable and // include an `eventing.knative.dev/returns=any` annotation. - +// // Targetable is retired; implement Addressable for now. type Targetable struct { DomainInternal string `json:"domainInternal,omitempty"` @@ -40,7 +42,6 @@ type Targetable struct { // Targetable is an Implementable "duck type". 
var _ duck.Implementable = (*Targetable)(nil) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Target is a skeleton type wrapping Targetable in the manner we expect diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go similarity index 83% rename from test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go index a59e67ce3f..2dd82428bf 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -93,7 +93,7 @@ func (in *AddressableType) DeepCopyObject() runtime.Object { func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]AddressableType, len(*in)) @@ -122,6 +122,83 @@ func (in *AddressableTypeList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. 
+func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingList) DeepCopyInto(out *BindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Binding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingList. +func (in *BindingList) DeepCopy() *BindingList { + if in == nil { + return nil + } + out := new(BindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BindingSpec) DeepCopyInto(out *BindingSpec) { + *out = *in + in.Subject.DeepCopyInto(&out.Subject) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BindingSpec. +func (in *BindingSpec) DeepCopy() *BindingSpec { + if in == nil { + return nil + } + out := new(BindingSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Condition) DeepCopyInto(out *Condition) { *out = *in @@ -192,7 +269,7 @@ func (in *KResource) DeepCopyObject() runtime.Object { func (in *KResourceList) DeepCopyInto(out *KResourceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KResource, len(*in)) @@ -275,7 +352,7 @@ func (in *LegacyTarget) DeepCopyObject() runtime.Object { func (in *LegacyTargetList) DeepCopyInto(out *LegacyTargetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LegacyTarget, len(*in)) @@ -374,7 +451,7 @@ func (in *Target) DeepCopyObject() runtime.Object { func (in *TargetList) DeepCopyInto(out *TargetList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Target, len(*in)) diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go new file mode 100644 index 0000000000..6093bd0338 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/addressable_types.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + v1 "knative.dev/pkg/apis/duck/v1" +) + +// +genduck + +// Addressable provides a generic mechanism for a custom resource +// definition to indicate a destination for message delivery. +// +// Addressable is the schema for the destination information. This is +// typically stored in the object's `status`, as this information may +// be generated by the controller. +type Addressable struct { + URL *apis.URL `json:"url,omitempty"` +} + +var ( + // Addressable is an Implementable "duck type". + _ duck.Implementable = (*Addressable)(nil) + // Addressable is a Convertible type. + _ apis.Convertible = (*Addressable)(nil) +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableType is a skeleton type wrapping Addressable in the manner we expect +// resource writers defining compatible resources to embed it. We will +// typically use this type to deserialize Addressable ObjectReferences and +// access the Addressable data. This is not a real resource. +type AddressableType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Status AddressStatus `json:"status"` +} + +// AddressStatus shows how we expect folks to embed Addressable in +// their Status field. +type AddressStatus struct { + Address *Addressable `json:"address,omitempty"` +} + +var ( + // Verify AddressableType resources meet duck contracts. 
+ _ duck.Populatable = (*AddressableType)(nil) + _ apis.Listable = (*AddressableType)(nil) +) + +// GetFullType implements duck.Implementable +func (*Addressable) GetFullType() duck.Populatable { + return &AddressableType{} +} + +// ConvertUp implements apis.Convertible +func (a *Addressable) ConvertUp(ctx context.Context, to apis.Convertible) error { + switch sink := to.(type) { + case *v1.Addressable: + sink.URL = a.URL.DeepCopy() + return nil + default: + return fmt.Errorf("unknown version, got: %T", to) + } +} + +// ConvertDown implements apis.Convertible +func (a *Addressable) ConvertDown(ctx context.Context, from apis.Convertible) error { + switch source := from.(type) { + case *v1.Addressable: + a.URL = source.URL.DeepCopy() + return nil + default: + return fmt.Errorf("unknown version, got: %T", from) + } +} + +// Populate implements duck.Populatable +func (t *AddressableType) Populate() { + t.Status = AddressStatus{ + &Addressable{ + // Populate ALL fields + URL: &apis.URL{ + Scheme: "http", + Host: "foo.com", + }, + }, + } +} + +// GetListType implements apis.Listable +func (*AddressableType) GetListType() runtime.Object { + return &AddressableTypeList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AddressableTypeList is a list of AddressableType resources +type AddressableTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []AddressableType `json:"items"` +} diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go new file mode 100644 index 0000000000..9c247b0305 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/destination.go @@ -0,0 +1,161 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" +) + +// Destination represents a target of an invocation over HTTP. +type Destination struct { + // Ref points to an Addressable. + // +optional + Ref *corev1.ObjectReference `json:"ref,omitempty"` + + // +optional + DeprecatedAPIVersion string `json:"apiVersion,omitempty"` + + // +optional + DeprecatedKind string `json:"kind,omitempty"` + + // +optional + DeprecatedName string `json:"name,omitempty"` + + // +optional + DeprecatedNamespace string `json:"namespace,omitempty"` + + // URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + // +optional + URI *apis.URL `json:"uri,omitempty"` +} + +func (dest *Destination) Validate(ctx context.Context) *apis.FieldError { + if dest == nil { + return nil + } + return ValidateDestination(*dest, true).ViaField(apis.CurrentField) +} + +func (dest *Destination) ValidateDisallowDeprecated(ctx context.Context) *apis.FieldError { + if dest == nil { + return nil + } + return ValidateDestination(*dest, false).ViaField(apis.CurrentField) +} + +// ValidateDestination validates Destination and either allows or disallows +// Deprecated* fields depending on the flag. 
+func ValidateDestination(dest Destination, allowDeprecatedFields bool) *apis.FieldError { + if !allowDeprecatedFields { + var errs *apis.FieldError + if dest.DeprecatedAPIVersion != "" { + errs = errs.Also(apis.ErrInvalidValue("apiVersion is not allowed here, it's a deprecated value", "apiVersion")) + } + if dest.DeprecatedKind != "" { + errs = errs.Also(apis.ErrInvalidValue("kind is not allowed here, it's a deprecated value", "kind")) + } + if dest.DeprecatedName != "" { + errs = errs.Also(apis.ErrInvalidValue("name is not allowed here, it's a deprecated value", "name")) + } + if dest.DeprecatedNamespace != "" { + errs = errs.Also(apis.ErrInvalidValue("namespace is not allowed here, it's a deprecated value", "namespace")) + } + if errs != nil { + return errs + } + } + + deprecatedObjectReference := dest.deprecatedObjectReference() + if dest.Ref != nil && deprecatedObjectReference != nil { + return apis.ErrGeneric("Ref and [apiVersion, kind, name] can't be both present", "[apiVersion, kind, name]", "ref") + } + + var ref *corev1.ObjectReference + if dest.Ref != nil { + ref = dest.Ref + } else { + ref = deprecatedObjectReference + } + if ref == nil && dest.URI == nil { + return apis.ErrGeneric("expected at least one, got none", "[apiVersion, kind, name]", "ref", "uri") + } + + if ref != nil && dest.URI != nil && dest.URI.URL().IsAbs() { + return apis.ErrGeneric("Absolute URI is not allowed when Ref or [apiVersion, kind, name] is present", "[apiVersion, kind, name]", "ref", "uri") + } + // IsAbs() check whether the URL has a non-empty scheme. 
Besides the non-empty scheme, we also require dest.URI has a non-empty host + if ref == nil && dest.URI != nil && (!dest.URI.URL().IsAbs() || dest.URI.Host == "") { + return apis.ErrInvalidValue("Relative URI is not allowed when Ref and [apiVersion, kind, name] is absent", "uri") + } + if ref != nil && dest.URI == nil { + if dest.Ref != nil { + return validateDestinationRef(*ref).ViaField("ref") + } else { + return validateDestinationRef(*ref) + } + } + return nil +} + +func (dest Destination) deprecatedObjectReference() *corev1.ObjectReference { + if dest.DeprecatedAPIVersion == "" && dest.DeprecatedKind == "" && dest.DeprecatedName == "" && dest.DeprecatedNamespace == "" { + return nil + } + return &corev1.ObjectReference{ + Kind: dest.DeprecatedKind, + APIVersion: dest.DeprecatedAPIVersion, + Name: dest.DeprecatedName, + Namespace: dest.DeprecatedNamespace, + } +} + +// GetRef gets the ObjectReference from this Destination, if one is present. If no ref is present, +// then nil is returned. +// Note: this mostly exists to abstract away the deprecated ObjectReference fields. Once they are +// removed, then this method should probably be removed too. +func (dest *Destination) GetRef() *corev1.ObjectReference { + if dest == nil { + return nil + } + if dest.Ref != nil { + return dest.Ref + } + if ref := dest.deprecatedObjectReference(); ref != nil { + return ref + } + return nil +} + +func validateDestinationRef(ref corev1.ObjectReference) *apis.FieldError { + // Check the object. 
+ var errs *apis.FieldError + // Required Fields + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) + } + if ref.APIVersion == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")) + } + if ref.Kind == "" { + errs = errs.Also(apis.ErrMissingField("kind")) + } + + return errs +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/doc.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/apis/duck/v1beta1/doc.go rename to test/vendor/knative.dev/pkg/apis/duck/v1beta1/doc.go diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go similarity index 97% rename from test/vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go rename to test/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go index c337e4a619..ca8388ad48 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/register.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/register.go @@ -17,10 +17,10 @@ limitations under the License. package v1beta1 import ( - "github.com/knative/pkg/apis/duck" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" ) // SchemeGroupVersion is group version used to register these objects diff --git a/test/vendor/knative.dev/pkg/apis/duck/v1beta1/source_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/source_types.go new file mode 100644 index 0000000000..5853a1ae41 --- /dev/null +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/source_types.go @@ -0,0 +1,156 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" +) + +// Source is an Implementable "duck type". +var _ duck.Implementable = (*Source)(nil) + +// +genduck +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Source is the minimum resource shape to adhere to the Source Specification. +// This duck type is intended to allow implementors of Sources and +// Importers to verify their own resources meet the expectations. +// This is not a real resource. +// NOTE: The Source Specification is in progress and the shape and names could +// be modified until it has been accepted. +type Source struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SourceSpec `json:"spec"` + Status SourceStatus `json:"status"` +} + +type SourceSpec struct { + // Sink is a reference to an object that will resolve to a domain name or a + // URI directly to use as the sink. + Sink Destination `json:"sink,omitempty"` + + // CloudEventOverrides defines overrides to control the output format and + // modifications of the event sent to the sink. + // +optional + CloudEventOverrides *CloudEventOverrides `json:"ceOverrides,omitempty"` +} + +// CloudEventOverrides defines arguments for a Source that control the output +// format of the CloudEvents produced by the Source. 
+type CloudEventOverrides struct { + // Extensions specify what attribute are added or overridden on the + // outbound event. Each `Extensions` key-value pair are set on the event as + // an attribute extension independently. + // +optional + Extensions map[string]string `json:"extensions,omitempty"` +} + +// SourceStatus shows how we expect folks to embed Addressable in +// their Status field. +type SourceStatus struct { + // inherits duck/v1beta1 Status, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last + // processed by the controller. + // * Conditions - the latest available observations of a resource's current + // state. + Status `json:",inline"` + + // SinkURI is the current active sink URI that has been configured for the + // Source. + // +optional + SinkURI *apis.URL `json:"sinkUri,omitempty"` +} + +// IsReady returns true if the resource is ready overall. +func (ss *SourceStatus) IsReady() bool { + for _, c := range ss.Conditions { + switch c.Type { + // Look for the "happy" condition, which is the only condition that + // we can reliably understand to be the overall state of the resource. + case apis.ConditionReady, apis.ConditionSucceeded: + return c.IsTrue() + } + } + return false +} + +var ( + // Verify Source resources meet duck contracts. + _ duck.Populatable = (*Source)(nil) + _ apis.Listable = (*Source)(nil) +) + +const ( + // SourceConditionSinkProvided has status True when the Source + // has been configured with a sink target that is resolvable. 
+ SourceConditionSinkProvided apis.ConditionType = "SinkProvided" +) + +// GetFullType implements duck.Implementable +func (*Source) GetFullType() duck.Populatable { + return &Source{} +} + +// Populate implements duck.Populatable +func (s *Source) Populate() { + s.Spec.Sink = Destination{ + URI: &apis.URL{ + Scheme: "https", + Host: "tableflip.dev", + RawQuery: "flip=mattmoor", + }, + } + s.Spec.CloudEventOverrides = &CloudEventOverrides{ + Extensions: map[string]string{"boosh": "kakow"}, + } + s.Status.ObservedGeneration = 42 + s.Status.Conditions = Conditions{{ + // Populate ALL fields + Type: SourceConditionSinkProvided, + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Date(1984, 02, 28, 18, 52, 00, 00, time.UTC))}, + }} + s.Status.SinkURI = &apis.URL{ + Scheme: "https", + Host: "tableflip.dev", + RawQuery: "flip=mattmoor", + } +} + +// GetListType implements apis.Listable +func (*Source) GetListType() runtime.Object { + return &SourceList{} +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SourceList is a list of Source resources +type SourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Source `json:"items"` +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go similarity index 98% rename from test/vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go rename to test/vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go index b999737ae1..b2095fb584 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/status_types.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/status_types.go @@ -24,17 +24,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" 
) +// +genduck + // Conditions is a simple wrapper around apis.Conditions to implement duck.Implementable. type Conditions apis.Conditions // Conditions is an Implementable "duck type". var _ duck.Implementable = (*Conditions)(nil) -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KResource is a skeleton type wrapping Conditions in the manner we expect diff --git a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go similarity index 58% rename from test/vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go index 791c06d96a..032dccb3c3 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/pkg/apis/duck/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,9 @@ limitations under the License. package v1beta1 import ( - apis "github.com/knative/pkg/apis" + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -98,7 +99,7 @@ func (in *AddressableType) DeepCopyObject() runtime.Object { func (in *AddressableTypeList) DeepCopyInto(out *AddressableTypeList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]AddressableType, len(*in)) @@ -127,6 +128,29 @@ func (in *AddressableTypeList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudEventOverrides) DeepCopyInto(out *CloudEventOverrides) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventOverrides. +func (in *CloudEventOverrides) DeepCopy() *CloudEventOverrides { + if in == nil { + return nil + } + out := new(CloudEventOverrides) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Conditions) DeepCopyInto(out *Conditions) { { @@ -149,6 +173,32 @@ func (in Conditions) DeepCopy() Conditions { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + if in.Ref != nil { + in, out := &in.Ref, &out.Ref + *out = new(v1.ObjectReference) + **out = **in + } + if in.URI != nil { + in, out := &in.URI, &out.URI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. 
+func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KResource) DeepCopyInto(out *KResource) { *out = *in @@ -180,7 +230,7 @@ func (in *KResource) DeepCopyObject() runtime.Object { func (in *KResourceList) DeepCopyInto(out *KResourceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]KResource, len(*in)) @@ -209,6 +259,111 @@ func (in *KResourceList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Source) DeepCopyInto(out *Source) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Source. +func (in *Source) DeepCopy() *Source { + if in == nil { + return nil + } + out := new(Source) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Source) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceList) DeepCopyInto(out *SourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Source, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceList. +func (in *SourceList) DeepCopy() *SourceList { + if in == nil { + return nil + } + out := new(SourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { + *out = *in + in.Sink.DeepCopyInto(&out.Sink) + if in.CloudEventOverrides != nil { + in, out := &in.CloudEventOverrides, &out.CloudEventOverrides + *out = new(CloudEventOverrides) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. +func (in *SourceSpec) DeepCopy() *SourceSpec { + if in == nil { + return nil + } + out := new(SourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStatus) DeepCopyInto(out *SourceStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.SinkURI != nil { + in, out := &in.SinkURI, &out.SinkURI + *out = new(apis.URL) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStatus. 
+func (in *SourceStatus) DeepCopy() *SourceStatus { + if in == nil { + return nil + } + out := new(SourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Status) DeepCopyInto(out *Status) { *out = *in diff --git a/test/vendor/github.com/knative/pkg/apis/duck/verify.go b/test/vendor/knative.dev/pkg/apis/duck/verify.go similarity index 98% rename from test/vendor/github.com/knative/pkg/apis/duck/verify.go rename to test/vendor/knative.dev/pkg/apis/duck/verify.go index eb6bdebf43..3f42330fff 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/verify.go +++ b/test/vendor/knative.dev/pkg/apis/duck/verify.go @@ -20,7 +20,8 @@ import ( "encoding/json" "fmt" - "github.com/knative/pkg/kmp" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" ) // Implementable is implemented by the Fooable duck type that consumers @@ -36,6 +37,8 @@ type Implementable interface { // duck type. It will generally have TypeMeta, ObjectMeta, and a Status field // wrapping a Fooable field. type Populatable interface { + apis.Listable + // Populate fills in all possible fields, so that we can verify that // they roundtrip properly through JSON. Populate() diff --git a/test/vendor/knative.dev/pkg/apis/interfaces.go b/test/vendor/knative.dev/pkg/apis/interfaces.go index 6b6c772d74..fef69d8b31 100644 --- a/test/vendor/knative.dev/pkg/apis/interfaces.go +++ b/test/vendor/knative.dev/pkg/apis/interfaces.go @@ -44,15 +44,6 @@ type Convertible interface { ConvertDown(ctx context.Context, from Convertible) error } -// Immutable indicates that a particular type has fields that should -// not change after creation. -// DEPRECATED: Use WithinUpdate / GetBaseline from within Validatable instead. -type Immutable interface { - // CheckImmutableFields checks that the current instance's immutable - // fields haven't changed from the provided original. 
- CheckImmutableFields(ctx context.Context, original Immutable) *FieldError -} - // Listable indicates that a particular type can be returned via the returned // list type by the API server. type Listable interface { diff --git a/test/vendor/knative.dev/pkg/apis/url.go b/test/vendor/knative.dev/pkg/apis/url.go index 181685cd38..89a4d2454c 100644 --- a/test/vendor/knative.dev/pkg/apis/url.go +++ b/test/vendor/knative.dev/pkg/apis/url.go @@ -20,6 +20,8 @@ import ( "encoding/json" "fmt" "net/url" + + "k8s.io/apimachinery/pkg/api/equality" ) // URL is an alias of url.URL. @@ -41,6 +43,30 @@ func ParseURL(u string) (*URL, error) { return (*URL)(pu), nil } +// HTTP creates an http:// URL pointing to a known domain. +func HTTP(domain string) *URL { + return &URL{ + Scheme: "http", + Host: domain, + } +} + +// HTTPS creates an https:// URL pointing to a known domain. +func HTTPS(domain string) *URL { + return &URL{ + Scheme: "https", + Host: domain, + } +} + +// IsEmpty returns true if the URL is `nil` or represents an empty URL. +func (u *URL) IsEmpty() bool { + if u == nil { + return true + } + return *u == URL{} +} + // MarshalJSON implements a custom json marshal method used when this type is // marshaled using json.Marshal. // json.Marshaler impl @@ -76,3 +102,39 @@ func (u *URL) String() string { uu := url.URL(*u) return uu.String() } + +// URL returns the URL as a url.URL. 
+func (u *URL) URL() *url.URL { + if u == nil { + return &url.URL{} + } + url := url.URL(*u) + return &url +} + +// ResolveReference calls the underlying ResolveReference method +// and returns an apis.URL +func (u *URL) ResolveReference(ref *URL) *URL { + if ref == nil { + return u + } + // Turn both u / ref to url.URL + uRef := url.URL(*ref) + uu := url.URL(*u) + + newU := uu.ResolveReference(&uRef) + + // Turn new back to apis.URL + ret := URL(*newU) + return &ret +} + +func init() { + equality.Semantic.AddFunc( + // url.URL has an unexported type (UserInfo) which causes semantic + // equality to panic unless we add a custom equality function + func(a, b URL) bool { + return a.String() == b.String() + }, + ) +} diff --git a/test/vendor/knative.dev/pkg/apis/volatile_time.go b/test/vendor/knative.dev/pkg/apis/volatile_time.go index 48d790d92e..3d2daa2772 100644 --- a/test/vendor/knative.dev/pkg/apis/volatile_time.go +++ b/test/vendor/knative.dev/pkg/apis/volatile_time.go @@ -22,7 +22,6 @@ import ( ) // VolatileTime wraps metav1.Time -// +k8s:openapi-gen=true type VolatileTime struct { Inner metav1.Time } diff --git a/test/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go index be670d4a87..e2b84acd27 100644 --- a/test/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/pkg/apis/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/test/vendor/github.com/knative/pkg/configmap/doc.go b/test/vendor/knative.dev/pkg/configmap/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/configmap/doc.go rename to test/vendor/knative.dev/pkg/configmap/doc.go diff --git a/test/vendor/github.com/knative/pkg/configmap/filter.go b/test/vendor/knative.dev/pkg/configmap/filter.go similarity index 58% rename from test/vendor/github.com/knative/pkg/configmap/filter.go rename to test/vendor/knative.dev/pkg/configmap/filter.go index 27bf13df98..ed1040e27b 100644 --- a/test/vendor/github.com/knative/pkg/configmap/filter.go +++ b/test/vendor/knative.dev/pkg/configmap/filter.go @@ -16,7 +16,12 @@ limitations under the License. package configmap -import "reflect" +import ( + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" +) // TypeFilter accepts instances of types to check against and returns a function transformer that would only let // the call to f through if value is assignable to any one of types of ts. Example: @@ -42,3 +47,28 @@ func TypeFilter(ts ...interface{}) func(func(string, interface{})) func(string, } } } + +// ValidateConstructor checks the type of the constructor it evaluates +// the constructor to be a function with correct signature. 
+// +// The expectation is for the constructor to receive a single input +// parameter of type corev1.ConfigMap as the input and return two +// values with the second value being of type error +func ValidateConstructor(constructor interface{}) error { + cType := reflect.TypeOf(constructor) + + if cType.Kind() != reflect.Func { + return fmt.Errorf("config constructor must be a function") + } + + if cType.NumIn() != 1 || cType.In(0) != reflect.TypeOf(&corev1.ConfigMap{}) { + return fmt.Errorf("config constructor must be of the type func(*k8s.io/api/core/v1/ConfigMap) (..., error)") + } + + errorType := reflect.TypeOf((*error)(nil)).Elem() + + if cType.NumOut() != 2 || !cType.Out(1).Implements(errorType) { + return fmt.Errorf("config constructor must be of the type func(*k8s.io/api/core/v1/ConfigMap) (..., error)") + } + return nil +} diff --git a/test/vendor/github.com/knative/pkg/configmap/informed_watcher.go b/test/vendor/knative.dev/pkg/configmap/informed_watcher.go similarity index 70% rename from test/vendor/github.com/knative/pkg/configmap/informed_watcher.go rename to test/vendor/knative.dev/pkg/configmap/informed_watcher.go index 5903d59d7e..3231e7d287 100644 --- a/test/vendor/github.com/knative/pkg/configmap/informed_watcher.go +++ b/test/vendor/knative.dev/pkg/configmap/informed_watcher.go @@ -18,12 +18,17 @@ package configmap import ( "errors" - "time" + "fmt" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" k8serrors "k8s.io/apimachinery/pkg/api/errors" - informers "k8s.io/client-go/informers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/informers" corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/informers/internalinterfaces" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" ) @@ -35,7 +40,7 @@ func NewDefaultWatcher(kc kubernetes.Interface, namespace string) *InformedWatch return NewInformedWatcher(kc, 
namespace) } -// NewInformedWatcherFromFactory watches a Kubernetes namespace for configmap changes. +// NewInformedWatcherFromFactory watches a Kubernetes namespace for ConfigMap changes. func NewInformedWatcherFromFactory(sif informers.SharedInformerFactory, namespace string) *InformedWatcher { return &InformedWatcher{ sif: sif, @@ -47,16 +52,46 @@ func NewInformedWatcherFromFactory(sif informers.SharedInformerFactory, namespac } } -// NewInformedWatcher watches a Kubernetes namespace for configmap changes. -func NewInformedWatcher(kc kubernetes.Interface, namespace string) *InformedWatcher { +// NewInformedWatcher watches a Kubernetes namespace for ConfigMap changes. +// Optional label requirements allow restricting the list of ConfigMap objects +// that is tracked by the underlying Informer. +func NewInformedWatcher(kc kubernetes.Interface, namespace string, lr ...labels.Requirement) *InformedWatcher { return NewInformedWatcherFromFactory(informers.NewSharedInformerFactoryWithOptions( kc, - // This is the default resync period from controller-runtime. - 10*time.Hour, + // We noticed that we're getting updates all the time anyway, due to the + // watches being terminated and re-spawned. + 0, informers.WithNamespace(namespace), + informers.WithTweakListOptions(addLabelRequirementsToListOptions(lr)), ), namespace) } +// addLabelRequirementsToListOptions returns a function which injects label +// requirements to existing metav1.ListOptions. +func addLabelRequirementsToListOptions(lr []labels.Requirement) internalinterfaces.TweakListOptionsFunc { + if len(lr) == 0 { + return nil + } + + return func(lo *metav1.ListOptions) { + sel, err := labels.Parse(lo.LabelSelector) + if err != nil { + panic(fmt.Errorf("could not parse label selector %q: %w", lo.LabelSelector, err)) + } + lo.LabelSelector = sel.Add(lr...).String() + } +} + +// FilterConfigByLabelExists returns an "exists" label requirement for the +// given label key. 
+func FilterConfigByLabelExists(labelKey string) (*labels.Requirement, error) { + req, err := labels.NewRequirement(labelKey, selection.Exists, nil) + if err != nil { + return nil, fmt.Errorf("could not construct label requirement: %w", err) + } + return req, nil +} + // InformedWatcher provides an informer-based implementation of Watcher. type InformedWatcher struct { sif informers.SharedInformerFactory @@ -79,7 +114,7 @@ var _ Watcher = (*InformedWatcher)(nil) var _ DefaultingWatcher = (*InformedWatcher)(nil) // WatchWithDefault implements DefaultingWatcher. -func (i *InformedWatcher) WatchWithDefault(cm corev1.ConfigMap, o Observer) { +func (i *InformedWatcher) WatchWithDefault(cm corev1.ConfigMap, o ...Observer) { i.defaults[cm.Name] = &cm i.m.Lock() @@ -94,7 +129,7 @@ func (i *InformedWatcher) WatchWithDefault(cm corev1.ConfigMap, o Observer) { panic("cannot WatchWithDefault after the InformedWatcher has started") } - i.Watch(cm.Name, o) + i.Watch(cm.Name, o...) } // Start implements Watcher. @@ -140,17 +175,14 @@ func (i *InformedWatcher) registerCallbackAndStartInformer(stopCh <-chan struct{ } func (i *InformedWatcher) checkObservedResourcesExist() error { - i.m.Lock() - defer i.m.Unlock() + i.m.RLock() + defer i.m.RUnlock() // Check that all objects with Observers exist in our informers. for k := range i.observers { - _, err := i.informer.Lister().ConfigMaps(i.Namespace).Get(k) - if err != nil { - if k8serrors.IsNotFound(err) { - if _, ok := i.defaults[k]; ok { - // It is defaulted, so it is OK that it doesn't exist. - continue - } + if _, err := i.informer.Lister().ConfigMaps(i.Namespace).Get(k); err != nil { + if _, ok := i.defaults[k]; ok && k8serrors.IsNotFound(err) { + // It is defaulted, so it is OK that it doesn't exist. 
+ continue } return err } @@ -163,8 +195,13 @@ func (i *InformedWatcher) addConfigMapEvent(obj interface{}) { i.OnChange(configMap) } -func (i *InformedWatcher) updateConfigMapEvent(old, new interface{}) { - configMap := new.(*corev1.ConfigMap) +func (i *InformedWatcher) updateConfigMapEvent(o, n interface{}) { + // Ignore updates that are idempotent. We are seeing those + // periodically. + if equality.Semantic.DeepEqual(o, n) { + return + } + configMap := n.(*corev1.ConfigMap) i.OnChange(configMap) } diff --git a/test/vendor/github.com/knative/pkg/configmap/load.go b/test/vendor/knative.dev/pkg/configmap/load.go similarity index 100% rename from test/vendor/github.com/knative/pkg/configmap/load.go rename to test/vendor/knative.dev/pkg/configmap/load.go diff --git a/test/vendor/github.com/knative/pkg/configmap/manual_watcher.go b/test/vendor/knative.dev/pkg/configmap/manual_watcher.go similarity index 82% rename from test/vendor/github.com/knative/pkg/configmap/manual_watcher.go rename to test/vendor/knative.dev/pkg/configmap/manual_watcher.go index 759641058c..ad39bb8b9e 100644 --- a/test/vendor/github.com/knative/pkg/configmap/manual_watcher.go +++ b/test/vendor/knative.dev/pkg/configmap/manual_watcher.go @@ -27,7 +27,7 @@ type ManualWatcher struct { Namespace string // Guards mutations to defaultImpl fields - m sync.Mutex + m sync.RWMutex observers map[string][]Observer } @@ -35,29 +35,29 @@ type ManualWatcher struct { var _ Watcher = (*ManualWatcher)(nil) // Watch implements Watcher -func (w *ManualWatcher) Watch(name string, o Observer) { +func (w *ManualWatcher) Watch(name string, o ...Observer) { w.m.Lock() defer w.m.Unlock() if w.observers == nil { - w.observers = make(map[string][]Observer) + w.observers = make(map[string][]Observer, len(o)) } - - wl, _ := w.observers[name] - w.observers[name] = append(wl, o) + w.observers[name] = append(w.observers[name], o...) 
} +// Start implements Watcher func (w *ManualWatcher) Start(<-chan struct{}) error { return nil } +// OnChange invokes the callbacks of all observers of the given ConfigMap. func (w *ManualWatcher) OnChange(configMap *corev1.ConfigMap) { if configMap.Namespace != w.Namespace { return } // Within our namespace, take the lock and see if there are any registered observers. - w.m.Lock() - defer w.m.Unlock() + w.m.RLock() + defer w.m.RUnlock() observers, ok := w.observers[configMap.Name] if !ok { return // No observers. diff --git a/test/vendor/github.com/knative/pkg/configmap/static_watcher.go b/test/vendor/knative.dev/pkg/configmap/static_watcher.go similarity index 83% rename from test/vendor/github.com/knative/pkg/configmap/static_watcher.go rename to test/vendor/knative.dev/pkg/configmap/static_watcher.go index 96a01140db..2ce7c866bf 100644 --- a/test/vendor/github.com/knative/pkg/configmap/static_watcher.go +++ b/test/vendor/knative.dev/pkg/configmap/static_watcher.go @@ -22,13 +22,6 @@ import ( corev1 "k8s.io/api/core/v1" ) -// NewFixedWatcher returns a StaticWatcher that exposes a collection of ConfigMaps. -// -// Deprecated: Use NewStaticWatcher -func NewFixedWatcher(cms ...*corev1.ConfigMap) *StaticWatcher { - return NewStaticWatcher(cms...) -} - // NewStaticWatcher returns an StaticWatcher that exposes a collection of ConfigMaps. 
func NewStaticWatcher(cms ...*corev1.ConfigMap) *StaticWatcher { cmm := make(map[string]*corev1.ConfigMap) @@ -48,10 +41,12 @@ type StaticWatcher struct { var _ Watcher = (*StaticWatcher)(nil) // Watch implements Watcher -func (di *StaticWatcher) Watch(name string, o Observer) { +func (di *StaticWatcher) Watch(name string, o ...Observer) { cm, ok := di.cfgs[name] if ok { - o(cm) + for _, observer := range o { + observer(cm) + } } else { panic(fmt.Sprintf("Tried to watch unknown config with name %q", name)) } diff --git a/test/vendor/github.com/knative/pkg/configmap/store.go b/test/vendor/knative.dev/pkg/configmap/store.go similarity index 88% rename from test/vendor/github.com/knative/pkg/configmap/store.go rename to test/vendor/knative.dev/pkg/configmap/store.go index 452830eb6f..825803c254 100644 --- a/test/vendor/github.com/knative/pkg/configmap/store.go +++ b/test/vendor/knative.dev/pkg/configmap/store.go @@ -101,20 +101,8 @@ func NewUntypedStore( } func (s *UntypedStore) registerConfig(name string, constructor interface{}) { - cType := reflect.TypeOf(constructor) - - if cType.Kind() != reflect.Func { - panic("config constructor must be a function") - } - - if cType.NumIn() != 1 || cType.In(0) != reflect.TypeOf(&corev1.ConfigMap{}) { - panic("config constructor must be of the type func(*k8s.io/api/core/v1/ConfigMap) (..., error)") - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - - if cType.NumOut() != 2 || !cType.Out(1).Implements(errorType) { - panic("config constructor must be of the type func(*k8s.io/api/core/v1/ConfigMap) (..., error)") + if err := ValidateConstructor(constructor); err != nil { + panic(err) } s.storages[name] = &atomic.Value{} @@ -166,7 +154,7 @@ func (s *UntypedStore) OnConfigChanged(c *corev1.ConfigMap) { return } - s.logger.Infof("%s config %q config was added or updated: %v", s.name, name, result) + s.logger.Infof("%s config %q config was added or updated: %#v", s.name, name, result) storage.Store(result) go func() { diff 
--git a/test/vendor/github.com/knative/pkg/configmap/watcher.go b/test/vendor/knative.dev/pkg/configmap/watcher.go similarity index 86% rename from test/vendor/github.com/knative/pkg/configmap/watcher.go rename to test/vendor/knative.dev/pkg/configmap/watcher.go index 71a18f4953..ff703dbc75 100644 --- a/test/vendor/github.com/knative/pkg/configmap/watcher.go +++ b/test/vendor/knative.dev/pkg/configmap/watcher.go @@ -28,8 +28,8 @@ type Observer func(*corev1.ConfigMap) // Watcher defines the interface that a configmap implementation must implement. type Watcher interface { - // Watch is called to register a callback to be notified when a named ConfigMap changes. - Watch(string, Observer) + // Watch is called to register callbacks to be notified when a named ConfigMap changes. + Watch(string, ...Observer) // Start is called to initiate the watches and provide a channel to signal when we should // stop watching. When Start returns, all registered Observers will be called with the @@ -42,8 +42,8 @@ type Watcher interface { type DefaultingWatcher interface { Watcher - // WatchWithDefault is called to register a callback to be notified when a named ConfigMap + // WatchWithDefault is called to register callbacks to be notified when a named ConfigMap // changes. The provided default value is always observed before any real ConfigMap with that // name is. If the real ConfigMap with that name is deleted, then the default value is observed. - WatchWithDefault(cm corev1.ConfigMap, o Observer) + WatchWithDefault(cm corev1.ConfigMap, o ...Observer) } diff --git a/test/vendor/knative.dev/pkg/controller/controller.go b/test/vendor/knative.dev/pkg/controller/controller.go new file mode 100644 index 0000000000..d385908c76 --- /dev/null +++ b/test/vendor/knative.dev/pkg/controller/controller.go @@ -0,0 +1,551 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" +) + +const ( + falseString = "false" + trueString = "true" + + // DefaultResyncPeriod is the default duration that is used when no + // resync period is associated with a controllers initialization context. + DefaultResyncPeriod = 10 * time.Hour +) + +var ( + // DefaultThreadsPerController is the number of threads to use + // when processing the controller's workqueue. Controller binaries + // may adjust this process-wide default. For finer control, invoke + // Run on the controller directly. + DefaultThreadsPerController = 2 +) + +// Reconciler is the interface that controller implementations are expected +// to implement, so that the shared controller.Impl can drive work through it. +type Reconciler interface { + Reconcile(ctx context.Context, key string) error +} + +// PassNew makes it simple to create an UpdateFunc for use with +// cache.ResourceEventHandlerFuncs that can delegate the same methods +// as AddFunc/DeleteFunc but passing through only the second argument +// (which is the "new" object). 
+func PassNew(f func(interface{})) func(interface{}, interface{}) { + return func(first, second interface{}) { + f(second) + } +} + +// HandleAll wraps the provided handler function into a cache.ResourceEventHandler +// that sends all events to the given handler. For Updates, only the new object +// is forwarded. +func HandleAll(h func(interface{})) cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: h, + UpdateFunc: PassNew(h), + DeleteFunc: h, + } +} + +// Filter makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on the +// schema.GroupVersionKind of the controlling resources. +func Filter(gvk schema.GroupVersionKind) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + owner := metav1.GetControllerOf(object) + return owner != nil && + owner.APIVersion == gvk.GroupVersion().String() && + owner.Kind == gvk.Kind + } + return false + } +} + +// FilterWithName makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on a name. +func FilterWithName(name string) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + return name == object.GetName() + } + return false + } +} + +// FilterWithNameAndNamespace makes it simple to create FilterFunc's for use with +// cache.FilteringResourceEventHandler that filter based on a namespace and a name. +func FilterWithNameAndNamespace(namespace, name string) func(obj interface{}) bool { + return func(obj interface{}) bool { + if object, ok := obj.(metav1.Object); ok { + return name == object.GetName() && + namespace == object.GetNamespace() + } + return false + } +} + +// Impl is our core controller implementation. It handles queuing and feeding work +// from the queue to an implementation of Reconciler. 
+type Impl struct { + // Reconciler is the workhorse of this controller, it is fed the keys + // from the workqueue to process. Public for testing. + Reconciler Reconciler + + // WorkQueue is a rate limited work queue. This is used to queue work to be + // processed instead of performing it as soon as a change happens. This + // means we can ensure we only process a fixed amount of resources at a + // time, and makes it easy to ensure we are never processing the same item + // simultaneously in two different workers. + WorkQueue workqueue.RateLimitingInterface + + // Sugared logger is easier to use but is not as performant as the + // raw logger. In performance critical paths, call logger.Desugar() + // and use the returned raw logger instead. In addition to the + // performance benefits, raw logger also preserves type-safety at + // the expense of slightly greater verbosity. + logger *zap.SugaredLogger + + // StatsReporter is used to send common controller metrics. + statsReporter StatsReporter +} + +// NewImpl instantiates an instance of our controller that will feed work to the +// provided Reconciler as it is enqueued. +func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Impl { + return NewImplWithStats(r, logger, workQueueName, MustNewStatsReporter(workQueueName, logger)) +} + +func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl { + return &Impl{ + Reconciler: r, + WorkQueue: workqueue.NewNamedRateLimitingQueue( + workqueue.DefaultControllerRateLimiter(), + workQueueName, + ), + logger: logger, + statsReporter: reporter, + } +} + +// EnqueueAfter takes a resource, converts it into a namespace/name string, +// and passes it to EnqueueKey. 
+func (c *Impl) EnqueueAfter(obj interface{}, after time.Duration) { + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + c.logger.Errorw("Enqueue", zap.Error(err)) + return + } + c.EnqueueKeyAfter(types.NamespacedName{Namespace: object.GetNamespace(), Name: object.GetName()}, after) +} + +// Enqueue takes a resource, converts it into a namespace/name string, +// and passes it to EnqueueKey. +func (c *Impl) Enqueue(obj interface{}) { + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + c.logger.Errorw("Enqueue", zap.Error(err)) + return + } + c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: object.GetName()}) +} + +// EnqueueSentinel returns a Enqueue method which will always enqueue a +// predefined key instead of the object key. +func (c *Impl) EnqueueSentinel(k types.NamespacedName) func(interface{}) { + return func(interface{}) { + c.EnqueueKey(k) + } +} + +// EnqueueControllerOf takes a resource, identifies its controller resource, +// converts it into a namespace/name string, and passes that to EnqueueKey. +func (c *Impl) EnqueueControllerOf(obj interface{}) { + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + c.logger.Error(err) + return + } + + // If we can determine the controller ref of this object, then + // add that object to our workqueue. + if owner := metav1.GetControllerOf(object); owner != nil { + c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: owner.Name}) + } +} + +// EnqueueLabelOfNamespaceScopedResource returns with an Enqueue func that +// takes a resource, identifies its controller resource through given namespace +// and name labels, converts it into a namespace/name string, and passes that +// to EnqueueKey. The controller resource must be of namespace-scoped. 
+func (c *Impl) EnqueueLabelOfNamespaceScopedResource(namespaceLabel, nameLabel string) func(obj interface{}) { + return func(obj interface{}) { + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + c.logger.Error(err) + return + } + + labels := object.GetLabels() + controllerKey, ok := labels[nameLabel] + if !ok { + c.logger.Debugf("Object %s/%s does not have a referring name label %s", + object.GetNamespace(), object.GetName(), nameLabel) + return + } + + if namespaceLabel != "" { + controllerNamespace, ok := labels[namespaceLabel] + if !ok { + c.logger.Debugf("Object %s/%s does not have a referring namespace label %s", + object.GetNamespace(), object.GetName(), namespaceLabel) + return + } + + c.EnqueueKey(types.NamespacedName{Namespace: controllerNamespace, Name: controllerKey}) + return + } + + // Pass through namespace of the object itself if no namespace label specified. + // This is for the scenario that object and the parent resource are of same namespace, + // e.g. to enqueue the revision of an endpoint. + c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: controllerKey}) + } +} + +// EnqueueLabelOfClusterScopedResource returns with an Enqueue func +// that takes a resource, identifies its controller resource through +// given name label, and passes it to EnqueueKey. +// The controller resource must be of cluster-scoped. 
+func (c *Impl) EnqueueLabelOfClusterScopedResource(nameLabel string) func(obj interface{}) { + return func(obj interface{}) { + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + c.logger.Error(err) + return + } + + labels := object.GetLabels() + controllerKey, ok := labels[nameLabel] + if !ok { + c.logger.Debugf("Object %s/%s does not have a referring name label %s", + object.GetNamespace(), object.GetName(), nameLabel) + return + } + + c.EnqueueKey(types.NamespacedName{Namespace: "", Name: controllerKey}) + } +} + +// EnqueueKey takes a namespace/name string and puts it onto the work queue. +func (c *Impl) EnqueueKey(key types.NamespacedName) { + c.WorkQueue.Add(key) + c.logger.Debugf("Adding to queue %s (depth: %d)", safeKey(key), c.WorkQueue.Len()) +} + +// EnqueueKeyAfter takes a namespace/name string and schedules its execution in +// the work queue after given delay. +func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) { + c.WorkQueue.AddAfter(key, delay) + c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", safeKey(key), delay, c.WorkQueue.Len()) +} + +// Run starts the controller's worker threads, the number of which is threadiness. +// It then blocks until stopCh is closed, at which point it shuts down its internal +// work queue and waits for workers to finish processing their current work items. +func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + sg := sync.WaitGroup{} + defer sg.Wait() + defer func() { + c.WorkQueue.ShutDown() + for c.WorkQueue.Len() > 0 { + time.Sleep(time.Millisecond * 100) + } + }() + + // Launch workers to process resources that get enqueued to our workqueue. 
+ logger := c.logger + logger.Info("Starting controller and workers") + for i := 0; i < threadiness; i++ { + sg.Add(1) + go func() { + defer sg.Done() + for c.processNextWorkItem() { + } + }() + } + + logger.Info("Started workers") + <-stopCh + logger.Info("Shutting down workers") + + return nil +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling Reconcile on our Reconciler. +func (c *Impl) processNextWorkItem() bool { + obj, shutdown := c.WorkQueue.Get() + if shutdown { + return false + } + key := obj.(types.NamespacedName) + keyStr := safeKey(key) + + c.logger.Debugf("Processing from queue %s (depth: %d)", safeKey(key), c.WorkQueue.Len()) + + startTime := time.Now() + // Send the metrics for the current queue depth + c.statsReporter.ReportQueueDepth(int64(c.WorkQueue.Len())) + + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if + // reconcile succeeds. If a transient error occurs, we do not call + // Forget and put the item back to the queue with an increased + // delay. + defer c.WorkQueue.Done(key) + + var err error + defer func() { + status := trueString + if err != nil { + status = falseString + } + c.statsReporter.ReportReconcile(time.Since(startTime), keyStr, status) + }() + + // Embed the key into the logger and attach that to the context we pass + // to the Reconciler. + logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, keyStr)) + ctx := logging.WithLogger(context.TODO(), logger) + + // Run Reconcile, passing it the namespace/name string of the + // resource to be synced. + if err = c.Reconciler.Reconcile(ctx, keyStr); err != nil { + c.handleErr(err, key) + logger.Infof("Reconcile failed. Time taken: %v.", time.Since(startTime)) + return true + } + + // Finally, if no error occurs we Forget this item so it does not + // have any delay when another change happens. 
+ c.WorkQueue.Forget(key) + logger.Infof("Reconcile succeeded. Time taken: %v.", time.Since(startTime)) + + return true +} + +func (c *Impl) handleErr(err error, key types.NamespacedName) { + c.logger.Errorw("Reconcile error", zap.Error(err)) + + // Re-queue the key if it's an transient error. + // We want to check that the queue is shutting down here + // since controller Run might have exited by now (since while this item was + // being processed, queue.Len==0). + if !IsPermanentError(err) && !c.WorkQueue.ShuttingDown() { + c.WorkQueue.AddRateLimited(key) + c.logger.Debugf("Requeuing key %s due to non-permanent error (depth: %d)", safeKey(key), c.WorkQueue.Len()) + return + } + + c.WorkQueue.Forget(key) +} + +// GlobalResync enqueues (with a delay) all objects from the passed SharedInformer +func (c *Impl) GlobalResync(si cache.SharedInformer) { + alwaysTrue := func(interface{}) bool { return true } + c.FilteredGlobalResync(alwaysTrue, si) +} + +// FilteredGlobalResync enqueues (with a delay) all objects from the +// SharedInformer that pass the filter function +func (c *Impl) FilteredGlobalResync(f func(interface{}) bool, si cache.SharedInformer) { + if c.WorkQueue.ShuttingDown() { + return + } + list := si.GetStore().List() + count := float64(len(list)) + for _, obj := range list { + if f(obj) { + c.EnqueueAfter(obj, wait.Jitter(time.Second, count)) + } + } +} + +// NewPermanentError returns a new instance of permanentError. +// Users can wrap an error as permanentError with this in reconcile, +// when he does not expect the key to get re-queued. +func NewPermanentError(err error) error { + return permanentError{e: err} +} + +// permanentError is an error that is considered not transient. +// We should not re-queue keys when it returns with thus error in reconcile. 
+type permanentError struct { + e error +} + +// IsPermanentError returns true if given error is permanentError +func IsPermanentError(err error) bool { + switch err.(type) { + case permanentError: + return true + default: + return false + } +} + +// Error implements the Error() interface of error. +func (err permanentError) Error() string { + if err.e == nil { + return "" + } + + return err.e.Error() +} + +// Informer is the group of methods that a type must implement to be passed to +// StartInformers. +type Informer interface { + Run(<-chan struct{}) + HasSynced() bool +} + +// StartInformers kicks off all of the passed informers and then waits for all +// of them to synchronize. +func StartInformers(stopCh <-chan struct{}, informers ...Informer) error { + for _, informer := range informers { + informer := informer + go informer.Run(stopCh) + } + + for i, informer := range informers { + if ok := cache.WaitForCacheSync(stopCh, informer.HasSynced); !ok { + return fmt.Errorf("failed to wait for cache at index %d to sync", i) + } + } + return nil +} + +// RunInformers kicks off all of the passed informers and then waits for all of +// them to synchronize. Returned function will wait for all informers to finish. +func RunInformers(stopCh <-chan struct{}, informers ...Informer) (func(), error) { + var wg sync.WaitGroup + wg.Add(len(informers)) + for _, informer := range informers { + informer := informer + go func() { + defer wg.Done() + informer.Run(stopCh) + }() + } + + for i, informer := range informers { + if ok := cache.WaitForCacheSync(stopCh, informer.HasSynced); !ok { + return wg.Wait, fmt.Errorf("failed to wait for cache at index %d to sync", i) + } + } + return wg.Wait, nil +} + +// StartAll kicks off all of the passed controllers with DefaultThreadsPerController. +func StartAll(stopCh <-chan struct{}, controllers ...*Impl) { + wg := sync.WaitGroup{} + // Start all of the controllers. 
+	for _, ctrlr := range controllers {
+		wg.Add(1)
+		go func(c *Impl) {
+			defer wg.Done()
+			c.Run(DefaultThreadsPerController, stopCh)
+		}(ctrlr)
+	}
+	wg.Wait()
+}
+
+// This is attached to contexts passed to controller constructors to associate
+// a resync period.
+type resyncPeriodKey struct{}
+
+// WithResyncPeriod associates the given resync period with the given context in
+// the context that is returned.
+func WithResyncPeriod(ctx context.Context, resync time.Duration) context.Context {
+	return context.WithValue(ctx, resyncPeriodKey{}, resync)
+}
+
+// GetResyncPeriod returns the resync period associated with the given context.
+// When none is specified a default resync period is used.
+func GetResyncPeriod(ctx context.Context) time.Duration {
+	rp := ctx.Value(resyncPeriodKey{})
+	if rp == nil {
+		return DefaultResyncPeriod
+	}
+	return rp.(time.Duration)
+}
+
+// GetTrackerLease fetches the tracker lease from the controller context.
+func GetTrackerLease(ctx context.Context) time.Duration {
+	return 3 * GetResyncPeriod(ctx)
+}
+
+// erKey is used to associate record.EventRecorders with contexts.
+type erKey struct{}
+
+// WithEventRecorder attaches the given record.EventRecorder to the provided context
+// in the returned context.
+func WithEventRecorder(ctx context.Context, er record.EventRecorder) context.Context {
+	return context.WithValue(ctx, erKey{}, er)
+}
+
+// GetEventRecorder attempts to look up the record.EventRecorder on a given context.
+// It may return nil if none is found.
+func GetEventRecorder(ctx context.Context) record.EventRecorder {
+	untyped := ctx.Value(erKey{})
+	if untyped == nil {
+		return nil
+	}
+	return untyped.(record.EventRecorder)
+}
+
+func safeKey(key types.NamespacedName) string {
+	if key.Namespace == "" {
+		return key.Name
+	}
+	return key.String()
+}
diff --git a/test/vendor/knative.dev/pkg/controller/helper.go b/test/vendor/knative.dev/pkg/controller/helper.go
new file mode 100644
index 0000000000..b8cc08fb06
--- /dev/null
+++ b/test/vendor/knative.dev/pkg/controller/helper.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"knative.dev/pkg/kmeta"
+)
+
+type Callback func(interface{})
+
+func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback {
+	apiVersion, kind := gvk.ToAPIVersionAndKind()
+
+	return func(untyped interface{}) {
+		typed, err := kmeta.DeletionHandlingAccessor(untyped)
+		if err != nil {
+			// TODO: We should consider logging here.
+			return
+		}
+		// We need to populate TypeMeta, but cannot trample the
+		// informer's copy.
+		// TODO(mattmoor): Avoid the copy if TypeMeta is set.
+ copy := typed.DeepCopyObject() + + accessor, err := meta.TypeAccessor(copy) + if err != nil { + return + } + accessor.SetAPIVersion(apiVersion) + accessor.SetKind(kind) + + // Pass in the mutated copy (accessor is not just a type cast) + f(copy) + } +} diff --git a/test/vendor/knative.dev/pkg/controller/stats_reporter.go b/test/vendor/knative.dev/pkg/controller/stats_reporter.go new file mode 100644 index 0000000000..de0a938d44 --- /dev/null +++ b/test/vendor/knative.dev/pkg/controller/stats_reporter.go @@ -0,0 +1,250 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.uber.org/zap" + "k8s.io/client-go/tools/cache" + kubemetrics "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/workqueue" + "knative.dev/pkg/metrics" +) + +var ( + workQueueDepthStat = stats.Int64("work_queue_depth", "Depth of the work queue", stats.UnitNone) + reconcileCountStat = stats.Int64("reconcile_count", "Number of reconcile operations", stats.UnitNone) + reconcileLatencyStat = stats.Int64("reconcile_latency", "Latency of reconcile operations", stats.UnitMilliseconds) + + // reconcileDistribution defines the bucket boundaries for the histogram of reconcile latency metric. + // Bucket boundaries are 10ms, 100ms, 1s, 10s, 30s and 60s. 
+ reconcileDistribution = view.Distribution(10, 100, 1000, 10000, 30000, 60000) + + // Create the tag keys that will be used to add tags to our measurements. + // Tag keys must conform to the restrictions described in + // go.opencensus.io/tag/validate.go. Currently those restrictions are: + // - length between 1 and 255 inclusive + // - characters are printable US-ASCII + reconcilerTagKey = tag.MustNewKey("reconciler") + keyTagKey = tag.MustNewKey("key") + successTagKey = tag.MustNewKey("success") +) + +func init() { + // Register to receive metrics from kubernetes workqueues. + wp := &metrics.WorkqueueProvider{ + Adds: stats.Int64( + "workqueue_adds_total", + "Total number of adds handled by workqueue", + stats.UnitNone, + ), + Depth: stats.Int64( + "workqueue_depth", + "Current depth of workqueue", + stats.UnitNone, + ), + Latency: stats.Float64( + "workqueue_queue_latency_seconds", + "How long in seconds an item stays in workqueue before being requested.", + "s", + ), + Retries: stats.Int64( + "workqueue_retries_total", + "Total number of retries handled by workqueue", + "s", + ), + WorkDuration: stats.Float64( + "workqueue_work_duration_seconds", + "How long in seconds processing an item from workqueue takes.", + "s", + ), + UnfinishedWorkSeconds: stats.Float64( + "workqueue_unfinished_work_seconds", + "How long in seconds the outstanding workqueue items have been in flight (total).", + "s", + ), + LongestRunningProcessorSeconds: stats.Float64( + "workqueue_longest_running_processor_seconds", + "How long in seconds the longest outstanding workqueue item has been in flight.", + "s", + ), + } + workqueue.SetProvider(wp) + + // Register to receive metrics from kubernetes reflectors (what powers informers) + // NOTE: today these don't actually seem to wire up to anything in Kubernetes. 
+ rp := &metrics.ReflectorProvider{ + ItemsInList: stats.Float64( + "reflector_items_in_list", + "How many items an API list returns to the reflectors", + stats.UnitNone, + ), + // TODO(mattmoor): This is not in the latest version, so it will + // be removed in a future version. + ItemsInMatch: stats.Float64( + "reflector_items_in_match", + "", + stats.UnitNone, + ), + ItemsInWatch: stats.Float64( + "reflector_items_in_watch", + "How many items an API watch returns to the reflectors", + stats.UnitNone, + ), + LastResourceVersion: stats.Float64( + "reflector_last_resource_version", + "Last resource version seen for the reflectors", + stats.UnitNone, + ), + ListDuration: stats.Float64( + "reflector_list_duration_seconds", + "How long an API list takes to return and decode for the reflectors", + stats.UnitNone, + ), + Lists: stats.Int64( + "reflector_lists_total", + "Total number of API lists done by the reflectors", + stats.UnitNone, + ), + ShortWatches: stats.Int64( + "reflector_short_watches_total", + "Total number of short API watches done by the reflectors", + stats.UnitNone, + ), + WatchDuration: stats.Float64( + "reflector_watch_duration_seconds", + "How long an API watch takes to return and decode for the reflectors", + stats.UnitNone, + ), + Watches: stats.Int64( + "reflector_watches_total", + "Total number of API watches done by the reflectors", + stats.UnitNone, + ), + } + cache.SetReflectorMetricsProvider(rp) + + cp := &metrics.ClientProvider{ + Latency: stats.Float64( + "client_latency", + "How long Kubernetes API requests take", + "s", + ), + Result: stats.Int64( + "client_results", + "Total number of API requests (broken down by status code)", + stats.UnitNone, + ), + } + kubemetrics.Register(cp.NewLatencyMetric(), cp.NewResultMetric()) + + views := []*view.View{{ + Description: "Depth of the work queue", + Measure: workQueueDepthStat, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{reconcilerTagKey}, + }, { + Description: "Number of reconcile 
operations", + Measure: reconcileCountStat, + Aggregation: view.Count(), + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey}, + }, { + Description: "Latency of reconcile operations", + Measure: reconcileLatencyStat, + Aggregation: reconcileDistribution, + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey}, + }} + views = append(views, wp.DefaultViews()...) + views = append(views, rp.DefaultViews()...) + views = append(views, cp.DefaultViews()...) + + // Create views to see our measurements. This can return an error if + // a previously-registered view has the same name with a different value. + // View name defaults to the measure name if unspecified. + if err := view.Register(views...); err != nil { + panic(err) + } +} + +// StatsReporter defines the interface for sending metrics +type StatsReporter interface { + // ReportQueueDepth reports the queue depth metric + ReportQueueDepth(v int64) error + + // ReportReconcile reports the count and latency metrics for a reconcile operation + ReportReconcile(duration time.Duration, key, success string) error +} + +// Reporter holds cached metric objects to report metrics +type reporter struct { + reconciler string + globalCtx context.Context +} + +// NewStatsReporter creates a reporter that collects and reports metrics +func NewStatsReporter(reconciler string) (StatsReporter, error) { + // Reconciler tag is static. Create a context containing that and cache it. + ctx, err := tag.New( + context.Background(), + tag.Insert(reconcilerTagKey, reconciler)) + if err != nil { + return nil, err + } + + return &reporter{reconciler: reconciler, globalCtx: ctx}, nil +} + +// MustNewStatsReporter creates a new instance of StatsReporter. +// Logs fatally if creation fails. 
+func MustNewStatsReporter(reconciler string, logger *zap.SugaredLogger) StatsReporter { + stats, err := NewStatsReporter(reconciler) + if err != nil { + logger.Fatalw("Failed to initialize the stats reporter", zap.Error(err)) + } + return stats +} + +// ReportQueueDepth reports the queue depth metric +func (r *reporter) ReportQueueDepth(v int64) error { + if r.globalCtx == nil { + return errors.New("reporter is not initialized correctly") + } + metrics.Record(r.globalCtx, workQueueDepthStat.M(v)) + return nil +} + +// ReportReconcile reports the count and latency metrics for a reconcile operation +func (r *reporter) ReportReconcile(duration time.Duration, key, success string) error { + ctx, err := tag.New( + context.Background(), + tag.Insert(reconcilerTagKey, r.reconciler), + tag.Insert(keyTagKey, key), + tag.Insert(successTagKey, success)) + if err != nil { + return err + } + + metrics.Record(ctx, reconcileCountStat.M(1)) + metrics.Record(ctx, reconcileLatencyStat.M(int64(duration/time.Millisecond))) + return nil +} diff --git a/test/vendor/github.com/knative/pkg/kmeta/accessor.go b/test/vendor/knative.dev/pkg/kmeta/accessor.go similarity index 59% rename from test/vendor/github.com/knative/pkg/kmeta/accessor.go rename to test/vendor/knative.dev/pkg/kmeta/accessor.go index 07c69bedaa..e43aaf0e15 100644 --- a/test/vendor/github.com/knative/pkg/kmeta/accessor.go +++ b/test/vendor/knative.dev/pkg/kmeta/accessor.go @@ -19,10 +19,10 @@ package kmeta import ( "fmt" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" ) @@ -30,6 +30,8 @@ import ( // runtime.Object and metav1.Object that Kubernetes API types // registered with runtime.Scheme must support. 
type Accessor interface { + metav1.Object + // Interfaces for metav1.TypeMeta GroupVersionKind() schema.GroupVersionKind SetGroupVersionKind(gvk schema.GroupVersionKind) @@ -37,40 +39,6 @@ type Accessor interface { // Interfaces for runtime.Object GetObjectKind() schema.ObjectKind DeepCopyObject() runtime.Object - - // Interfaces for metav1.Object - GetNamespace() string - SetNamespace(namespace string) - GetName() string - SetName(name string) - GetGenerateName() string - SetGenerateName(name string) - GetUID() types.UID - SetUID(uid types.UID) - GetResourceVersion() string - SetResourceVersion(version string) - GetGeneration() int64 - SetGeneration(generation int64) - GetSelfLink() string - SetSelfLink(selfLink string) - GetCreationTimestamp() metav1.Time - SetCreationTimestamp(timestamp metav1.Time) - GetDeletionTimestamp() *metav1.Time - SetDeletionTimestamp(timestamp *metav1.Time) - GetDeletionGracePeriodSeconds() *int64 - SetDeletionGracePeriodSeconds(*int64) - GetLabels() map[string]string - SetLabels(labels map[string]string) - GetAnnotations() map[string]string - SetAnnotations(annotations map[string]string) - GetInitializers() *metav1.Initializers - SetInitializers(initializers *metav1.Initializers) - GetFinalizers() []string - SetFinalizers(finalizers []string) - GetOwnerReferences() []metav1.OwnerReference - SetOwnerReferences([]metav1.OwnerReference) - GetClusterName() string - SetClusterName(clusterName string) } // DeletionHandlingAccessor tries to convert given interface into Accessor first; @@ -82,13 +50,26 @@ func DeletionHandlingAccessor(obj interface{}) (Accessor, error) { // To handle obj deletion, try to fetch info from DeletedFinalStateUnknown. 
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - return nil, fmt.Errorf("Couldn't get Accessor from tombstone %#v", obj) + return nil, fmt.Errorf("couldn't get Accessor from tombstone %#v", obj) } accessor, ok = tombstone.Obj.(Accessor) if !ok { - return nil, fmt.Errorf("The object that Tombstone contained is not of kmeta.Accessor %#v", obj) + return nil, fmt.Errorf("the object that Tombstone contained is not of kmeta.Accessor %#v", obj) } } return accessor, nil } + +// ObjectReference returns an core/v1.ObjectReference for the given object +func ObjectReference(obj Accessor) corev1.ObjectReference { + gvk := obj.GroupVersionKind() + apiVersion, kind := gvk.ToAPIVersionAndKind() + + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } +} diff --git a/test/vendor/github.com/knative/pkg/kmeta/doc.go b/test/vendor/knative.dev/pkg/kmeta/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/kmeta/doc.go rename to test/vendor/knative.dev/pkg/kmeta/doc.go diff --git a/test/vendor/github.com/knative/pkg/kmeta/labels.go b/test/vendor/knative.dev/pkg/kmeta/labels.go similarity index 100% rename from test/vendor/github.com/knative/pkg/kmeta/labels.go rename to test/vendor/knative.dev/pkg/kmeta/labels.go diff --git a/test/vendor/knative.dev/pkg/kmeta/names.go b/test/vendor/knative.dev/pkg/kmeta/names.go new file mode 100644 index 0000000000..c59090b52b --- /dev/null +++ b/test/vendor/knative.dev/pkg/kmeta/names.go @@ -0,0 +1,64 @@ +/* +copyright 2019 the knative authors + +licensed under the apache license, version 2.0 (the "license"); +you may not use this file except in compliance with the license. 
+you may obtain a copy of the license at + + http://www.apache.org/licenses/license-2.0 + +unless required by applicable law or agreed to in writing, software +distributed under the license is distributed on an "as is" basis, +without warranties or conditions of any kind, either express or implied. +see the license for the specific language governing permissions and +limitations under the license. +*/ + +package kmeta + +import ( + "crypto/md5" + "fmt" + "strings" +) + +// The longest name supported by the K8s is 63. +// These constants +const ( + longest = 63 + md5Len = 32 + head = longest - md5Len // How much to truncate to fit the hash. +) + +// ChildName generates a name for the resource based upon the parent resource and suffix. +// If the concatenated name is longer than K8s permits the name is hashed and truncated to permit +// construction of the resource, but still keeps it unique. +// If the suffix itself is longer than 31 characters, then the whole string will be hashed +// and `parent|hash|suffix` will be returned, where parent and suffix will be trimmed to +// fit (prefix of parent at most of length 31, and prefix of suffix at most length 30). +func ChildName(parent, suffix string) string { + n := parent + if len(parent) > (longest - len(suffix)) { + // If the suffix is longer than the longest allowed suffix, then + // we hash the whole combined string and use that as the suffix. + if head-len(suffix) <= 0 { + h := md5.Sum([]byte(parent + suffix)) + // 1. trim parent, if needed + if head < len(parent) { + parent = parent[:head] + } + // Format the return string, if it's shorter than longest: pad with + // beginning of the suffix. This happens, for example, when parent is + // short, but the suffix is very long. + ret := parent + fmt.Sprintf("%x", h) + if d := longest - len(ret); d > 0 { + ret += suffix[:d] + } + // If due to trimming above we're terminating the string with a `-`, + // remove it. 
+			return strings.TrimRight(ret, "-")
+		}
+		n = fmt.Sprintf("%s%x", parent[:head-len(suffix)], md5.Sum([]byte(parent)))
+	}
+	return n + suffix
+}
diff --git a/test/vendor/github.com/knative/pkg/kmeta/owner_references.go b/test/vendor/knative.dev/pkg/kmeta/owner_references.go
similarity index 100%
rename from test/vendor/github.com/knative/pkg/kmeta/owner_references.go
rename to test/vendor/knative.dev/pkg/kmeta/owner_references.go
diff --git a/test/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go b/test/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go
new file mode 100644
index 0000000000..822a7ac0c4
--- /dev/null
+++ b/test/vendor/knative.dev/pkg/kmeta/ownerrefable_accessor.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kmeta
+
+// OwnerRefableAccessor is a combination of OwnerRefable interface and Accessor interface
+// which indicates that it has 1) sufficient information to produce a metav1.OwnerReference to an object,
+// 2) and a collection of interfaces from metav1.TypeMeta runtime.Object and metav1.Object that Kubernetes API types
+// registered with runtime.Scheme must support.
+type OwnerRefableAccessor interface { + OwnerRefable + Accessor +} diff --git a/test/vendor/knative.dev/pkg/logging/config.go b/test/vendor/knative.dev/pkg/logging/config.go index b6100c2dc5..6f59bdec24 100644 --- a/test/vendor/knative.dev/pkg/logging/config.go +++ b/test/vendor/knative.dev/pkg/logging/config.go @@ -33,7 +33,12 @@ import ( const ConfigMapNameEnv = "CONFIG_LOGGING_NAME" -var zapLoggerConfig = "zap-logger-config" +const ( + loggerConfigKey = "zap-logger-config" + fallbackLoggerName = "fallback-logger" +) + +var errEmptyLoggerConfig = errors.New("empty logger configuration") // NewLogger creates a logger with the supplied configuration. // In addition to the logger, it returns AtomicLevel that can @@ -48,7 +53,7 @@ func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*za } loggingCfg := zap.NewProductionConfig() - if len(levelOverride) > 0 { + if levelOverride != "" { if level, err := levelFromString(levelOverride); err == nil { loggingCfg.Level = zap.NewAtomicLevelAt(*level) } @@ -58,7 +63,7 @@ func NewLogger(configJSON string, levelOverride string, opts ...zap.Option) (*za if err2 != nil { panic(err2) } - return enrichLoggerWithCommitID(logger.Named("fallback-logger").Sugar()), loggingCfg.Level + return enrichLoggerWithCommitID(logger.Named(fallbackLoggerName).Sugar()), loggingCfg.Level } func enrichLoggerWithCommitID(logger *zap.SugaredLogger) *zap.SugaredLogger { @@ -74,21 +79,22 @@ func enrichLoggerWithCommitID(logger *zap.SugaredLogger) *zap.SugaredLogger { // NewLoggerFromConfig creates a logger using the provided Config func NewLoggerFromConfig(config *Config, name string, opts ...zap.Option) (*zap.SugaredLogger, zap.AtomicLevel) { - logger, level := NewLogger(config.LoggingConfig, config.LoggingLevel[name].String(), opts...) + var componentLvl string + if lvl, defined := config.LoggingLevel[name]; defined { + componentLvl = lvl.String() + } + + logger, level := NewLogger(config.LoggingConfig, componentLvl, opts...) 
return logger.Named(name), level } func newLoggerFromConfig(configJSON string, levelOverride string, opts []zap.Option) (*zap.Logger, zap.AtomicLevel, error) { - if len(configJSON) == 0 { - return nil, zap.AtomicLevel{}, errors.New("empty logging configuration") - } - - var loggingCfg zap.Config - if err := json.Unmarshal([]byte(configJSON), &loggingCfg); err != nil { + loggingCfg, err := zapConfigFromJSON(configJSON) + if err != nil { return nil, zap.AtomicLevel{}, err } - if len(levelOverride) > 0 { + if levelOverride != "" { if level, err := levelFromString(levelOverride); err == nil { loggingCfg.Level = zap.NewAtomicLevelAt(*level) } @@ -99,11 +105,23 @@ func newLoggerFromConfig(configJSON string, levelOverride string, opts []zap.Opt return nil, zap.AtomicLevel{}, err } - logger.Info("Successfully created the logger.", zap.String(logkey.JSONConfig, configJSON)) + logger.Info("Successfully created the logger.") logger.Sugar().Infof("Logging level set to %v", loggingCfg.Level) return logger, loggingCfg.Level, nil } +func zapConfigFromJSON(configJSON string) (*zap.Config, error) { + if configJSON == "" { + return nil, errEmptyLoggerConfig + } + + loggingCfg := &zap.Config{} + if err := json.Unmarshal([]byte(configJSON), loggingCfg); err != nil { + return nil, err + } + return loggingCfg, nil +} + // Config contains the configuration defined in the logging ConfigMap. // +k8s:deepcopy-gen=true type Config struct { @@ -136,7 +154,7 @@ const defaultZLC = `{ // expecting the given list of components. 
func NewConfigFromMap(data map[string]string) (*Config, error) { lc := &Config{} - if zlc, ok := data["zap-logger-config"]; ok { + if zlc, ok := data[loggerConfigKey]; ok { lc.LoggingConfig = zlc } else { lc.LoggingConfig = defaultZLC @@ -157,7 +175,7 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { return lc, nil } -// NewConfigFromConfigMap creates a LoggingConfig from the supplied ConfigMap, +// NewConfigFromConfigMap creates a Config from the supplied ConfigMap, // expecting the given list of components. func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { return NewConfigFromMap(configMap.Data) @@ -175,14 +193,30 @@ func levelFromString(level string) (*zapcore.Level, error) { // when a config map is updated func UpdateLevelFromConfigMap(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel, levelKey string) func(configMap *corev1.ConfigMap) { + return func(configMap *corev1.ConfigMap) { - loggingConfig, err := NewConfigFromConfigMap(configMap) + config, err := NewConfigFromConfigMap(configMap) if err != nil { logger.Errorw("Failed to parse the logging configmap. Previous config map will be used.", zap.Error(err)) return } - level := loggingConfig.LoggingLevel[levelKey] + level, defined := config.LoggingLevel[levelKey] + if !defined { + // reset to global level + loggingCfg, err := zapConfigFromJSON(config.LoggingConfig) + switch { + case err == errEmptyLoggerConfig: + level = zap.NewAtomicLevel().Level() + case err != nil: + logger.With(zap.Error(err)).Errorf("Failed to parse logger configuration. 
"+ + "Previous log level retained for %v", levelKey) + return + default: + level = loggingCfg.Level.Level() + } + } + if atomicLevel.Level() != level { logger.Infof("Updating logging level for %v from %v to %v.", levelKey, atomicLevel.Level(), level) atomicLevel.SetLevel(level) @@ -214,7 +248,7 @@ func JsonToLoggingConfig(jsonCfg string) (*Config, error) { cfg, err := NewConfigFromMap(configMap) if err != nil { // Get the default config from logging package. - if cfg, err = NewConfigFromMap(map[string]string{}); err != nil { + if cfg, err = NewConfigFromMap(nil); err != nil { return nil, err } } @@ -228,7 +262,7 @@ func LoggingConfigToJson(cfg *Config) (string, error) { } jsonCfg, err := json.Marshal(map[string]string{ - zapLoggerConfig: cfg.LoggingConfig, + loggerConfigKey: cfg.LoggingConfig, }) if err != nil { return "", err diff --git a/test/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go index 8611e93957..53345202cb 100644 --- a/test/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/pkg/logging/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/pkg/metrics/client.go b/test/vendor/knative.dev/pkg/metrics/client.go new file mode 100644 index 0000000000..81a1d1148e --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/client.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "k8s.io/client-go/tools/metrics" +) + +// ClientProvider implements the pattern of Kubernetes MetricProvider that may +// be used to produce suitable metrics for use with metrics.Register() +type ClientProvider struct { + Latency *stats.Float64Measure + Result *stats.Int64Measure +} + +// NewLatencyMetric implements MetricsProvider +func (cp *ClientProvider) NewLatencyMetric() metrics.LatencyMetric { + return latencyMetric{ + measure: cp.Latency, + } +} + +// LatencyView returns a view of the Latency metric. +func (cp *ClientProvider) LatencyView() *view.View { + return measureView(cp.Latency, view.Distribution(BucketsNBy10(0.00001, 8)...)) +} + +// NewResultMetric implements MetricsProvider +func (cp *ClientProvider) NewResultMetric() metrics.ResultMetric { + return resultMetric{ + measure: cp.Result, + } +} + +// ResultView returns a view of the Result metric. 
+func (cp *ClientProvider) ResultView() *view.View { + return measureView(cp.Result, view.Count()) +} + +// DefaultViews returns a list of views suitable for passing to view.Register +func (cp *ClientProvider) DefaultViews() []*view.View { + return []*view.View{ + cp.LatencyView(), + cp.ResultView(), + } +} diff --git a/test/vendor/knative.dev/pkg/metrics/config.go b/test/vendor/knative.dev/pkg/metrics/config.go new file mode 100644 index 0000000000..16d7e5f334 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/config.go @@ -0,0 +1,322 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "time" + + "go.opencensus.io/stats" + "go.uber.org/zap" + "knative.dev/pkg/metrics/metricskey" +) + +const ( + DomainEnv = "METRICS_DOMAIN" +) + +// metricsBackend specifies the backend to use for metrics +type metricsBackend string + +const ( + // The following keys are used to configure metrics reporting. + // See https://github.com/knative/serving/blob/master/config/config-observability.yaml + // for details. 
+ AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics" + BackendDestinationKey = "metrics.backend-destination" + ReportingPeriodKey = "metrics.reporting-period-seconds" + StackdriverCustomMetricSubDomainKey = "metrics.stackdriver-custom-metrics-subdomain" + // Stackdriver client configuration keys + StackdriverProjectIDKey = "metrics.stackdriver-project-id" + StackdriverGCPLocationKey = "metrics.stackdriver-gcp-location" + StackdriverClusterNameKey = "metrics.stackdriver-cluster-name" + StackdriverUseSecretKey = "metrics.stackdriver-use-secret" + + // Stackdriver is used for Stackdriver backend + Stackdriver metricsBackend = "stackdriver" + // Prometheus is used for Prometheus backend + Prometheus metricsBackend = "prometheus" + // OpenCensus is used to export to the OpenCensus Agent / Collector, + // which can send to many other services. + OpenCensus metricsBackend = "opencensus" + + defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" + + CollectorAddressKey = "metrics.opencensus-address" + CollectorSecureKey = "metrics.opencensus-require-tls" + + defaultPrometheusPort = 9090 + maxPrometheusPort = 65535 + minPrometheusPort = 1024 +) + +type metricsConfig struct { + // The metrics domain. e.g. "serving.knative.dev" or "build.knative.dev". + domain string + // The component that emits the metrics. e.g. "activator", "autoscaler". + component string + // The metrics backend destination. + backendDestination metricsBackend + // reportingPeriod specifies the interval between reporting aggregated views. + // If duration is less than or equal to zero, it enables the default behavior. + reportingPeriod time.Duration + + // recorder provides a hook for performing custom transformations before + // writing the metrics to the stats.RecordWithOptions interface. 
+ recorder func(context.Context, stats.Measurement, ...stats.Options) error + + // ---- OpenCensus specific below ---- + // collectorAddress is the address of the collector, if not `localhost:55678` + collectorAddress string + // Require mutual TLS. Defaults to "false" because mutual TLS is hard to set up. + requireSecure bool + + // ---- Prometheus specific below ---- + // prometheusPort is the port where metrics are exposed in Prometheus + // format. It defaults to 9090. + prometheusPort int + + // ---- Stackdriver specific below ---- + // True if backendDestination equals to "stackdriver". Store this in a variable + // to reduce string comparison operations. + isStackdriverBackend bool + // stackdriverMetricTypePrefix is the metric domain joins component, e.g. + // "knative.dev/serving/activator". Store this in a variable to reduce string + // join operations. + stackdriverMetricTypePrefix string + // stackdriverCustomMetricTypePrefix is "custom.googleapis.com" joined with the subdomain and component. + // E.g., "custom.googleapis.com//". + // Store this in a variable to reduce string join operations. + stackdriverCustomMetricTypePrefix string + // stackdriverClientConfig is the metadata to configure the metrics exporter's Stackdriver client. + stackdriverClientConfig StackdriverClientConfig +} + +// StackdriverClientConfig encapsulates the metadata required to configure a Stackdriver client. +type StackdriverClientConfig struct { + // ProjectID is the stackdriver project ID to which data is uploaded. + // This is not necessarily the GCP project ID where the Kubernetes cluster is hosted. + // Required when the Kubernetes cluster is not hosted on GCE. + ProjectID string + // GCPLocation is the GCP region or zone to which data is uploaded. + // This is not necessarily the GCP location where the Kubernetes cluster is hosted. + // Required when the Kubernetes cluster is not hosted on GCE. 
+ GCPLocation string + // ClusterName is the cluster name with which the data will be associated in Stackdriver. + // Required when the Kubernetes cluster is not hosted on GCE. + ClusterName string + // UseSecret is whether the credentials stored in a Kubernetes Secret should be used to + // authenticate with Stackdriver. The Secret name and namespace can be specified by calling + // metrics.SetStackdriverSecretLocation. + // If UseSecret is false, Google Application Default Credentials + // will be used (https://cloud.google.com/docs/authentication/production). + UseSecret bool +} + +// NewStackdriverClientConfigFromMap creates a stackdriverClientConfig from the given map +func NewStackdriverClientConfigFromMap(config map[string]string) *StackdriverClientConfig { + return &StackdriverClientConfig{ + ProjectID: config[StackdriverProjectIDKey], + GCPLocation: config[StackdriverGCPLocationKey], + ClusterName: config[StackdriverClusterNameKey], + UseSecret: strings.EqualFold(config[StackdriverUseSecretKey], "true"), + } +} + +// Record applies the `ros` Options to `ms` and then records the resulting +// measurements in the metricsConfig's designated backend. +func (mc *metricsConfig) Record(ctx context.Context, ms stats.Measurement, ros ...stats.Options) error { + if mc == nil || mc.recorder == nil { + return stats.RecordWithOptions(ctx, append(ros, stats.WithMeasurements(ms))...) + } + return mc.recorder(ctx, ms, ros...) 
+} + +func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metricsConfig, error) { + var mc metricsConfig + + if ops.Domain == "" { + return nil, errors.New("metrics domain cannot be empty") + } + mc.domain = ops.Domain + + if ops.Component == "" { + return nil, errors.New("metrics component name cannot be empty") + } + mc.component = ops.Component + + if ops.ConfigMap == nil { + return nil, errors.New("metrics config map cannot be empty") + } + m := ops.ConfigMap + // Read backend setting from environment variable first + backend := os.Getenv(defaultBackendEnvName) + if backend == "" { + // Use Prometheus if DEFAULT_METRICS_BACKEND does not exist or is empty + backend = string(Prometheus) + } + // Override backend if it is set in the config map. + if backendFromConfig, ok := m[BackendDestinationKey]; ok { + backend = backendFromConfig + } + lb := metricsBackend(strings.ToLower(backend)) + switch lb { + case Stackdriver, Prometheus, OpenCensus: + mc.backendDestination = lb + default: + return nil, fmt.Errorf("unsupported metrics backend value %q", backend) + } + + if mc.backendDestination == OpenCensus { + mc.collectorAddress = ops.ConfigMap[CollectorAddressKey] + if isSecure := ops.ConfigMap[CollectorSecureKey]; isSecure != "" { + var err error + if mc.requireSecure, err = strconv.ParseBool(isSecure); err != nil { + return nil, fmt.Errorf("invalid %s value %q", CollectorSecureKey, isSecure) + } + } + } + + if mc.backendDestination == Prometheus { + pp := ops.PrometheusPort + if pp == 0 { + pp = defaultPrometheusPort + } + if pp < minPrometheusPort || pp > maxPrometheusPort { + return nil, fmt.Errorf("invalid port %v, should between %v and %v", pp, minPrometheusPort, maxPrometheusPort) + } + mc.prometheusPort = pp + } + + // If stackdriverClientConfig is not provided for stackdriver backend destination, OpenCensus will try to + // use the application default credentials. 
If that is not available, Opencensus would fail to create the + // metrics exporter. + if mc.backendDestination == Stackdriver { + scc := NewStackdriverClientConfigFromMap(m) + mc.stackdriverClientConfig = *scc + mc.isStackdriverBackend = true + var allowCustomMetrics bool + var err error + mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component) + + customMetricsSubDomain := m[StackdriverCustomMetricSubDomainKey] + if customMetricsSubDomain == "" { + customMetricsSubDomain = defaultCustomMetricSubDomain + } + mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, customMetricsSubDomain, mc.component) + if ascmStr := m[AllowStackdriverCustomMetricsKey]; ascmStr != "" { + allowCustomMetrics, err = strconv.ParseBool(ascmStr) + if err != nil { + return nil, fmt.Errorf("invalid %s value %q", AllowStackdriverCustomMetricsKey, ascmStr) + } + } + + if !allowCustomMetrics { + servingOrEventing := metricskey.KnativeRevisionMetrics.Union( + metricskey.KnativeTriggerMetrics).Union(metricskey.KnativeBrokerMetrics) + mc.recorder = func(ctx context.Context, ms stats.Measurement, ros ...stats.Options) error { + metricType := path.Join(mc.stackdriverMetricTypePrefix, ms.Measure().Name()) + + if servingOrEventing.Has(metricType) { + return stats.RecordWithOptions(ctx, append(ros, stats.WithMeasurements(ms))...) + } + // Otherwise, skip (because it won't be accepted) + return nil + } + } + } + + // If reporting period is specified, use the value from the configuration. + // If not, set a default value based on the selected backend. + // Each exporter makes different promises about what the lowest supported + // reporting period is. For Stackdriver, this value is 1 minute. + // For Prometheus, we will use a lower value since the exporter doesn't + // push anything but just responds to pull requests, and shorter durations + // do not really hurt the performance and we rely on the scraping configuration. 
+ if repStr, ok := m[ReportingPeriodKey]; ok && repStr != "" { + repInt, err := strconv.Atoi(repStr) + if err != nil { + return nil, fmt.Errorf("invalid %s value %q", ReportingPeriodKey, repStr) + } + mc.reportingPeriod = time.Duration(repInt) * time.Second + } else if mc.backendDestination == Stackdriver { + mc.reportingPeriod = 60 * time.Second + } else if mc.backendDestination == Prometheus { + mc.reportingPeriod = 5 * time.Second + } + + return &mc, nil +} + +// Domain holds the metrics domain to use for surfacing metrics. +func Domain() string { + if domain := os.Getenv(DomainEnv); domain != "" { + return domain + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be specifying +this via: + + env: + - name: %s + value: knative.dev/some-repository + +If this is a Go unit test consuming metric.Domain() then it should add the +following import: + +import ( + _ "knative.dev/pkg/metrics/testing" +)`, DomainEnv, DomainEnv)) +} + +// JsonToMetricsOptions converts a json string of a +// ExporterOptions. Returns a non-nil ExporterOptions always. +func JsonToMetricsOptions(jsonOpts string) (*ExporterOptions, error) { + var opts ExporterOptions + if jsonOpts == "" { + return nil, errors.New("json options string is empty") + } + + if err := json.Unmarshal([]byte(jsonOpts), &opts); err != nil { + return nil, err + } + + return &opts, nil +} + +// MetricsOptionsToJson converts a ExporterOptions to a json string. 
+func MetricsOptionsToJson(opts *ExporterOptions) (string, error) { + if opts == nil { + return "", nil + } + + jsonOpts, err := json.Marshal(opts) + if err != nil { + return "", err + } + + return string(jsonOpts), nil +} diff --git a/test/vendor/knative.dev/pkg/metrics/config_observability.go b/test/vendor/knative.dev/pkg/metrics/config_observability.go new file mode 100644 index 0000000000..496443e91f --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/config_observability.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + "strings" + texttemplate "text/template" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // The following is used to set the default log url template + DefaultLogURLTemplate = "http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))" + // The env var name for config-observability + ConfigMapNameEnv = "CONFIG_OBSERVABILITY_NAME" +) + +// ObservabilityConfig contains the configuration defined in the observability ConfigMap. +// +k8s:deepcopy-gen=true +type ObservabilityConfig struct { + // EnableVarLogCollection specifies whether the logs under /var/log/ should be available + // for collection on the host node by the fluentd daemon set. 
+ EnableVarLogCollection bool + + // LoggingURLTemplate is a string containing the logging url template where + // the variable REVISION_UID will be replaced with the created revision's UID. + LoggingURLTemplate string + + // RequestLogTemplate is the go template to use to shape the request logs. + RequestLogTemplate string + + // EnableProbeRequestLog enables queue-proxy to write health check probe request logs. + EnableProbeRequestLog bool + + // RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus, + // Stackdriver. + RequestMetricsBackend string + + // EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from + // the pods via an HTTP server in the format expected by the pprof visualization tool. + EnableProfiling bool +} + +// NewObservabilityConfigFromConfigMap creates a ObservabilityConfig from the supplied ConfigMap +func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*ObservabilityConfig, error) { + oc := &ObservabilityConfig{} + if evlc, ok := configMap.Data["logging.enable-var-log-collection"]; ok { + oc.EnableVarLogCollection = strings.EqualFold(evlc, "true") + } + + if rut, ok := configMap.Data["logging.revision-url-template"]; ok { + oc.LoggingURLTemplate = rut + } else { + oc.LoggingURLTemplate = DefaultLogURLTemplate + } + + if rlt, ok := configMap.Data["logging.request-log-template"]; ok { + // Verify that we get valid templates. 
+ if _, err := texttemplate.New("requestLog").Parse(rlt); err != nil { + return nil, err + } + oc.RequestLogTemplate = rlt + } + + if eprl, ok := configMap.Data["logging.enable-probe-request-log"]; ok { + oc.EnableProbeRequestLog = strings.EqualFold(eprl, "true") + } + + if mb, ok := configMap.Data["metrics.request-metrics-backend-destination"]; ok { + oc.RequestMetricsBackend = mb + } + + if prof, ok := configMap.Data["profiling.enable"]; ok { + oc.EnableProfiling = strings.EqualFold(prof, "true") + } + + return oc, nil +} + +// ConfigMapName gets the name of the metrics ConfigMap +func ConfigMapName() string { + cm := os.Getenv(ConfigMapNameEnv) + if cm == "" { + return "config-observability" + } + return cm +} diff --git a/test/vendor/knative.dev/pkg/metrics/doc.go b/test/vendor/knative.dev/pkg/metrics/doc.go new file mode 100644 index 0000000000..631bb5966c --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/doc.go @@ -0,0 +1,16 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides Knative utilities for exporting metrics to Stackdriver +// backend or Prometheus backend based on config-observability settings. 
+package metrics diff --git a/test/vendor/knative.dev/pkg/metrics/exporter.go b/test/vendor/knative.dev/pkg/metrics/exporter.go new file mode 100644 index 0000000000..ce81d0c7e3 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/exporter.go @@ -0,0 +1,208 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "go.opencensus.io/stats/view" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +var ( + curMetricsExporter view.Exporter + curMetricsConfig *metricsConfig + metricsMux sync.RWMutex +) + +type flushable interface { + // Flush waits for metrics to be uploaded. + Flush() +} + +type stoppable interface { + // StopMetricsExporter stops the exporter + StopMetricsExporter() +} + +// ExporterOptions contains options for configuring the exporter. +type ExporterOptions struct { + // Domain is the metrics domain. e.g. "knative.dev". Must be present. + // + // Stackdriver uses the following format to construct full metric name: + // // + // Prometheus uses the following format to construct full metric name: + // _ + // Domain is actually not used if metrics backend is Prometheus. + Domain string + + // Component is the name of the component that emits the metrics. e.g. + // "activator", "queue_proxy". Should only contains alphabets and underscore. + // Must be present. + Component string + + // PrometheusPort is the port to expose metrics if metrics backend is Prometheus. 
+ // It should be between maxPrometheusPort and maxPrometheusPort. 0 value means + // using the default 9090 value. If is ignored if metrics backend is not + // Prometheus. + PrometheusPort int + + // ConfigMap is the data from config map config-observability. Must be present. + // See https://github.com/knative/serving/blob/master/config/config-observability.yaml + // for details. + ConfigMap map[string]string +} + +// UpdateExporterFromConfigMap returns a helper func that can be used to update the exporter +// when a config map is updated. +func UpdateExporterFromConfigMap(component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) { + domain := Domain() + return func(configMap *corev1.ConfigMap) { + UpdateExporter(ExporterOptions{ + Domain: domain, + Component: component, + ConfigMap: configMap.Data, + }, logger) + } +} + +// UpdateExporter updates the exporter based on the given ExporterOptions. +// This is a thread-safe function. The entire series of operations is locked +// to prevent a race condition between reading the current configuration +// and updating the current exporter. +func UpdateExporter(ops ExporterOptions, logger *zap.SugaredLogger) error { + newConfig, err := createMetricsConfig(ops, logger) + if err != nil { + if getCurMetricsConfig() == nil { + // Fail the process if there doesn't exist an exporter. + logger.Errorw("Failed to get a valid metrics config", zap.Error(err)) + } else { + logger.Errorw("Failed to get a valid metrics config; Skip updating the metrics exporter", zap.Error(err)) + } + return err + } + + if isNewExporterRequired(newConfig) { + logger.Info("Flushing the existing exporter before setting up the new exporter.") + FlushExporter() + e, err := newMetricsExporter(newConfig, logger) + if err != nil { + logger.Errorf("Failed to update a new metrics exporter based on metric config %v. 
error: %v", newConfig, err) + return err + } + existingConfig := getCurMetricsConfig() + setCurMetricsExporter(e) + logger.Infof("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, newConfig) + } + + setCurMetricsConfig(newConfig) + return nil +} + +// isNewExporterRequired compares the non-nil newConfig against curMetricsConfig. When backend changes, +// or stackdriver project ID changes for stackdriver backend, we need to update the metrics exporter. +// This function is not implicitly thread-safe. +func isNewExporterRequired(newConfig *metricsConfig) bool { + cc := getCurMetricsConfig() + if cc == nil || newConfig.backendDestination != cc.backendDestination { + return true + } + + // If the OpenCensus address has changed, restart the exporter. + // TODO(evankanderson): Should we just always restart the opencensus agent? + if newConfig.backendDestination == OpenCensus { + return newConfig.collectorAddress != cc.collectorAddress || newConfig.requireSecure != cc.requireSecure + } + + return newConfig.backendDestination == Stackdriver && newConfig.stackdriverClientConfig != cc.stackdriverClientConfig +} + +// newMetricsExporter gets a metrics exporter based on the config. +// This function is not implicitly thread-safe. +func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + ce := getCurMetricsExporter() + // If there is a Prometheus Exporter server running, stop it. + resetCurPromSrv() + + // TODO(https://github.com/knative/pkg/issues/866): Move Stackdriver and Promethus + // operations before stopping to an interface. 
+ if se, ok := ce.(stoppable); ok { + se.StopMetricsExporter() + } + + var err error + var e view.Exporter + switch config.backendDestination { + case OpenCensus: + e, err = newOpenCensusExporter(config, logger) + case Stackdriver: + e, err = newStackdriverExporter(config, logger) + case Prometheus: + e, err = newPrometheusExporter(config, logger) + default: + err = fmt.Errorf("unsupported metrics backend %v", config.backendDestination) + } + if err != nil { + return nil, err + } + return e, nil +} + +func getCurMetricsExporter() view.Exporter { + metricsMux.RLock() + defer metricsMux.RUnlock() + return curMetricsExporter +} + +func setCurMetricsExporter(e view.Exporter) { + metricsMux.Lock() + defer metricsMux.Unlock() + curMetricsExporter = e +} + +func getCurMetricsConfig() *metricsConfig { + metricsMux.RLock() + defer metricsMux.RUnlock() + return curMetricsConfig +} + +func setCurMetricsConfig(c *metricsConfig) { + metricsMux.Lock() + defer metricsMux.Unlock() + if c != nil { + view.SetReportingPeriod(c.reportingPeriod) + } else { + // Setting to 0 enables the default behavior. + view.SetReportingPeriod(0) + } + curMetricsConfig = c +} + +// FlushExporter waits for exported data to be uploaded. +// This should be called before the process shuts down or exporter is replaced. +// Return value indicates whether the exporter is flushable or not. +func FlushExporter() bool { + e := getCurMetricsExporter() + if e == nil { + return false + } + + if f, ok := e.(flushable); ok { + f.Flush() + return true + } + return false +} diff --git a/test/vendor/knative.dev/pkg/metrics/gcp_metadata.go b/test/vendor/knative.dev/pkg/metrics/gcp_metadata.go new file mode 100644 index 0000000000..ec6ae878e3 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/gcp_metadata.go @@ -0,0 +1,53 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "cloud.google.com/go/compute/metadata" + "knative.dev/pkg/metrics/metricskey" +) + +type gcpMetadata struct { + project string + location string + cluster string +} + +func retrieveGCPMetadata() *gcpMetadata { + gm := gcpMetadata{ + project: metricskey.ValueUnknown, + location: metricskey.ValueUnknown, + cluster: metricskey.ValueUnknown, + } + + if metadata.OnGCE() { + project, err := metadata.NumericProjectID() + if err == nil && project != "" { + gm.project = project + } + location, err := metadata.InstanceAttributeValue("cluster-location") + if err == nil && location != "" { + gm.location = location + } + cluster, err := metadata.InstanceAttributeValue("cluster-name") + if err == nil && cluster != "" { + gm.cluster = cluster + } + } + + return &gm +} diff --git a/test/vendor/knative.dev/pkg/metrics/memstats.go b/test/vendor/knative.dev/pkg/metrics/memstats.go new file mode 100644 index 0000000000..51c1ede14f --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/memstats.go @@ -0,0 +1,539 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "runtime" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +// NewMemStatsAll creates a new MemStatsProvider with stats for all of the +// supported Go runtime.MemStat fields. +func NewMemStatsAll() *MemStatsProvider { + return &MemStatsProvider{ + Alloc: stats.Int64( + "go_alloc", + "The number of bytes of allocated heap objects.", + stats.UnitNone, + ), + TotalAlloc: stats.Int64( + "go_total_alloc", + "The cumulative bytes allocated for heap objects.", + stats.UnitNone, + ), + Sys: stats.Int64( + "go_sys", + "The total bytes of memory obtained from the OS.", + stats.UnitNone, + ), + Lookups: stats.Int64( + "go_lookups", + "The number of pointer lookups performed by the runtime.", + stats.UnitNone, + ), + Mallocs: stats.Int64( + "go_mallocs", + "The cumulative count of heap objects allocated.", + stats.UnitNone, + ), + Frees: stats.Int64( + "go_frees", + "The cumulative count of heap objects freed.", + stats.UnitNone, + ), + HeapAlloc: stats.Int64( + "go_heap_alloc", + "The number of bytes of allocated heap objects.", + stats.UnitNone, + ), + HeapSys: stats.Int64( + "go_heap_sys", + "The number of bytes of heap memory obtained from the OS.", + stats.UnitNone, + ), + HeapIdle: stats.Int64( + "go_heap_idle", + "The number of bytes in idle (unused) spans.", + stats.UnitNone, + ), + HeapInuse: stats.Int64( + "go_heap_in_use", + "The number of bytes in in-use spans.", + stats.UnitNone, + ), + HeapReleased: stats.Int64( + "go_heap_released", + "The number of bytes of physical memory returned to the OS.", + stats.UnitNone, + ), + HeapObjects: stats.Int64( + "go_heap_objects", + "The number of allocated heap objects.", + stats.UnitNone, + ), + StackInuse: stats.Int64( + "go_stack_in_use", + "The number of bytes in stack spans.", + stats.UnitNone, + ), + StackSys: stats.Int64( + "go_stack_sys", + 
"The number of bytes of stack memory obtained from the OS.", + stats.UnitNone, + ), + MSpanInuse: stats.Int64( + "go_mspan_in_use", + "The number of bytes of allocated mspan structures.", + stats.UnitNone, + ), + MSpanSys: stats.Int64( + "go_mspan_sys", + "The number of bytes of memory obtained from the OS for mspan structures.", + stats.UnitNone, + ), + MCacheInuse: stats.Int64( + "go_mcache_in_use", + "The number of bytes of allocated mcache structures.", + stats.UnitNone, + ), + MCacheSys: stats.Int64( + "go_mcache_sys", + "The number of bytes of memory obtained from the OS for mcache structures.", + stats.UnitNone, + ), + BuckHashSys: stats.Int64( + "go_bucket_hash_sys", + "The number of bytes of memory in profiling bucket hash tables.", + stats.UnitNone, + ), + GCSys: stats.Int64( + "go_gc_sys", + "The number of bytes of memory in garbage collection metadata.", + stats.UnitNone, + ), + OtherSys: stats.Int64( + "go_other_sys", + "The number of bytes of memory in miscellaneous off-heap runtime allocations.", + stats.UnitNone, + ), + NextGC: stats.Int64( + "go_next_gc", + "The target heap size of the next GC cycle.", + stats.UnitNone, + ), + LastGC: stats.Int64( + "go_last_gc", + "The time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch).", + "ns", + ), + PauseTotalNs: stats.Int64( + "go_total_gc_pause_ns", + "The cumulative nanoseconds in GC stop-the-world pauses since the program started.", + "ns", + ), + NumGC: stats.Int64( + "go_num_gc", + "The number of completed GC cycles.", + stats.UnitNone, + ), + NumForcedGC: stats.Int64( + "go_num_forced_gc", + "The number of GC cycles that were forced by the application calling the GC function.", + stats.UnitNone, + ), + GCCPUFraction: stats.Float64( + "go_gc_cpu_fraction", + "The fraction of this program's available CPU time used by the GC since the program started.", + stats.UnitNone, + ), + } +} + +// MemStatsProvider is used to expose metrics based on Go's runtime.MemStats. 
+// The fields below (and their comments) are a filtered list taken from +// Go's runtime.MemStats. +type MemStatsProvider struct { + // Alloc is bytes of allocated heap objects. + // + // This is the same as HeapAlloc (see below). + Alloc *stats.Int64Measure + + // TotalAlloc is cumulative bytes allocated for heap objects. + // + // TotalAlloc increases as heap objects are allocated, but + // unlike Alloc and HeapAlloc, it does not decrease when + // objects are freed. + TotalAlloc *stats.Int64Measure + + // Sys is the total bytes of memory obtained from the OS. + // + // Sys is the sum of the XSys fields below. Sys measures the + // virtual address space reserved by the Go runtime for the + // heap, stacks, and other internal data structures. It's + // likely that not all of the virtual address space is backed + // by physical memory at any given moment, though in general + // it all was at some point. + Sys *stats.Int64Measure + + // Lookups is the number of pointer lookups performed by the + // runtime. + // + // This is primarily useful for debugging runtime internals. + Lookups *stats.Int64Measure + + // Mallocs is the cumulative count of heap objects allocated. + // The number of live objects is Mallocs - Frees. + Mallocs *stats.Int64Measure + + // Frees is the cumulative count of heap objects freed. + Frees *stats.Int64Measure + + // HeapAlloc is bytes of allocated heap objects. + // + // "Allocated" heap objects include all reachable objects, as + // well as unreachable objects that the garbage collector has + // not yet freed. Specifically, HeapAlloc increases as heap + // objects are allocated and decreases as the heap is swept + // and unreachable objects are freed. Sweeping occurs + // incrementally between GC cycles, so these two processes + // occur simultaneously, and as a result HeapAlloc tends to + // change smoothly (in contrast with the sawtooth that is + // typical of stop-the-world garbage collectors). 
+ HeapAlloc *stats.Int64Measure + + // HeapSys is bytes of heap memory obtained from the OS. + // + // HeapSys measures the amount of virtual address space + // reserved for the heap. This includes virtual address space + // that has been reserved but not yet used, which consumes no + // physical memory, but tends to be small, as well as virtual + // address space for which the physical memory has been + // returned to the OS after it became unused (see HeapReleased + // for a measure of the latter). + // + // HeapSys estimates the largest size the heap has had. + HeapSys *stats.Int64Measure + + // HeapIdle is bytes in idle (unused) spans. + // + // Idle spans have no objects in them. These spans could be + // (and may already have been) returned to the OS, or they can + // be reused for heap allocations, or they can be reused as + // stack memory. + // + // HeapIdle minus HeapReleased estimates the amount of memory + // that could be returned to the OS, but is being retained by + // the runtime so it can grow the heap without requesting more + // memory from the OS. If this difference is significantly + // larger than the heap size, it indicates there was a recent + // transient spike in live heap size. + HeapIdle *stats.Int64Measure + + // HeapInuse is bytes in in-use spans. + // + // In-use spans have at least one object in them. These spans + // can only be used for other objects of roughly the same + // size. + // + // HeapInuse minus HeapAlloc estimates the amount of memory + // that has been dedicated to particular size classes, but is + // not currently being used. This is an upper bound on + // fragmentation, but in general this memory can be reused + // efficiently. + HeapInuse *stats.Int64Measure + + // HeapReleased is bytes of physical memory returned to the OS. + // + // This counts heap memory from idle spans that was returned + // to the OS and has not yet been reacquired for the heap. 
+ HeapReleased *stats.Int64Measure + + // HeapObjects is the number of allocated heap objects. + // + // Like HeapAlloc, this increases as objects are allocated and + // decreases as the heap is swept and unreachable objects are + // freed. + HeapObjects *stats.Int64Measure + + // StackInuse is bytes in stack spans. + // + // In-use stack spans have at least one stack in them. These + // spans can only be used for other stacks of the same size. + // + // There is no StackIdle because unused stack spans are + // returned to the heap (and hence counted toward HeapIdle). + StackInuse *stats.Int64Measure + + // StackSys is bytes of stack memory obtained from the OS. + // + // StackSys is StackInuse, plus any memory obtained directly + // from the OS for OS thread stacks (which should be minimal). + StackSys *stats.Int64Measure + + // MSpanInuse is bytes of allocated mspan structures. + MSpanInuse *stats.Int64Measure + + // MSpanSys is bytes of memory obtained from the OS for mspan + // structures. + MSpanSys *stats.Int64Measure + + // MCacheInuse is bytes of allocated mcache structures. + MCacheInuse *stats.Int64Measure + + // MCacheSys is bytes of memory obtained from the OS for + // mcache structures. + MCacheSys *stats.Int64Measure + + // BuckHashSys is bytes of memory in profiling bucket hash tables. + BuckHashSys *stats.Int64Measure + + // GCSys is bytes of memory in garbage collection metadata. + GCSys *stats.Int64Measure + + // OtherSys is bytes of memory in miscellaneous off-heap + // runtime allocations. + OtherSys *stats.Int64Measure + + // NextGC is the target heap size of the next GC cycle. + // + // The garbage collector's goal is to keep HeapAlloc ≤ NextGC. + // At the end of each GC cycle, the target for the next cycle + // is computed based on the amount of reachable data and the + // value of GOGC. + NextGC *stats.Int64Measure + + // LastGC is the time the last garbage collection finished, as + // nanoseconds since 1970 (the UNIX epoch). 
+ LastGC *stats.Int64Measure + + // PauseTotalNs is the cumulative nanoseconds in GC + // stop-the-world pauses since the program started. + // + // During a stop-the-world pause, all goroutines are paused + // and only the garbage collector can run. + PauseTotalNs *stats.Int64Measure + + // NumGC is the number of completed GC cycles. + NumGC *stats.Int64Measure + + // NumForcedGC is the number of GC cycles that were forced by + // the application calling the GC function. + NumForcedGC *stats.Int64Measure + + // GCCPUFraction is the fraction of this program's available + // CPU time used by the GC since the program started. + // + // GCCPUFraction is expressed as a number between 0 and 1, + // where 0 means GC has consumed none of this program's CPU. A + // program's available CPU time is defined as the integral of + // GOMAXPROCS since the program started. That is, if + // GOMAXPROCS is 2 and a program has been running for 10 + // seconds, its "available CPU" is 20 seconds. GCCPUFraction + // does not include CPU time used for write barrier activity. + // + // This is the same as the fraction of CPU reported by + // GODEBUG=gctrace=1. + GCCPUFraction *stats.Float64Measure +} + +// Start initiates a Go routine that starts pushing metrics into +// the provided measures. 
+func (msp *MemStatsProvider) Start(ctx context.Context, period time.Duration) { + go func() { + ticker := time.NewTicker(period) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ms := runtime.MemStats{} + runtime.ReadMemStats(&ms) + if msp.Alloc != nil { + Record(ctx, msp.Alloc.M(int64(ms.Alloc))) + } + if msp.TotalAlloc != nil { + Record(ctx, msp.TotalAlloc.M(int64(ms.TotalAlloc))) + } + if msp.Sys != nil { + Record(ctx, msp.Sys.M(int64(ms.Sys))) + } + if msp.Lookups != nil { + Record(ctx, msp.Lookups.M(int64(ms.Lookups))) + } + if msp.Mallocs != nil { + Record(ctx, msp.Mallocs.M(int64(ms.Mallocs))) + } + if msp.Frees != nil { + Record(ctx, msp.Frees.M(int64(ms.Frees))) + } + if msp.HeapAlloc != nil { + Record(ctx, msp.HeapAlloc.M(int64(ms.HeapAlloc))) + } + if msp.HeapSys != nil { + Record(ctx, msp.HeapSys.M(int64(ms.HeapSys))) + } + if msp.HeapIdle != nil { + Record(ctx, msp.HeapIdle.M(int64(ms.HeapIdle))) + } + if msp.HeapInuse != nil { + Record(ctx, msp.HeapInuse.M(int64(ms.HeapInuse))) + } + if msp.HeapReleased != nil { + Record(ctx, msp.HeapReleased.M(int64(ms.HeapReleased))) + } + if msp.HeapObjects != nil { + Record(ctx, msp.HeapObjects.M(int64(ms.HeapObjects))) + } + if msp.StackInuse != nil { + Record(ctx, msp.StackInuse.M(int64(ms.StackInuse))) + } + if msp.StackSys != nil { + Record(ctx, msp.StackSys.M(int64(ms.StackSys))) + } + if msp.MSpanInuse != nil { + Record(ctx, msp.MSpanInuse.M(int64(ms.MSpanInuse))) + } + if msp.MSpanSys != nil { + Record(ctx, msp.MSpanSys.M(int64(ms.MSpanSys))) + } + if msp.MCacheInuse != nil { + Record(ctx, msp.MCacheInuse.M(int64(ms.MCacheInuse))) + } + if msp.MCacheSys != nil { + Record(ctx, msp.MCacheSys.M(int64(ms.MCacheSys))) + } + if msp.BuckHashSys != nil { + Record(ctx, msp.BuckHashSys.M(int64(ms.BuckHashSys))) + } + if msp.GCSys != nil { + Record(ctx, msp.GCSys.M(int64(ms.GCSys))) + } + if msp.OtherSys != nil { + Record(ctx, msp.OtherSys.M(int64(ms.OtherSys))) + } + if msp.NextGC != nil { + 
Record(ctx, msp.NextGC.M(int64(ms.NextGC))) + } + if msp.LastGC != nil { + Record(ctx, msp.LastGC.M(int64(ms.LastGC))) + } + if msp.PauseTotalNs != nil { + Record(ctx, msp.PauseTotalNs.M(int64(ms.PauseTotalNs))) + } + if msp.NumGC != nil { + Record(ctx, msp.NumGC.M(int64(ms.NumGC))) + } + if msp.NumForcedGC != nil { + Record(ctx, msp.NumForcedGC.M(int64(ms.NumForcedGC))) + } + if msp.GCCPUFraction != nil { + Record(ctx, msp.GCCPUFraction.M(ms.GCCPUFraction)) + } + } + } + }() +} + +// DefaultViews returns a list of views suitable for passing to view.Register +func (msp *MemStatsProvider) DefaultViews() (views []*view.View) { + if m := msp.Alloc; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.TotalAlloc; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.Sys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.Lookups; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.Mallocs; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.Frees; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapAlloc; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapIdle; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapInuse; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapReleased; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.HeapObjects; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.StackInuse; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.StackSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := 
msp.MSpanInuse; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.MSpanSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.MCacheInuse; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.MCacheSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.BuckHashSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.GCSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.OtherSys; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.NextGC; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.LastGC; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.PauseTotalNs; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.NumGC; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.NumForcedGC; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + if m := msp.GCCPUFraction; m != nil { + views = append(views, measureView(m, view.LastValue())) + } + return +} diff --git a/test/vendor/knative.dev/pkg/metrics/metrics.go b/test/vendor/knative.dev/pkg/metrics/metrics.go new file mode 100644 index 0000000000..cb69c01576 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/metrics.go @@ -0,0 +1,173 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "net/url" + "sync/atomic" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/workqueue" +) + +var ( + // tagName is used to associate the provided name with each metric created + // through the WorkqueueProvider's methods to implement workqueue.MetricsProvider. + // For the kubernetes workqueue implementations this is the queue name provided + // to the workqueue constructor. + tagName = tag.MustNewKey("name") + + // tagVerb is used to associate the verb of the client action with latency metrics. + tagVerb = tag.MustNewKey("verb") + // tagCode is used to associate the status code the client gets back from an API call. + tagCode = tag.MustNewKey("code") + // tagMethod is used to associate the HTTP method the client used for the rest call. + tagMethod = tag.MustNewKey("method") + // tagHost is used to associate the host to which the HTTP request was made. + tagHost = tag.MustNewKey("host") + // tagPath is used to associate the path to which the HTTP request as made. 
+ tagPath = tag.MustNewKey("path") +) + +type counterMetric struct { + mutators []tag.Mutator + measure *stats.Int64Measure +} + +var ( + _ cache.CounterMetric = (*counterMetric)(nil) + _ workqueue.CounterMetric = (*counterMetric)(nil) +) + +// Inc implements CounterMetric +func (m counterMetric) Inc() { + Record(context.Background(), m.measure.M(1), stats.WithTags(m.mutators...)) +} + +type gaugeMetric struct { + mutators []tag.Mutator + measure *stats.Int64Measure + total int64 +} + +var ( + _ workqueue.GaugeMetric = (*gaugeMetric)(nil) +) + +// Inc implements CounterMetric +func (m *gaugeMetric) Inc() { + total := atomic.AddInt64(&m.total, 1) + Record(context.Background(), m.measure.M(total), stats.WithTags(m.mutators...)) +} + +// Dec implements GaugeMetric +func (m *gaugeMetric) Dec() { + total := atomic.AddInt64(&m.total, -1) + Record(context.Background(), m.measure.M(total), stats.WithTags(m.mutators...)) +} + +type floatMetric struct { + mutators []tag.Mutator + measure *stats.Float64Measure +} + +var ( + _ workqueue.SummaryMetric = (*floatMetric)(nil) + _ workqueue.SettableGaugeMetric = (*floatMetric)(nil) + _ workqueue.HistogramMetric = (*floatMetric)(nil) + _ cache.GaugeMetric = (*floatMetric)(nil) +) + +// Observe implements SummaryMetric +func (m floatMetric) Observe(v float64) { + Record(context.Background(), m.measure.M(v), stats.WithTags(m.mutators...)) +} + +// Set implements GaugeMetric +func (m floatMetric) Set(v float64) { + m.Observe(v) +} + +type latencyMetric struct { + measure *stats.Float64Measure +} + +var ( + _ metrics.LatencyMetric = (*latencyMetric)(nil) +) + +// Observe implements LatencyMetric +func (m latencyMetric) Observe(verb string, u url.URL, t time.Duration) { + Record(context.Background(), m.measure.M(t.Seconds()), stats.WithTags( + tag.Insert(tagVerb, verb), + tag.Insert(tagHost, u.Host), + tag.Insert(tagPath, u.Path), + )) +} + +type resultMetric struct { + measure *stats.Int64Measure +} + +var ( + _ metrics.ResultMetric = 
(*resultMetric)(nil) +) + +// Increment implements ResultMetric +func (m resultMetric) Increment(code, method, host string) { + Record(context.Background(), m.measure.M(1), stats.WithTags( + tag.Insert(tagCode, code), + tag.Insert(tagMethod, method), + tag.Insert(tagHost, host), + )) +} + +// measureView returns a view of the supplied metric. +func measureView(m stats.Measure, agg *view.Aggregation) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + Measure: m, + Aggregation: agg, + TagKeys: []tag.Key{tagName}, + } +} + +// noopMetric implements all the cache and workqueue metric interfaces. +// Note: we cannot implement the metrics.FooMetric types due to +// overlapping method names. +type noopMetric struct{} + +var ( + _ cache.CounterMetric = (*noopMetric)(nil) + _ cache.GaugeMetric = (*noopMetric)(nil) + _ workqueue.CounterMetric = (*noopMetric)(nil) + _ workqueue.GaugeMetric = (*noopMetric)(nil) + _ workqueue.HistogramMetric = (*noopMetric)(nil) + _ workqueue.SettableGaugeMetric = (*noopMetric)(nil) +) + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Observe(float64) {} diff --git a/test/vendor/knative.dev/pkg/metrics/metricskey/constants.go b/test/vendor/knative.dev/pkg/metrics/metricskey/constants.go new file mode 100644 index 0000000000..b2508807f8 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/metricskey/constants.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricskey + +const ( + // LabelProject is the label for project (e.g. GCP GAIA ID, AWS project name) + LabelProject = "project_id" + + // LabelLocation is the label for location (e.g. GCE zone, AWS region) where the service is deployed + LabelLocation = "location" + + // LabelClusterName is the label for immutable name of the cluster + LabelClusterName = "cluster_name" + + // LabelNamespaceName is the label for immutable name of the namespace that the service is deployed + LabelNamespaceName = "namespace_name" + + // LabelResponseCode is the label for the HTTP response status code. + LabelResponseCode = "response_code" + + // LabelResponseCodeClass is the label for the HTTP response status code class. For example, "2xx", "3xx", etc. + LabelResponseCodeClass = "response_code_class" + + // ValueUnknown is the default value if the field is unknown, e.g. project will be unknown if Knative + // is not running on GKE. + ValueUnknown = "unknown" +) diff --git a/test/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go b/test/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go new file mode 100644 index 0000000000..7362ad037e --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/metricskey/constants_eventing.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metricskey + +import "k8s.io/apimachinery/pkg/util/sets" + +// TODO should be moved to eventing. See https://github.com/knative/pkg/issues/608 + +const ( + // ResourceTypeKnativeTrigger is the Stackdriver resource type for Knative Triggers. + ResourceTypeKnativeTrigger = "knative_trigger" + + // ResourceTypeKnativeBroker is the Stackdriver resource type for Knative Brokers. + ResourceTypeKnativeBroker = "knative_broker" + + // ResourceTypeKnativeSource is the Stackdriver resource type for Knative Sources. + ResourceTypeKnativeSource = "knative_source" + + // LabelName is the label for the name of the resource. + LabelName = "name" + + // LabelResourceGroup is the name of the resource CRD. + LabelResourceGroup = "resource_group" + + // LabelTriggerName is the label for the name of the Trigger. + LabelTriggerName = "trigger_name" + + // LabelBrokerName is the label for the name of the Broker. + LabelBrokerName = "broker_name" + + // LabelEventType is the label for the name of the event type. + LabelEventType = "event_type" + + // LabelEventSource is the label for the name of the event source. + LabelEventSource = "event_source" + + // LabelFilterType is the label for the Trigger filter attribute "type". + LabelFilterType = "filter_type" +) + +var ( + // KnativeTriggerLabels stores the set of resource labels for resource type knative_trigger. + KnativeTriggerLabels = sets.NewString( + LabelProject, + LabelLocation, + LabelClusterName, + LabelNamespaceName, + LabelBrokerName, + LabelTriggerName, + ) + + // KnativeTriggerMetrics stores a set of metric types which are supported + // by resource type knative_trigger. + KnativeTriggerMetrics = sets.NewString( + "knative.dev/internal/eventing/trigger/event_count", + "knative.dev/internal/eventing/trigger/event_processing_latencies", + "knative.dev/internal/eventing/trigger/event_dispatch_latencies", + ) + + // KnativeBrokerLabels stores the set of resource labels for resource type knative_broker. 
+ KnativeBrokerLabels = sets.NewString( + LabelProject, + LabelLocation, + LabelClusterName, + LabelNamespaceName, + LabelBrokerName, + ) + + // KnativeBrokerMetrics stores a set of metric types which are supported + // by resource type knative_trigger. + KnativeBrokerMetrics = sets.NewString( + "knative.dev/internal/eventing/broker/event_count", + ) + + // KnativeSourceLabels stores the set of resource labels for resource type knative_source. + KnativeSourceLabels = sets.NewString( + LabelProject, + LabelLocation, + LabelClusterName, + LabelNamespaceName, + LabelName, + LabelResourceGroup, + ) + + // KnativeSourceMetrics stores a set of metric types which are supported + // by resource type knative_source. + KnativeSourceMetrics = sets.NewString( + "knative.dev/eventing/source/event_count", + ) +) diff --git a/test/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go b/test/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go new file mode 100644 index 0000000000..239cebadfe --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/metricskey/constants_serving.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricskey + +import "k8s.io/apimachinery/pkg/util/sets" + +// TODO should be moved to serving. 
See https://github.com/knative/pkg/issues/608 + +const ( + // ResourceTypeKnativeRevision is the Stackdriver resource type for Knative revision + ResourceTypeKnativeRevision = "knative_revision" + + // LabelServiceName is the label for the deployed service name + LabelServiceName = "service_name" + + // LabelRouteName is the label for immutable name of the route that receives the request + LabelRouteName = "route_name" + + // LabelConfigurationName is the label for the configuration which created the monitored revision + LabelConfigurationName = "configuration_name" + + // LabelRevisionName is the label for the monitored revision + LabelRevisionName = "revision_name" +) + +var ( + // KnativeRevisionLabels stores the set of resource labels for resource type knative_revision. + // LabelRouteName is added as extra label since it is optional, not in this map. + KnativeRevisionLabels = sets.NewString( + LabelProject, + LabelLocation, + LabelClusterName, + LabelNamespaceName, + LabelServiceName, + LabelConfigurationName, + LabelRevisionName, + ) + + // KnativeRevisionMetrics stores a set of metric types which are supported + // by resource type knative_revision. 
+ KnativeRevisionMetrics = sets.NewString( + "knative.dev/internal/serving/activator/request_count", + "knative.dev/internal/serving/activator/request_latencies", + "knative.dev/serving/autoscaler/desired_pods", + "knative.dev/serving/autoscaler/requested_pods", + "knative.dev/serving/autoscaler/actual_pods", + "knative.dev/serving/autoscaler/stable_request_concurrency", + "knative.dev/serving/autoscaler/panic_request_concurrency", + "knative.dev/serving/autoscaler/target_concurrency_per_pod", + "knative.dev/serving/autoscaler/panic_mode", + "knative.dev/internal/serving/revision/request_count", + "knative.dev/internal/serving/revision/request_latencies", + "knative.dev/internal/serving/controller/cert_expiration_durations", + "knative.dev/internal/serving/controller/cert_total_num", + "knative.dev/internal/serving/controller/cert_issuance_latencies", + "knative.dev/internal/serving/controller/cert_creation_count", + ) +) diff --git a/test/vendor/knative.dev/pkg/metrics/monitored_resources.go b/test/vendor/knative.dev/pkg/metrics/monitored_resources.go new file mode 100644 index 0000000000..d8034efc92 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/monitored_resources.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "go.opencensus.io/tag" + "knative.dev/pkg/metrics/metricskey" +) + +type Global struct{} + +func (g *Global) MonitoredResource() (resType string, labels map[string]string) { + return "global", nil +} + +func getTagsMap(tags []tag.Tag) map[string]string { + tagsMap := map[string]string{} + for _, t := range tags { + tagsMap[t.Key.Name()] = t.Value + } + return tagsMap +} + +func valueOrUnknown(key string, tagsMap map[string]string) string { + if value, ok := tagsMap[key]; ok { + return value + } + return metricskey.ValueUnknown +} diff --git a/test/vendor/knative.dev/pkg/metrics/monitored_resources_eventing.go b/test/vendor/knative.dev/pkg/metrics/monitored_resources_eventing.go new file mode 100644 index 0000000000..32742e7732 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/monitored_resources_eventing.go @@ -0,0 +1,159 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +// TODO should be moved to eventing. 
See https://github.com/knative/pkg/issues/608 + +import ( + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/metric/metricdata" + "knative.dev/pkg/metrics/metricskey" +) + +type KnativeTrigger struct { + Project string + Location string + ClusterName string + NamespaceName string + TriggerName string + BrokerName string + TypeFilterAttribute string + SourceFilterAttribute string +} + +type KnativeBroker struct { + Project string + Location string + ClusterName string + NamespaceName string + BrokerName string +} + +type KnativeSource struct { + Project string + Location string + ClusterName string + NamespaceName string + SourceName string + SourceResourceGroup string +} + +func (kt *KnativeTrigger) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + metricskey.LabelProject: kt.Project, + metricskey.LabelLocation: kt.Location, + metricskey.LabelClusterName: kt.ClusterName, + metricskey.LabelNamespaceName: kt.NamespaceName, + metricskey.LabelBrokerName: kt.BrokerName, + metricskey.LabelTriggerName: kt.TriggerName, + } + return metricskey.ResourceTypeKnativeTrigger, labels +} + +func (kb *KnativeBroker) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + metricskey.LabelProject: kb.Project, + metricskey.LabelLocation: kb.Location, + metricskey.LabelClusterName: kb.ClusterName, + metricskey.LabelNamespaceName: kb.NamespaceName, + metricskey.LabelBrokerName: kb.BrokerName, + } + return metricskey.ResourceTypeKnativeBroker, labels +} + +func (ki *KnativeSource) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + metricskey.LabelProject: ki.Project, + metricskey.LabelLocation: ki.Location, + metricskey.LabelClusterName: ki.ClusterName, + metricskey.LabelNamespaceName: ki.NamespaceName, + metricskey.LabelName: ki.SourceName, + metricskey.LabelResourceGroup: ki.SourceResourceGroup, + } + return 
metricskey.ResourceTypeKnativeSource, labels +} + +func GetKnativeBrokerMonitoredResource( + des *metricdata.Descriptor, tags map[string]string, gm *gcpMetadata) (map[string]string, monitoredresource.Interface) { + kb := &KnativeBroker{ + // The first three resource labels are from metadata. + Project: gm.project, + Location: gm.location, + ClusterName: gm.cluster, + // The rest resource labels are from metrics labels. + NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tags), + BrokerName: valueOrUnknown(metricskey.LabelBrokerName, tags), + } + + metricLabels := map[string]string{} + for k, v := range tags { + // Keep the metrics labels that are not resource labels + if !metricskey.KnativeBrokerLabels.Has(k) { + metricLabels[k] = v + } + } + + return metricLabels, kb +} + +func GetKnativeTriggerMonitoredResource( + des *metricdata.Descriptor, tags map[string]string, gm *gcpMetadata) (map[string]string, monitoredresource.Interface) { + kt := &KnativeTrigger{ + // The first three resource labels are from metadata. + Project: gm.project, + Location: gm.location, + ClusterName: gm.cluster, + // The rest resource labels are from metrics labels. + NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tags), + BrokerName: valueOrUnknown(metricskey.LabelBrokerName, tags), + TriggerName: valueOrUnknown(metricskey.LabelTriggerName, tags), + } + + metricLabels := map[string]string{} + for k, v := range tags { + // Keep the metrics labels that are not resource labels + if !metricskey.KnativeTriggerLabels.Has(k) { + metricLabels[k] = v + } + } + + return metricLabels, kt +} + +func GetKnativeSourceMonitoredResource( + des *metricdata.Descriptor, tags map[string]string, gm *gcpMetadata) (map[string]string, monitoredresource.Interface) { + ks := &KnativeSource{ + // The first three resource labels are from metadata. + Project: gm.project, + Location: gm.location, + ClusterName: gm.cluster, + // The rest resource labels are from metrics labels. 
+ NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tags), + SourceName: valueOrUnknown(metricskey.LabelName, tags), + SourceResourceGroup: valueOrUnknown(metricskey.LabelResourceGroup, tags), + } + + metricLabels := map[string]string{} + for k, v := range tags { + // Keep the metrics labels that are not resource labels + if !metricskey.KnativeSourceLabels.Has(k) { + metricLabels[k] = v + } + } + + return metricLabels, ks +} diff --git a/test/vendor/knative.dev/pkg/metrics/monitored_resources_serving.go b/test/vendor/knative.dev/pkg/metrics/monitored_resources_serving.go new file mode 100644 index 0000000000..eae42408b6 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/monitored_resources_serving.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/metric/metricdata" + "knative.dev/pkg/metrics/metricskey" +) + +// TODO should be moved to serving. 
See https://github.com/knative/pkg/issues/608 + +type KnativeRevision struct { + Project string + Location string + ClusterName string + NamespaceName string + ServiceName string + ConfigurationName string + RevisionName string +} + +func (kr *KnativeRevision) MonitoredResource() (resType string, labels map[string]string) { + labels = map[string]string{ + metricskey.LabelProject: kr.Project, + metricskey.LabelLocation: kr.Location, + metricskey.LabelClusterName: kr.ClusterName, + metricskey.LabelNamespaceName: kr.NamespaceName, + metricskey.LabelServiceName: kr.ServiceName, + metricskey.LabelConfigurationName: kr.ConfigurationName, + metricskey.LabelRevisionName: kr.RevisionName, + } + return metricskey.ResourceTypeKnativeRevision, labels +} + +func GetKnativeRevisionMonitoredResource( + des *metricdata.Descriptor, tags map[string]string, gm *gcpMetadata) (map[string]string, monitoredresource.Interface) { + kr := &KnativeRevision{ + // The first three resource labels are from metadata. + Project: gm.project, + Location: gm.location, + ClusterName: gm.cluster, + // The rest resource labels are from metrics labels. 
+ NamespaceName: valueOrUnknown(metricskey.LabelNamespaceName, tags), + ServiceName: valueOrUnknown(metricskey.LabelServiceName, tags), + ConfigurationName: valueOrUnknown(metricskey.LabelConfigurationName, tags), + RevisionName: valueOrUnknown(metricskey.LabelRevisionName, tags), + } + + metricLabels := map[string]string{} + for k, v := range tags { + // Keep the metrics labels that are not resource labels + if !metricskey.KnativeRevisionLabels.Has(k) { + metricLabels[k] = v + } + } + + return metricLabels, kr +} diff --git a/test/vendor/knative.dev/pkg/metrics/opencensus_exporter.go b/test/vendor/knative.dev/pkg/metrics/opencensus_exporter.go new file mode 100644 index 0000000000..2a947f9da4 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/opencensus_exporter.go @@ -0,0 +1,38 @@ +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/stats/view" + "go.uber.org/zap" +) + +func newOpenCensusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + opts := []ocagent.ExporterOption{ocagent.WithServiceName(config.component)} + if config.collectorAddress != "" { + opts = append(opts, ocagent.WithAddress(config.collectorAddress)) + } + if !config.requireSecure { + opts = append(opts, ocagent.WithInsecure()) + } + e, err := ocagent.NewExporter(opts...) 
+ if err != nil { + logger.Errorw("Failed to create the OpenCensus exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created OpenCensus exporter with config: %+v.", *config) + view.RegisterExporter(e) + return e, nil +} diff --git a/test/vendor/knative.dev/pkg/metrics/prometheus_exporter.go b/test/vendor/knative.dev/pkg/metrics/prometheus_exporter.go new file mode 100644 index 0000000000..b83b23a0d2 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net/http" + "sync" + + "contrib.go.opencensus.io/exporter/prometheus" + "go.opencensus.io/stats/view" + "go.uber.org/zap" +) + +var ( + curPromSrv *http.Server + curPromSrvMux sync.Mutex +) + +func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component}) + if err != nil { + logger.Errorw("Failed to create the Prometheus exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created Opencensus Prometheus exporter with config: %v. 
Start the server for Prometheus exporter.", config) + // Start the server for Prometheus scraping + go func() { + srv := startNewPromSrv(e, config.prometheusPort) + srv.ListenAndServe() + }() + return e, nil +} + +func getCurPromSrv() *http.Server { + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + return curPromSrv +} + +func resetCurPromSrv() { + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + curPromSrv = nil + } +} + +func startNewPromSrv(e *prometheus.Exporter, port int) *http.Server { + sm := http.NewServeMux() + sm.Handle("/metrics", e) + curPromSrvMux.Lock() + defer curPromSrvMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + } + curPromSrv = &http.Server{ + Addr: fmt.Sprintf(":%v", port), + Handler: sm, + } + return curPromSrv +} diff --git a/test/vendor/knative.dev/pkg/metrics/record.go b/test/vendor/knative.dev/pkg/metrics/record.go new file mode 100644 index 0000000000..397f9be77c --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/record.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "go.opencensus.io/stats" +) + +// TODO should be properly refactored and pieces should move to eventing and serving, as appropriate. +// See https://github.com/knative/pkg/issues/608 + +// Record stores the given Measurement from `ms` in the current metrics backend. 
+func Record(ctx context.Context, ms stats.Measurement, ros ...stats.Options) { + mc := getCurMetricsConfig() + + mc.Record(ctx, ms, ros...) +} + +// Buckets125 generates an array of buckets with approximate powers-of-two +// buckets that also aligns with powers of 10 on every 3rd step. This can +// be used to create a view.Distribution. +func Buckets125(low, high float64) []float64 { + buckets := []float64{low} + for last := low; last < high; last = last * 10 { + buckets = append(buckets, 2*last, 5*last, 10*last) + } + return buckets +} + +// BucketsNBy10 generates an array of N buckets starting from low and +// multiplying by 10 n times. +func BucketsNBy10(low float64, n int) []float64 { + buckets := []float64{low} + for last, i := low, len(buckets); i < n; last, i = 10*last, i+1 { + buckets = append(buckets, 10*last) + } + return buckets +} diff --git a/test/vendor/knative.dev/pkg/metrics/reflector.go b/test/vendor/knative.dev/pkg/metrics/reflector.go new file mode 100644 index 0000000000..548b811b60 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/reflector.go @@ -0,0 +1,176 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "k8s.io/client-go/tools/cache" +) + +// ReflectorProvider implements reflector.MetricsProvider and may be used with +// reflector.SetProvider to have metrics exported to the provided metrics. 
+type ReflectorProvider struct { + ItemsInList *stats.Float64Measure + // TODO(mattmoor): This is not in the latest version, so it will + // be removed in a future version. + ItemsInMatch *stats.Float64Measure + ItemsInWatch *stats.Float64Measure + LastResourceVersion *stats.Float64Measure + ListDuration *stats.Float64Measure + Lists *stats.Int64Measure + ShortWatches *stats.Int64Measure + WatchDuration *stats.Float64Measure + Watches *stats.Int64Measure +} + +var ( + _ cache.MetricsProvider = (*ReflectorProvider)(nil) +) + +// NewItemsInListMetric implements MetricsProvider +func (rp *ReflectorProvider) NewItemsInListMetric(name string) cache.SummaryMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.ItemsInList, + } +} + +// ItemsInListView returns a view of the ItemsInList metric. +func (rp *ReflectorProvider) ItemsInListView() *view.View { + return measureView(rp.ItemsInList, view.Distribution(BucketsNBy10(0.1, 6)...)) +} + +// NewItemsInMatchMetric implements MetricsProvider +func (rp *ReflectorProvider) NewItemsInMatchMetric(name string) cache.SummaryMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.ItemsInMatch, + } +} + +// ItemsInMatchView returns a view of the ItemsInMatch metric. +func (rp *ReflectorProvider) ItemsInMatchView() *view.View { + return measureView(rp.ItemsInMatch, view.Distribution(BucketsNBy10(0.1, 6)...)) +} + +// NewItemsInWatchMetric implements MetricsProvider +func (rp *ReflectorProvider) NewItemsInWatchMetric(name string) cache.SummaryMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.ItemsInWatch, + } +} + +// ItemsInWatchView returns a view of the ItemsInWatch metric. 
+func (rp *ReflectorProvider) ItemsInWatchView() *view.View { + return measureView(rp.ItemsInWatch, view.Distribution(BucketsNBy10(0.1, 6)...)) +} + +// NewLastResourceVersionMetric implements MetricsProvider +func (rp *ReflectorProvider) NewLastResourceVersionMetric(name string) cache.GaugeMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.LastResourceVersion, + } +} + +// LastResourceVersionView returns a view of the LastResourceVersion metric. +func (rp *ReflectorProvider) LastResourceVersionView() *view.View { + return measureView(rp.LastResourceVersion, view.LastValue()) +} + +// NewListDurationMetric implements MetricsProvider +func (rp *ReflectorProvider) NewListDurationMetric(name string) cache.SummaryMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.ListDuration, + } +} + +// ListDurationView returns a view of the ListDuration metric. +func (rp *ReflectorProvider) ListDurationView() *view.View { + return measureView(rp.ListDuration, view.Distribution(BucketsNBy10(0.1, 6)...)) +} + +// NewListsMetric implements MetricsProvider +func (rp *ReflectorProvider) NewListsMetric(name string) cache.CounterMetric { + return counterMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.Lists, + } +} + +// ListsView returns a view of the Lists metric. +func (rp *ReflectorProvider) ListsView() *view.View { + return measureView(rp.Lists, view.Count()) +} + +// NewShortWatchesMetric implements MetricsProvider +func (rp *ReflectorProvider) NewShortWatchesMetric(name string) cache.CounterMetric { + return counterMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.ShortWatches, + } +} + +// ShortWatchesView returns a view of the ShortWatches metric. 
+func (rp *ReflectorProvider) ShortWatchesView() *view.View { + return measureView(rp.ShortWatches, view.Count()) +} + +// NewWatchDurationMetric implements MetricsProvider +func (rp *ReflectorProvider) NewWatchDurationMetric(name string) cache.SummaryMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.WatchDuration, + } +} + +// WatchDurationView returns a view of the WatchDuration metric. +func (rp *ReflectorProvider) WatchDurationView() *view.View { + return measureView(rp.WatchDuration, view.Distribution(BucketsNBy10(0.1, 6)...)) +} + +// NewWatchesMetric implements MetricsProvider +func (rp *ReflectorProvider) NewWatchesMetric(name string) cache.CounterMetric { + return counterMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: rp.Watches, + } +} + +// WatchesView returns a view of the Watches metric. +func (rp *ReflectorProvider) WatchesView() *view.View { + return measureView(rp.Watches, view.Count()) +} + +// DefaultViews returns a list of views suitable for passing to view.Register +func (rp *ReflectorProvider) DefaultViews() []*view.View { + return []*view.View{ + rp.ItemsInListView(), + rp.ItemsInMatchView(), + rp.ItemsInWatchView(), + rp.LastResourceVersionView(), + rp.ListDurationView(), + rp.ListsView(), + rp.ShortWatchesView(), + rp.WatchDurationView(), + rp.WatchesView(), + } +} diff --git a/test/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go b/test/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go new file mode 100644 index 0000000000..90122f6fd4 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go @@ -0,0 +1,263 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "path" + "sync" + + "contrib.go.opencensus.io/exporter/stackdriver" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + "google.golang.org/api/option" + "knative.dev/pkg/metrics/metricskey" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + customMetricTypePrefix = "custom.googleapis.com" + // defaultCustomMetricSubDomain is the default subdomain to use for unsupported metrics by monitored resource types. + // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor + defaultCustomMetricSubDomain = "knative.dev" + // StackdriverSecretNamespaceDefault is the default namespace to search for a k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. + StackdriverSecretNamespaceDefault = "default" + // StackdriverSecretNameDefault is the default name of the k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. + StackdriverSecretNameDefault = "stackdriver-service-account-key" + // secretDataFieldKey is the name of the k8s Secret field that contains the Secret's key. + secretDataFieldKey = "key.json" +) + +var ( + // gcpMetadataFunc is the function used to fetch GCP metadata. + // In product usage, this is always set to function retrieveGCPMetadata. + // In unit tests this is set to a fake one to avoid calling GCP metadata + // service. 
+ gcpMetadataFunc func() *gcpMetadata + + // newStackdriverExporterFunc is the function used to create new stackdriver + // exporter. + // In product usage, this is always set to function newOpencensusSDExporter. + // In unit tests this is set to a fake one to avoid calling actual Google API + // service. + newStackdriverExporterFunc func(stackdriver.Options) (view.Exporter, error) + + // kubeclient is the in-cluster Kubernetes kubeclient, which is lazy-initialized on first use. + kubeclient *kubernetes.Clientset + // initClientOnce is the lazy initializer for kubeclient. + initClientOnce sync.Once + // kubeclientInitErr capture an error during initClientOnce + kubeclientInitErr error + + // stackdriverMtx protects setting secretNamespace and secretName and useStackdriverSecretEnabled + stackdriverMtx sync.RWMutex + // secretName is the name of the k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. + secretName = StackdriverSecretNameDefault + // secretNamespace is the namespace to search for a k8s Secret to pass to Stackdriver client to authenticate with Stackdriver. + secretNamespace = StackdriverSecretNamespaceDefault + // useStackdriverSecretEnabled specifies whether or not the exporter can be configured with a Secret. + // Consuming packages must do explicitly enable this by calling SetStackdriverSecretLocation. + useStackdriverSecretEnabled = false +) + +// SetStackdriverSecretLocation sets the name and namespace of the Secret that can be used to authenticate with Stackdriver. +// The Secret is only used if both: +// 1. This function has been explicitly called to set the name and namespace +// 2. 
Users set metricsConfig.stackdriverClientConfig.UseSecret to "true" +func SetStackdriverSecretLocation(name string, namespace string) { + stackdriverMtx.Lock() + defer stackdriverMtx.Unlock() + secretName = name + secretNamespace = namespace + useStackdriverSecretEnabled = true +} + +func init() { + // Set gcpMetadataFunc to call GCP metadata service. + gcpMetadataFunc = retrieveGCPMetadata + newStackdriverExporterFunc = newOpencensusSDExporter + + kubeclientInitErr = nil +} + +func newOpencensusSDExporter(o stackdriver.Options) (view.Exporter, error) { + e, err := stackdriver.NewExporter(o) + if err == nil { + // Start the exporter. + // TODO(https://github.com/knative/pkg/issues/866): Move this to an interface. + e.StartMetricsExporter() + } + return e, nil +} + +// TODO should be properly refactored to be able to inject the getResourceByDescriptorFunc function. +// See https://github.com/knative/pkg/issues/608 +func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + gm := getMergedGCPMetadata(config) + mpf := getMetricPrefixFunc(config.stackdriverMetricTypePrefix, config.stackdriverCustomMetricTypePrefix) + co, err := getStackdriverExporterClientOptions(&config.stackdriverClientConfig) + if err != nil { + logger.Warnw("Issue configuring Stackdriver exporter client options, no additional client options will be used: ", zap.Error(err)) + } + // Automatically fall back on Google application default credentials + e, err := newStackdriverExporterFunc(stackdriver.Options{ + ProjectID: gm.project, + Location: gm.location, + MonitoringClientOptions: co, + TraceClientOptions: co, + GetMetricPrefix: mpf, + ResourceByDescriptor: getResourceByDescriptorFunc(config.stackdriverMetricTypePrefix, gm), + ReportingInterval: config.reportingPeriod, + DefaultMonitoringLabels: &stackdriver.Labels{}, + }) + if err != nil { + logger.Errorw("Failed to create the Stackdriver exporter: ", zap.Error(err)) + return nil, err + } + 
logger.Infof("Created Opencensus Stackdriver exporter with config %v", config) + return e, nil +} + +// getStackdriverExporterClientOptions creates client options for the opencensus Stackdriver exporter from the given stackdriverClientConfig. +// On error, an empty array of client options is returned. +func getStackdriverExporterClientOptions(sdconfig *StackdriverClientConfig) ([]option.ClientOption, error) { + var co []option.ClientOption + if sdconfig.UseSecret && useStackdriverSecretEnabled { + secret, err := getStackdriverSecret(sdconfig) + if err != nil { + return co, err + } + + if opt, err := convertSecretToExporterOption(secret); err == nil { + co = append(co, opt) + } else { + return co, err + } + } + + return co, nil +} + +// getMergedGCPMetadata returns GCP metadata required to export metrics +// to Stackdriver. Values can come from the GCE metadata server or the config. +// Values explicitly set in the config take the highest precedent. +func getMergedGCPMetadata(config *metricsConfig) *gcpMetadata { + gm := gcpMetadataFunc() + if config.stackdriverClientConfig.ProjectID != "" { + gm.project = config.stackdriverClientConfig.ProjectID + } + + if config.stackdriverClientConfig.GCPLocation != "" { + gm.location = config.stackdriverClientConfig.GCPLocation + } + + if config.stackdriverClientConfig.ClusterName != "" { + gm.cluster = config.stackdriverClientConfig.ClusterName + } + + return gm +} + +func getResourceByDescriptorFunc(metricTypePrefix string, gm *gcpMetadata) func(*metricdata.Descriptor, map[string]string) (map[string]string, monitoredresource.Interface) { + return func(des *metricdata.Descriptor, tags map[string]string) (map[string]string, monitoredresource.Interface) { + metricType := path.Join(metricTypePrefix, des.Name) + if metricskey.KnativeRevisionMetrics.Has(metricType) { + return GetKnativeRevisionMonitoredResource(des, tags, gm) + } else if metricskey.KnativeBrokerMetrics.Has(metricType) { + return 
GetKnativeBrokerMonitoredResource(des, tags, gm) + } else if metricskey.KnativeTriggerMetrics.Has(metricType) { + return GetKnativeTriggerMonitoredResource(des, tags, gm) + } else if metricskey.KnativeSourceMetrics.Has(metricType) { + return GetKnativeSourceMonitoredResource(des, tags, gm) + } + // Unsupported metric by knative_revision, knative_broker, knative_trigger, and knative_source, use "global" resource type. + return getGlobalMonitoredResource(des, tags) + } +} + +func getGlobalMonitoredResource(des *metricdata.Descriptor, tags map[string]string) (map[string]string, monitoredresource.Interface) { + return tags, &Global{} +} + +func getMetricPrefixFunc(metricTypePrefix, customMetricTypePrefix string) func(name string) string { + return func(name string) string { + metricType := path.Join(metricTypePrefix, name) + inServing := metricskey.KnativeRevisionMetrics.Has(metricType) + inEventing := metricskey.KnativeBrokerMetrics.Has(metricType) || + metricskey.KnativeTriggerMetrics.Has(metricType) || + metricskey.KnativeSourceMetrics.Has(metricType) + if inServing || inEventing { + return metricTypePrefix + } + // Unsupported metric by knative_revision, use custom domain. + return customMetricTypePrefix + } +} + +// getStackdriverSecret returns the Kubernetes Secret specified in the given config. 
+// TODO(anniefu): Update exporter if Secret changes (https://github.com/knative/pkg/issues/842) +func getStackdriverSecret(sdconfig *StackdriverClientConfig) (*corev1.Secret, error) { + if err := ensureKubeclient(); err != nil { + return nil, err + } + + stackdriverMtx.RLock() + defer stackdriverMtx.RUnlock() + + sec, secErr := kubeclient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + + if secErr != nil { + return nil, fmt.Errorf("Error getting Secret [%v] in namespace [%v]: %v", secretName, secretNamespace, secErr) + } + + return sec, nil +} + +// convertSecretToExporterOption converts a Kubernetes Secret to an OpenCensus Stackdriver Exporter Option. +func convertSecretToExporterOption(secret *corev1.Secret) (option.ClientOption, error) { + if data, ok := secret.Data[secretDataFieldKey]; ok { + return option.WithCredentialsJSON(data), nil + } + return nil, fmt.Errorf("Expected Secret to store key in data field named [%v]", secretDataFieldKey) +} + +// ensureKubeclient is the lazy initializer for kubeclient. +func ensureKubeclient() error { + // initClientOnce is only run once and cannot return error, so kubeclientInitErr is used to capture errors. + initClientOnce.Do(func() { + config, err := rest.InClusterConfig() + if err != nil { + kubeclientInitErr = err + return + } + + cs, err := kubernetes.NewForConfig(config) + if err != nil { + kubeclientInitErr = err + return + } + kubeclient = cs + }) + + return kubeclientInitErr +} diff --git a/test/vendor/knative.dev/pkg/metrics/utils.go b/test/vendor/knative.dev/pkg/metrics/utils.go new file mode 100644 index 0000000000..7345cec920 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/utils.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "strconv" + +// ResponseCodeClass converts an HTTP response code to a string representing its response code class. +// E.g., The response code class is "5xx" for response code 503. +func ResponseCodeClass(responseCode int) string { + // Get the hundred digit of the response code and concatenate "xx". + return strconv.Itoa(responseCode/100) + "xx" +} diff --git a/test/vendor/knative.dev/pkg/metrics/workqueue.go b/test/vendor/knative.dev/pkg/metrics/workqueue.go new file mode 100644 index 0000000000..80a29cdc44 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/workqueue.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "k8s.io/client-go/util/workqueue" +) + +// WorkqueueProvider implements workqueue.MetricsProvider and may be used with +// workqueue.SetProvider to have metrics exported to the provided metrics. 
+type WorkqueueProvider struct { + Adds *stats.Int64Measure + Depth *stats.Int64Measure + Latency *stats.Float64Measure + UnfinishedWorkSeconds *stats.Float64Measure + LongestRunningProcessorSeconds *stats.Float64Measure + Retries *stats.Int64Measure + WorkDuration *stats.Float64Measure +} + +var ( + _ workqueue.MetricsProvider = (*WorkqueueProvider)(nil) +) + +// NewAddsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return counterMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.Adds, + } +} + +// AddsView returns a view of the Adds metric. +func (wp *WorkqueueProvider) AddsView() *view.View { + return measureView(wp.Adds, view.Count()) +} + +// NewDepthMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return &gaugeMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.Depth, + } +} + +// DepthView returns a view of the Depth metric. +func (wp *WorkqueueProvider) DepthView() *view.View { + return measureView(wp.Depth, view.LastValue()) +} + +// NewLatencyMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.Latency, + } +} + +// LatencyView returns a view of the Latency metric. +func (wp *WorkqueueProvider) LatencyView() *view.View { + return measureView(wp.Latency, view.Distribution(BucketsNBy10(1e-08, 10)...)) +} + +// NewLongestRunningProcessorSecondsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.LongestRunningProcessorSeconds, + } +} + +// LongestRunningProcessorSecondsView returns a view of the LongestRunningProcessorSeconds metric. 
+func (wp *WorkqueueProvider) LongestRunningProcessorSecondsView() *view.View { + return measureView(wp.LongestRunningProcessorSeconds, view.Distribution(BucketsNBy10(1e-08, 10)...)) +} + +// NewRetriesMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return counterMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.Retries, + } +} + +// RetriesView returns a view of the Retries metric. +func (wp *WorkqueueProvider) RetriesView() *view.View { + return measureView(wp.Retries, view.Count()) +} + +// NewUnfinishedWorkSecondsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.UnfinishedWorkSeconds, + } +} + +// UnfinishedWorkSecondsView returns a view of the UnfinishedWorkSeconds metric. +func (wp *WorkqueueProvider) UnfinishedWorkSecondsView() *view.View { + return measureView(wp.UnfinishedWorkSeconds, view.Distribution(BucketsNBy10(1e-08, 10)...)) +} + +// NewWorkDurationMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return floatMetric{ + mutators: []tag.Mutator{tag.Insert(tagName, name)}, + measure: wp.WorkDuration, + } +} + +// WorkDurationView returns a view of the WorkDuration metric. 
+func (wp *WorkqueueProvider) WorkDurationView() *view.View { + return measureView(wp.WorkDuration, view.Distribution(BucketsNBy10(1e-08, 10)...)) +} + +// DefaultViews returns a list of views suitable for passing to view.Register +func (wp *WorkqueueProvider) DefaultViews() []*view.View { + return []*view.View{ + wp.AddsView(), + wp.DepthView(), + wp.LatencyView(), + wp.RetriesView(), + wp.WorkDurationView(), + wp.UnfinishedWorkSecondsView(), + wp.LongestRunningProcessorSecondsView(), + } +} + +// NewDeprecatedAddsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedAddsMetric(name string) workqueue.CounterMetric { + return noopMetric{} +} + +// NewDeprecatedDepthMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedDepthMetric(name string) workqueue.GaugeMetric { + return noopMetric{} +} + +// NewDeprecatedLatencyMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedLatencyMetric(name string) workqueue.SummaryMetric { + return noopMetric{} +} + +// NewDeprecatedLongestRunningProcessorMicrosecondsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { + return noopMetric{} +} + +// NewDeprecatedRetriesMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedRetriesMetric(name string) workqueue.CounterMetric { + return noopMetric{} +} + +// NewDeprecatedUnfinishedWorkSecondsMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return noopMetric{} +} + +// NewDeprecatedWorkDurationMetric implements MetricsProvider +func (wp *WorkqueueProvider) NewDeprecatedWorkDurationMetric(name string) workqueue.SummaryMetric { + return noopMetric{} +} diff --git a/test/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go new 
file mode 100644 index 0000000000..822bb4db22 --- /dev/null +++ b/test/vendor/knative.dev/pkg/metrics/zz_generated.deepcopy.go @@ -0,0 +1,37 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package metrics + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfig) DeepCopyInto(out *ObservabilityConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfig. +func (in *ObservabilityConfig) DeepCopy() *ObservabilityConfig { + if in == nil { + return nil + } + out := new(ObservabilityConfig) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/pkg/network/doc.go b/test/vendor/knative.dev/pkg/network/doc.go new file mode 100644 index 0000000000..133ce55ea8 --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package network holds the typed objects that define the schemas for +// configuring the knative networking layer. +package network diff --git a/test/vendor/knative.dev/pkg/network/domain.go b/test/vendor/knative.dev/pkg/network/domain.go new file mode 100644 index 0000000000..bc2ef6a994 --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/domain.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + "sync" +) + +const ( + resolverFileName = "/etc/resolv.conf" + defaultDomainName = "cluster.local" +) + +var ( + domainName string + once sync.Once +) + +// GetServiceHostname returns the fully qualified service hostname +func GetServiceHostname(name string, namespace string) string { + return fmt.Sprintf("%s.%s.svc.%s", name, namespace, GetClusterDomainName()) +} + +// GetClusterDomainName returns cluster's domain name or an error +// Closes issue: https://github.com/knative/eventing/issues/714 +func GetClusterDomainName() string { + once.Do(func() { + f, err := os.Open(resolverFileName) + if err == nil { + defer f.Close() + domainName = getClusterDomainName(f) + + } else { + domainName = defaultDomainName + } + }) + + return domainName +} + +func getClusterDomainName(r io.Reader) string { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + elements := strings.Split(scanner.Text(), " ") + if elements[0] != "search" { + continue + } + for i := 1; i < len(elements)-1; i++ { + if strings.HasPrefix(elements[i], "svc.") { + return strings.TrimSuffix(elements[i][4:], ".") + } + } + } + // For all abnormal cases return default domain name + return defaultDomainName +} diff --git a/test/vendor/knative.dev/pkg/network/error_handler.go b/test/vendor/knative.dev/pkg/network/error_handler.go new file mode 100644 index 0000000000..486518b966 --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/error_handler.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "io/ioutil" + "net/http" + + "go.uber.org/zap" +) + +// ErrorHandler sets up a handler suitable for use with the ErrorHandler field on +// httputil's reverse proxy. +func ErrorHandler(logger *zap.SugaredLogger) func(http.ResponseWriter, *http.Request, error) { + return func(w http.ResponseWriter, req *http.Request, err error) { + ss := readSockStat(logger) + logger.Errorw("error reverse proxying request; sockstat: "+ss, zap.Error(err)) + http.Error(w, err.Error(), http.StatusBadGateway) + } +} + +func readSockStat(logger *zap.SugaredLogger) string { + b, err := ioutil.ReadFile("/proc/net/sockstat") + if err != nil { + logger.Errorw("Unable to read sockstat", zap.Error(err)) + return "" + } + return string(b) +} diff --git a/test/vendor/knative.dev/pkg/network/h2c.go b/test/vendor/knative.dev/pkg/network/h2c.go new file mode 100644 index 0000000000..683bbebe09 --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/h2c.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" +) + +// NewServer returns a new HTTP Server with HTTP2 handler. +func NewServer(addr string, h http.Handler) *http.Server { + h1s := &http.Server{ + Addr: addr, + Handler: h2c.NewHandler(h, &http2.Server{}), + } + + return h1s +} + +// NewH2CTransport constructs a new H2C transport. +// That transport will reroute all HTTPS traffic to HTTP. This is +// to explicitly allow h2c (http2 without TLS) transport. +// See https://github.com/golang/go/issues/14141 for more details. +func NewH2CTransport() http.RoundTripper { + return &http2.Transport{ + AllowHTTP: true, + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + d := &net.Dialer{ + Timeout: DefaultConnTimeout, + KeepAlive: 5 * time.Second, + DualStack: true, + } + return d.Dial(netw, addr) + }, + } +} diff --git a/test/vendor/knative.dev/pkg/network/network.go b/test/vendor/knative.dev/pkg/network/network.go new file mode 100644 index 0000000000..32015ada3d --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/network.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "time" +) + +const ( + // DefaultConnTimeout specifies a short default connection timeout + // to avoid hitting the issue fixed in + // https://github.com/kubernetes/kubernetes/pull/72534 but only + // avalailable after Kubernetes 1.14. + // + // Our connections are usually between pods in the same cluster + // like activator <-> queue-proxy, or even between containers + // within the same pod queue-proxy <-> user-container, so a + // smaller connect timeout would be justifiable. + // + // We should consider exposing this as a configuration. + DefaultConnTimeout = 200 * time.Millisecond + + // UserAgentKey is the constant for header "User-Agent". + UserAgentKey = "User-Agent" + + // ProbeHeaderName is the name of a header that can be added to + // requests to probe the knative networking layer. Requests + // with this header will not be passed to the user container or + // included in request metrics. + ProbeHeaderName = "K-Network-Probe" +) diff --git a/test/vendor/knative.dev/pkg/network/transports.go b/test/vendor/knative.dev/pkg/network/transports.go new file mode 100644 index 0000000000..35747fa34d --- /dev/null +++ b/test/vendor/knative.dev/pkg/network/transports.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "context" + "errors" + "net" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// RoundTripperFunc implementation roundtrips a request. +type RoundTripperFunc func(*http.Request) (*http.Response, error) + +// RoundTrip implements http.RoundTripper. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +func newAutoTransport(v1 http.RoundTripper, v2 http.RoundTripper) http.RoundTripper { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + t := v1 + if r.ProtoMajor == 2 { + t = v2 + } + return t.RoundTrip(r) + }) +} + +const sleepTO = 30 * time.Millisecond + +var backOffTemplate = wait.Backoff{ + Duration: 50 * time.Millisecond, + Factor: 1.4, + Jitter: 0.1, // At most 10% jitter. + Steps: 15, +} + +var errDialTimeout = errors.New("timed out dialing") + +// dialWithBackOff executes `net.Dialer.DialContext()` with exponentially increasing +// dial timeouts. In addition it sleeps with random jitter between tries. +func dialWithBackOff(ctx context.Context, network, address string) (net.Conn, error) { + return dialBackOffHelper(ctx, network, address, backOffTemplate, sleepTO) +} + +func dialBackOffHelper(ctx context.Context, network, address string, bo wait.Backoff, sleep time.Duration) (net.Conn, error) { + dialer := &net.Dialer{ + Timeout: bo.Duration, // Initial duration. + KeepAlive: 5 * time.Second, + DualStack: true, + } + for { + c, err := dialer.DialContext(ctx, network, address) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + if bo.Steps < 1 { + break + } + dialer.Timeout = bo.Step() + time.Sleep(wait.Jitter(sleep, 1.0)) // Sleep with jitter. 
+ continue + } + return nil, err + } + return c, nil + } + return nil, errDialTimeout +} + +func newHTTPTransport(connTimeout time.Duration, disableKeepAlives bool) http.RoundTripper { + return &http.Transport{ + // Those match net/http/transport.go + Proxy: http.ProxyFromEnvironment, + MaxIdleConns: 1000, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 5 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: disableKeepAlives, + + // This is bespoke. + DialContext: dialWithBackOff, + } +} + +// NewProberTransport creates a RoundTripper that is useful for probing, +// since it will not cache connections. +func NewProberTransport() http.RoundTripper { + return newAutoTransport( + newHTTPTransport(DefaultConnTimeout, true /*disable keep-alives*/), + NewH2CTransport()) +} + +// NewAutoTransport creates a RoundTripper that can use appropriate transport +// based on the request's HTTP version. +func NewAutoTransport() http.RoundTripper { + return newAutoTransport( + newHTTPTransport(DefaultConnTimeout, false /*disable keep-alives*/), + NewH2CTransport()) +} + +// AutoTransport uses h2c for HTTP2 requests and falls back to `http.DefaultTransport` for all others +var AutoTransport = NewAutoTransport() diff --git a/test/vendor/knative.dev/pkg/profiling/server.go b/test/vendor/knative.dev/pkg/profiling/server.go new file mode 100644 index 0000000000..fe27ac04e5 --- /dev/null +++ b/test/vendor/knative.dev/pkg/profiling/server.go @@ -0,0 +1,116 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package profiling + +import ( + "fmt" + "net/http" + "net/http/pprof" + "strconv" + "sync/atomic" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +const ( + // ProfilingPort specifies the port where profiling data is available when profiling is enabled + ProfilingPort = 8008 + + // profilingKey is the name of the key in config-observability config map + // that indicates whether profiling is enabled + profilingKey = "profiling.enable" +) + +// Handler holds the main HTTP handler and a flag indicating +// whether the handler is active +type Handler struct { + enabled int32 + handler http.Handler + log *zap.SugaredLogger +} + +// NewHandler create a new ProfilingHandler which serves runtime profiling data +// according to the given context path +func NewHandler(logger *zap.SugaredLogger, enableProfiling bool) *Handler { + const pprofPrefix = "/debug/pprof/" + + mux := http.NewServeMux() + mux.HandleFunc(pprofPrefix, pprof.Index) + mux.HandleFunc(pprofPrefix+"cmdline", pprof.Cmdline) + mux.HandleFunc(pprofPrefix+"profile", pprof.Profile) + mux.HandleFunc(pprofPrefix+"symbol", pprof.Symbol) + mux.HandleFunc(pprofPrefix+"trace", pprof.Trace) + + logger.Infof("Profiling enabled: %t", enableProfiling) + return &Handler{ + enabled: boolToInt32(enableProfiling), + handler: mux, + log: logger, + } +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if atomic.LoadInt32(&h.enabled) == 1 { + h.handler.ServeHTTP(w, r) + } else { + http.NotFoundHandler().ServeHTTP(w, r) + } +} + +func ReadProfilingFlag(config map[string]string) (bool, error) { + profiling, ok := config[profilingKey] + if !ok { + return false, nil + } + enabled, err := strconv.ParseBool(profiling) + if err != nil { + return false, fmt.Errorf("failed to parse the profiling flag: %w", err) + } + return enabled, nil +} + +// UpdateFromConfigMap modifies the Enabled flag in the 
Handler +// according to the value in the given ConfigMap +func (h *Handler) UpdateFromConfigMap(configMap *corev1.ConfigMap) { + enabled, err := ReadProfilingFlag(configMap.Data) + if err != nil { + h.log.Errorw("Failed to update the profiling flag", zap.Error(err)) + return + } + + new := boolToInt32(enabled) + old := atomic.SwapInt32(&h.enabled, new) + if old != new { + h.log.Infof("Profiling enabled: %t", enabled) + } +} + +// NewServer creates a new http server that exposes profiling data on the default profiling port +func NewServer(handler http.Handler) *http.Server { + return &http.Server{ + Addr: ":" + strconv.Itoa(ProfilingPort), + Handler: handler, + } +} + +func boolToInt32(b bool) int32 { + if b { + return 1 + } + return 0 +} diff --git a/test/vendor/github.com/knative/pkg/ptr/doc.go b/test/vendor/knative.dev/pkg/ptr/doc.go similarity index 100% rename from test/vendor/github.com/knative/pkg/ptr/doc.go rename to test/vendor/knative.dev/pkg/ptr/doc.go diff --git a/test/vendor/github.com/knative/pkg/ptr/ptr.go b/test/vendor/knative.dev/pkg/ptr/ptr.go similarity index 76% rename from test/vendor/github.com/knative/pkg/ptr/ptr.go rename to test/vendor/knative.dev/pkg/ptr/ptr.go index 3564647338..a3bfef85c6 100644 --- a/test/vendor/github.com/knative/pkg/ptr/ptr.go +++ b/test/vendor/knative.dev/pkg/ptr/ptr.go @@ -16,6 +16,8 @@ limitations under the License. package ptr +import "time" + // Int32 is a helper for turning integers into pointers for use in // API types that want *int32. func Int32(i int32) *int32 { @@ -39,3 +41,15 @@ func Bool(b bool) *bool { func String(s string) *string { return &s } + +// Duration is a helper for turning time.Duration into pointers for use in +// API types that want *time.Duration. +func Duration(t time.Duration) *time.Duration { + return &t +} + +// Time is a helper for turning a const time.Time into a pointer for use in +// API types that want *time.Duration. 
+func Time(t time.Time) *time.Time { + return &t +} diff --git a/test/vendor/knative.dev/pkg/signals/signal.go b/test/vendor/knative.dev/pkg/signals/signal.go new file mode 100644 index 0000000000..b9ba9eaf35 --- /dev/null +++ b/test/vendor/knative.dev/pkg/signals/signal.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "context" + "errors" + "os" + "os/signal" + "time" +) + +var onlyOneSignalHandler = make(chan struct{}) + +// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned +// which is closed on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +func SetupSignalHandler() (stopCh <-chan struct{}) { + close(onlyOneSignalHandler) // panics when called twice + + stop := make(chan struct{}) + c := make(chan os.Signal, 2) + signal.Notify(c, shutdownSignals...) + go func() { + <-c + close(stop) + <-c + os.Exit(1) // second signal. Exit directly. + }() + + return stop +} + +// NewContext creates a new context with SetupSignalHandler() +// as our Done() channel. 
+func NewContext() context.Context { + return &signalContext{stopCh: SetupSignalHandler()} +} + +type signalContext struct { + stopCh <-chan struct{} +} + +// Deadline implements context.Context +func (scc *signalContext) Deadline() (deadline time.Time, ok bool) { + return +} + +// Done implements context.Context +func (scc *signalContext) Done() <-chan struct{} { + return scc.stopCh +} + +// Err implements context.Context +func (scc *signalContext) Err() error { + select { + case _, ok := <-scc.Done(): + if !ok { + return errors.New("received a termination signal") + } + default: + } + return nil +} + +// Value implements context.Context +func (scc *signalContext) Value(key interface{}) interface{} { + return nil +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_defaults.go b/test/vendor/knative.dev/pkg/signals/signal_posix.go similarity index 78% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_defaults.go rename to test/vendor/knative.dev/pkg/signals/signal_posix.go index 2db63076c7..b3537d0e5d 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_defaults.go +++ b/test/vendor/knative.dev/pkg/signals/signal_posix.go @@ -1,3 +1,5 @@ +// +build !windows + /* Copyright 2018 The Knative Authors @@ -14,14 +16,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package signals import ( - "context" - - "github.com/knative/pkg/apis" + "os" + "syscall" ) -func (c *ClusterIngress) SetDefaults(ctx context.Context) { - c.Spec.SetDefaults(apis.WithinSpec(ctx)) -} +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/test/vendor/knative.dev/pkg/signals/signal_windows.go b/test/vendor/knative.dev/pkg/signals/signal_windows.go new file mode 100644 index 0000000000..a5a4026faa --- /dev/null +++ b/test/vendor/knative.dev/pkg/signals/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/test/vendor/knative.dev/pkg/system/clock.go b/test/vendor/knative.dev/pkg/system/clock.go new file mode 100644 index 0000000000..7d99d9b5cd --- /dev/null +++ b/test/vendor/knative.dev/pkg/system/clock.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package system + +import ( + "time" +) + +// Mockable interface for time based testing +type Clock interface { + Now() time.Time +} + +type RealClock struct{} + +func (RealClock) Now() time.Time { + return time.Now() +} diff --git a/test/vendor/knative.dev/pkg/system/env.go b/test/vendor/knative.dev/pkg/system/env.go new file mode 100644 index 0000000000..d9ec3ddac3 --- /dev/null +++ b/test/vendor/knative.dev/pkg/system/env.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package system + +import ( + "fmt" + "os" +) + +const ( + NamespaceEnvKey = "SYSTEM_NAMESPACE" + ResourceLabelEnvKey = "SYSTEM_RESOURCE_LABEL" +) + +// Namespace returns the name of the K8s namespace where our system components +// run. 
+func Namespace() string { + if ns := os.Getenv(NamespaceEnvKey); ns != "" { + return ns + } + + panic(fmt.Sprintf(`The environment variable %q is not set + +If this is a process running on Kubernetes, then it should be using the downward +API to initialize this variable via: + + env: + - name: %s + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +If this is a Go unit test consuming system.Namespace() then it should add the +following import: + +import ( + _ "knative.dev/pkg/system/testing" +)`, NamespaceEnvKey, NamespaceEnvKey)) +} + +// ResourceLabel returns the label key identifying K8s objects our system +// components source their configuration from. +func ResourceLabel() string { + return os.Getenv(ResourceLabelEnvKey) +} diff --git a/test/vendor/knative.dev/pkg/test/e2e_flags.go b/test/vendor/knative.dev/pkg/test/e2e_flags.go index b5d911fdf5..020dce303c 100644 --- a/test/vendor/knative.dev/pkg/test/e2e_flags.go +++ b/test/vendor/knative.dev/pkg/test/e2e_flags.go @@ -20,16 +20,32 @@ limitations under the License. package test import ( + "bytes" "flag" "fmt" "os" "os/user" "path" + "sync" + "text/template" + + _ "github.com/golang/glog" // Needed if glog and klog are to coexist + "k8s.io/klog" + "knative.dev/pkg/test/logging" +) + +const ( + // The recommended default log level https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md + klogDefaultLogLevel = "2" ) -// Flags holds the command line flags or defaults for settings in the user's environment. -// See EnvironmentFlags for a list of supported fields. -var Flags = initializeFlags() +var ( + flagsSetupOnce = &sync.Once{} + klogFlags = flag.NewFlagSet("klog", flag.ExitOnError) + // Flags holds the command line flags or defaults for settings in the user's environment. + // See EnvironmentFlags for a list of supported fields. + Flags = initializeFlags() +) // EnvironmentFlags define the flags that are needed to run the e2e tests. 
type EnvironmentFlags struct { @@ -38,7 +54,7 @@ type EnvironmentFlags struct { Namespace string // K8s namespace (blank by default, to be overwritten by test suite) IngressEndpoint string // Host to use for ingress endpoint LogVerbose bool // Enable verbose logging - EmitMetrics bool // Emit metrics + ImageTemplate string // Template to build the image reference (defaults to {{.Repository}}/{{.Name}}:{{.Tag}}) DockerRepo string // Docker repo (defaults to $KO_DOCKER_REPO) Tag string // Tag for test images } @@ -64,8 +80,8 @@ func initializeFlags() *EnvironmentFlags { flag.BoolVar(&f.LogVerbose, "logverbose", false, "Set this flag to true if you would like to see verbose logging.") - flag.BoolVar(&f.EmitMetrics, "emitmetrics", false, - "Set this flag to true if you would like tests to emit metrics, e.g. latency of resources being realized in the system.") + flag.StringVar(&f.ImageTemplate, "imagetemplate", "{{.Repository}}/{{.Name}}:{{.Tag}}", + "Provide a template to generate the reference to an image from the test. 
Defaults to `{{.Repository}}/{{.Name}}:{{.Tag}}`.") defaultRepo := os.Getenv("KO_DOCKER_REPO") flag.StringVar(&f.DockerRepo, "dockerrepo", defaultRepo, @@ -73,10 +89,65 @@ func initializeFlags() *EnvironmentFlags { flag.StringVar(&f.Tag, "tag", "latest", "Provide the version tag for the test images.") + klog.InitFlags(klogFlags) + flag.Set("v", klogDefaultLogLevel) + flag.Set("alsologtostderr", "true") + return &f } -// ImagePath is a helper function to prefix image name with repo and suffix with tag +func printFlags() { + fmt.Print("Test Flags: {") + flag.CommandLine.VisitAll(func(f *flag.Flag) { + fmt.Printf("'%s': '%s', ", f.Name, f.Value.String()) + }) + fmt.Println("}") +} + +// SetupLoggingFlags initializes the logging libraries at runtime +func SetupLoggingFlags() { + flagsSetupOnce.Do(func() { + // Sync the glog flags to klog + flag.CommandLine.VisitAll(func(f1 *flag.Flag) { + f2 := klogFlags.Lookup(f1.Name) + if f2 != nil { + value := f1.Value.String() + f2.Value.Set(value) + } + }) + if Flags.LogVerbose { + // If klog verbosity is not set to a non-default value (via "-args -v=X"), + if flag.CommandLine.Lookup("v").Value.String() == klogDefaultLogLevel { + // set up verbosity for klog so round_trippers.go prints: + // URL, request headers, response headers, and partial response body + // See levels in vendor/k8s.io/client-go/transport/round_trippers.go:DebugWrappers for other options + klogFlags.Set("v", "8") + flag.Set("v", "8") // This is for glog, since glog=>klog sync is one-time + } + printFlags() + } + logging.InitializeLogger(Flags.LogVerbose) + }) +} + +// ImagePath is a helper function to transform an image name into an image reference that can be pulled. 
func ImagePath(name string) string { - return fmt.Sprintf("%s/%s:%s", Flags.DockerRepo, name, Flags.Tag) + tpl, err := template.New("image").Parse(Flags.ImageTemplate) + if err != nil { + panic("could not parse image template: " + err.Error()) + } + + var buf bytes.Buffer + if err := tpl.Execute(&buf, struct { + Repository string + Name string + Tag string + }{ + Repository: Flags.DockerRepo, + Name: name, + Tag: Flags.Tag, + }); err != nil { + panic("could not apply the image template: " + err.Error()) + } + return buf.String() } diff --git a/test/vendor/knative.dev/pkg/test/helpers/dryrun.go b/test/vendor/knative.dev/pkg/test/helpers/dryrun.go new file mode 100644 index 0000000000..68f7994444 --- /dev/null +++ b/test/vendor/knative.dev/pkg/test/helpers/dryrun.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "log" +) + +// Run can run functions that needs dryrun support. 
+func Run(message string, call func() error, dryrun bool) error { + if dryrun { + log.Printf("[dry run] %s", message) + return nil + } + log.Print(message) + + return call() +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/metadata_validation.go b/test/vendor/knative.dev/pkg/test/helpers/error.go similarity index 57% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/metadata_validation.go rename to test/vendor/knative.dev/pkg/test/helpers/error.go index 2720374c1a..c262a48fa1 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/metadata_validation.go +++ b/test/vendor/knative.dev/pkg/test/helpers/error.go @@ -14,17 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -package serving +// error.go helps with error handling + +package helpers import ( - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/autoscaling" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "errors" + "strings" ) -// ValidateObjectMetadata validates that `metadata` stanza of the -// resources is correct. 
-func ValidateObjectMetadata(meta metav1.Object) *apis.FieldError { - return apis.ValidateObjectMetadata(meta).Also( - autoscaling.ValidateAnnotations(meta.GetAnnotations()).ViaField("annotations")) +// CombineErrors combines slice of errors and return a single error +func CombineErrors(errs []error) error { + if len(errs) == 0 { + return nil + } + msgs := make([]string, 0) + for _, err := range errs { + if err != nil { + msgs = append(msgs, err.Error()) + } + } + if len(msgs) == 0 { + return nil + } + return errors.New(strings.Join(msgs, "\n")) } diff --git a/test/vendor/knative.dev/pkg/test/helpers/name.go b/test/vendor/knative.dev/pkg/test/helpers/name.go new file mode 100644 index 0000000000..97f85fb835 --- /dev/null +++ b/test/vendor/knative.dev/pkg/test/helpers/name.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "log" + "math/rand" + "strings" + "time" + "unicode" + + "knative.dev/pkg/test" +) + +const ( + letterBytes = "abcdefghijklmnopqrstuvwxyz" + randSuffixLen = 8 + sep = '-' + testNamePrefix = "Test" +) + +func init() { + // Properly seed the random number generator so AppendRandomString() is actually random. + // Otherwise, rerunning tests will generate the same names for the test resources, causing conflicts with + // already existing resources. 
+ seed := time.Now().UTC().UnixNano() + log.Printf("Using '%d' to seed the random number generator", seed) + rand.Seed(seed) +} + +// ObjectPrefixForTest returns the name prefix for this test's random names. +func ObjectPrefixForTest(t test.T) string { + return MakeK8sNamePrefix(strings.TrimPrefix(t.Name(), testNamePrefix)) +} + +// ObjectNameForTest generates a random object name based on the test name. +func ObjectNameForTest(t test.T) string { + return AppendRandomString(ObjectPrefixForTest(t)) +} + +// AppendRandomString will generate a random string that begins with prefix. +// This is useful if you want to make sure that your tests can run at the same +// time against the same environment without conflicting. +// This method will use "-" as the separator between the prefix and +// the random suffix. +func AppendRandomString(prefix string) string { + suffix := make([]byte, randSuffixLen) + + for i := range suffix { + suffix[i] = letterBytes[rand.Intn(len(letterBytes))] + } + + return strings.Join([]string{prefix, string(suffix)}, string(sep)) +} + +// MakeK8sNamePrefix converts each chunk of non-alphanumeric character into a single dash +// and also convert camelcase tokens into dash-delimited lowercase tokens. +func MakeK8sNamePrefix(s string) string { + var sb strings.Builder + newToken := false + for _, c := range s { + if !(unicode.IsLetter(c) || unicode.IsNumber(c)) { + newToken = true + continue + } + if sb.Len() > 0 && (newToken || unicode.IsUpper(c)) { + sb.WriteRune(sep) + } + sb.WriteRune(unicode.ToLower(c)) + newToken = false + } + return sb.String() +} + +// GetBaseFuncName returns the baseFuncName parsed from the fullFuncName. +// eg. test/e2e.TestMain will return TestMain. 
+func GetBaseFuncName(fullFuncName string) string { + name := fullFuncName + // Possibly there is no parent package, so only remove it from the name if '/' exists + if strings.ContainsRune(name, '/') { + name = name[strings.LastIndex(name, "/")+1:] + } + name = name[strings.LastIndex(name, ".")+1:] + return name +} diff --git a/test/vendor/knative.dev/pkg/test/kube_checks.go b/test/vendor/knative.dev/pkg/test/kube_checks.go index b15f8ea2bf..35bcbd201b 100644 --- a/test/vendor/knative.dev/pkg/test/kube_checks.go +++ b/test/vendor/knative.dev/pkg/test/kube_checks.go @@ -75,6 +75,52 @@ func WaitForPodListState(client *KubeClient, inState func(p *corev1.PodList) (bo }) } +// WaitForPodState polls the status of the specified Pod +// from client every interval until inState returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took to get into the state checked by inState. +func WaitForPodState(client *KubeClient, inState func(p *corev1.Pod) (bool, error), name string, namespace string) error { + p := client.Kube.CoreV1().Pods(namespace) + span := logging.GetEmitableSpan(context.Background(), "WaitForPodState/"+name) + defer span.End() + + return wait.PollImmediate(interval, podTimeout, func() (bool, error) { + p, err := p.Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + return inState(p) + }) +} + +// WaitForServiceHasAtLeastOneEndpoint polls the status of the specified Service +// from client every interval until number of service endpoints = numOfEndpoints +func WaitForServiceEndpoints(client *KubeClient, svcName string, svcNamespace string, numOfEndpoints int) error { + endpointsService := client.Kube.CoreV1().Endpoints(svcNamespace) + span := logging.GetEmitableSpan(context.Background(), "WaitForServiceHasAtLeastOneEndpoint/"+svcName) + defer span.End() + + return wait.PollImmediate(interval, podTimeout, func() (bool, error) { + endpoint, err := 
endpointsService.Get(svcName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return countEndpointsNum(endpoint) == numOfEndpoints, nil + }) +} + +func countEndpointsNum(e *corev1.Endpoints) int { + if e == nil || e.Subsets == nil { + return 0 + } + num := 0 + for _, sub := range e.Subsets { + num += len(sub.Addresses) + } + return num +} + // GetConfigMap gets the configmaps for a given namespace func GetConfigMap(client *KubeClient, namespace string) k8styped.ConfigMapInterface { return client.Kube.CoreV1().ConfigMaps(namespace) diff --git a/test/vendor/knative.dev/pkg/test/logging/logging.go b/test/vendor/knative.dev/pkg/test/logging/logging.go index 4d9c5a96f7..74ef1d243a 100644 --- a/test/vendor/knative.dev/pkg/test/logging/logging.go +++ b/test/vendor/knative.dev/pkg/test/logging/logging.go @@ -21,13 +21,11 @@ package logging import ( "context" - "flag" "fmt" "strings" "time" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "go.uber.org/zap" @@ -35,9 +33,6 @@ import ( ) const ( - // VerboseLogLevel defines verbose log level as 10 - VerboseLogLevel glog.Level = 10 - // 1 second was chosen arbitrarily metricViewReportingPeriod = 1 * time.Second @@ -134,11 +129,6 @@ func InitializeMetricExporter(context string) { func InitializeLogger(logVerbose bool) { logLevel := "info" if logVerbose { - // Both gLog and "go test" use -v flag. 
The code below is a work around so that we can still set v value for gLog - flag.StringVar(&logLevel, "logLevel", fmt.Sprint(VerboseLogLevel), "verbose log level") - flag.Lookup("v").Value.Set(logLevel) - glog.Infof("Logging set to verbose mode with logLevel %d", VerboseLogLevel) - logLevel = "debug" } diff --git a/test/vendor/knative.dev/pkg/test/request.go b/test/vendor/knative.dev/pkg/test/request.go index 8b38cb29a6..3d10cabc7c 100644 --- a/test/vendor/knative.dev/pkg/test/request.go +++ b/test/vendor/knative.dev/pkg/test/request.go @@ -137,12 +137,12 @@ func MatchesAllOf(checkers ...spoof.ResponseChecker) spoof.ResponseChecker { func WaitForEndpointState( kubeClient *KubeClient, logf logging.FormatLogger, - theURL string, + url *url.URL, inState spoof.ResponseChecker, desc string, resolvable bool, opts ...interface{}) (*spoof.Response, error) { - return WaitForEndpointStateWithTimeout(kubeClient, logf, theURL, inState, desc, resolvable, spoof.RequestTimeout, opts...) + return WaitForEndpointStateWithTimeout(kubeClient, logf, url, inState, desc, resolvable, spoof.RequestTimeout, opts...) } // WaitForEndpointStateWithTimeout will poll an endpoint until inState indicates the state is achieved @@ -154,7 +154,7 @@ func WaitForEndpointState( func WaitForEndpointStateWithTimeout( kubeClient *KubeClient, logf logging.FormatLogger, - theURL string, + url *url.URL, inState spoof.ResponseChecker, desc string, resolvable bool, @@ -162,16 +162,11 @@ func WaitForEndpointStateWithTimeout( opts ...interface{}) (*spoof.Response, error) { defer logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForEndpointState/%s", desc)).End() - // Try parsing the "theURL" with and without a scheme. 
- asURL, err := url.Parse(theURL) - if err != nil { - return nil, err - } - if asURL.Scheme == "" { - asURL.Scheme = "http" + if url.Scheme == "" || url.Host == "" { + return nil, fmt.Errorf("invalid URL: %q", url.String()) } - req, err := http.NewRequest(http.MethodGet, asURL.String(), nil) + req, err := http.NewRequest(http.MethodGet, url.String(), nil) if err != nil { return nil, err } @@ -186,7 +181,7 @@ func WaitForEndpointStateWithTimeout( } } - client, err := NewSpoofingClient(kubeClient, logf, asURL.Hostname(), resolvable, tOpts...) + client, err := NewSpoofingClient(kubeClient, logf, url.Hostname(), resolvable, tOpts...) if err != nil { return nil, err } diff --git a/test/vendor/knative.dev/pkg/test/spoof/error_checks.go b/test/vendor/knative.dev/pkg/test/spoof/error_checks.go index 0cd2995ca2..8a913b99d4 100644 --- a/test/vendor/knative.dev/pkg/test/spoof/error_checks.go +++ b/test/vendor/knative.dev/pkg/test/spoof/error_checks.go @@ -40,13 +40,14 @@ func isDNSError(err error) bool { return strings.Contains(msg, "no such host") || strings.Contains(msg, ":53") } -func isTCPConnectRefuse(err error) bool { +func isConnectionRefused(err error) bool { // The alternative for the string check is: // errNo := (((err.(*url.Error)).Err.(*net.OpError)).Err.(*os.SyscallError).Err).(syscall.Errno) // if errNo == syscall.Errno(0x6f) {...} // But with assertions, of course. 
- if err != nil && strings.Contains(err.Error(), "connect: connection refused") { - return true - } - return false + return err != nil && strings.Contains(err.Error(), "connect: connection refused") +} + +func isConnectionReset(err error) bool { + return err != nil && strings.Contains(err.Error(), "connection reset by peer") } diff --git a/test/vendor/knative.dev/pkg/test/spoof/spoof.go b/test/vendor/knative.dev/pkg/test/spoof/spoof.go index 0b43c5bf57..098a8772c2 100644 --- a/test/vendor/knative.dev/pkg/test/spoof/spoof.go +++ b/test/vendor/knative.dev/pkg/test/spoof/spoof.go @@ -27,7 +27,6 @@ import ( "strings" "time" - "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "knative.dev/pkg/test/ingress" @@ -65,7 +64,7 @@ func (r *Response) String() string { // Interface defines the actions that can be performed by the spoofing client. type Interface interface { Do(*http.Request) (*Response, error) - Poll(*http.Request, ResponseChecker) (*Response, error) + Poll(*http.Request, ResponseChecker, ...ErrorRetryChecker) (*Response, error) } // https://medium.com/stupid-gopher-tricks/ensuring-go-interface-satisfaction-at-compile-time-1ed158e8fa17 @@ -81,14 +80,17 @@ var ( // https://github.com/kubernetes/apimachinery/blob/cf7ae2f57dabc02a3d215f15ca61ae1446f3be8f/pkg/util/wait/wait.go#L172 type ResponseChecker func(resp *Response) (done bool, err error) +// ErrorRetryChecker is used to determine if an error should be retried or not. +// If an error should be retried, it should return true and the wrapped error to explain why to retry. +type ErrorRetryChecker func(e error) (retry bool, err error) + // SpoofingClient is a minimal HTTP client wrapper that spoofs the domain of requests // for non-resolvable domains. 
type SpoofingClient struct { Client *http.Client RequestInterval time.Duration RequestTimeout time.Duration - - logf logging.FormatLogger + Logf logging.FormatLogger } // TransportOption allows callers to customize the http.Transport used by a SpoofingClient @@ -108,10 +110,11 @@ func New( opts ...TransportOption) (*SpoofingClient, error) { endpoint, err := ResolveEndpoint(kubeClientset, domain, resolvable, endpointOverride) if err != nil { - fmt.Errorf("failed get the cluster endpoint: %v", err) + return nil, fmt.Errorf("failed get the cluster endpoint: %v", err) } // Spoof the hostname at the resolver level + logf("Spoofing %s -> %s", domain, endpoint) transport := &http.Transport{ DialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) { spoofed := addr @@ -119,7 +122,6 @@ func New( // The original hostname:port is spoofed by replacing the hostname by the value // returned by ResolveEndpoint. spoofed = endpoint + ":" + addr[i+1:] - logf("Spoofing %s -> %s", addr, spoofed) } return dialContext(ctx, network, spoofed) }, @@ -139,7 +141,7 @@ func New( Client: &http.Client{Transport: roundTripper}, RequestInterval: requestInterval, RequestTimeout: RequestTimeout, - logf: logf, + Logf: logf, } return &sc, nil } @@ -201,7 +203,7 @@ func (sc *SpoofingClient) Do(req *http.Request) (*Response, error) { } // Poll executes an http request until it satisfies the inState condition or encounters an error. 
-func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker) (*Response, error) { +func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker, errorRetryCheckers ...ErrorRetryChecker) (*Response, error) { var ( resp *Response err error @@ -214,19 +216,15 @@ func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker) (*Res req.Header.Add(pollReqHeader, "True") resp, err = sc.Do(req) if err != nil { - if isTCPTimeout(err) { - sc.logf("Retrying %s for TCP timeout %v", req.URL.String(), err) - return false, nil - } - // Retrying on DNS error, since we may be using xip.io or nip.io in tests. - if isDNSError(err) { - sc.logf("Retrying %s for DNS error %v", req.URL.String(), err) - return false, nil + if len(errorRetryCheckers) == 0 { + errorRetryCheckers = []ErrorRetryChecker{DefaultErrorRetryChecker} } - // Repeat the poll on `connection refused` errors, which are usually transient Istio errors. - if isTCPConnectRefuse(err) { - sc.logf("Retrying %s for connection refused %v", req.URL.String(), err) - return false, nil + for _, checker := range errorRetryCheckers { + retry, newErr := checker(err) + if retry { + sc.Logf("Retrying %s: %v", req.URL, newErr) + return false, nil + } } return true, err } @@ -239,11 +237,30 @@ func (sc *SpoofingClient) Poll(req *http.Request, inState ResponseChecker) (*Res } if err != nil { - return resp, errors.Wrapf(err, "response: %s did not pass checks", resp) + return resp, fmt.Errorf("response: %s did not pass checks: %w", resp, err) } return resp, nil } +// DefaultErrorRetryChecker implements the defaults for retrying on error. +func DefaultErrorRetryChecker(err error) (bool, error) { + if isTCPTimeout(err) { + return true, fmt.Errorf("Retrying for TCP timeout: %v", err) + } + // Retrying on DNS error, since we may be using xip.io or nip.io in tests. 
+ if isDNSError(err) { + return true, fmt.Errorf("Retrying for DNS error: %v", err) + } + // Repeat the poll on `connection refused` errors, which are usually transient Istio errors. + if isConnectionRefused(err) { + return true, fmt.Errorf("Retrying for connection refused: %v", err) + } + if isConnectionReset(err) { + return true, fmt.Errorf("Retrying for connection reset: %v", err) + } + return false, err +} + // logZipkinTrace provides support to log Zipkin Trace for param: spoofResponse // We only log Zipkin trace for HTTP server errors i.e for HTTP status codes between 500 to 600 func (sc *SpoofingClient) logZipkinTrace(spoofResp *Response) { @@ -252,14 +269,14 @@ func (sc *SpoofingClient) logZipkinTrace(spoofResp *Response) { } traceID := spoofResp.Header.Get(zipkin.ZipkinTraceIDHeader) - sc.logf("Logging Zipkin Trace for: %s", traceID) + sc.Logf("Logging Zipkin Trace for: %s", traceID) - json, err := zipkin.JSONTrace(traceID, /* We don't know the expected number of spans */ -1, 5 * time.Second) + json, err := zipkin.JSONTrace(traceID /* We don't know the expected number of spans */, -1, 5*time.Second) if err != nil { if _, ok := err.(*zipkin.TimeoutError); !ok { - sc.logf("Error getting zipkin trace: %v", err) + sc.Logf("Error getting zipkin trace: %v", err) } } - sc.logf("%s", json) + sc.Logf("%s", json) } diff --git a/test/vendor/knative.dev/pkg/test/tinterface.go b/test/vendor/knative.dev/pkg/test/tinterface.go new file mode 100644 index 0000000000..c4a87dae86 --- /dev/null +++ b/test/vendor/knative.dev/pkg/test/tinterface.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Defines an interface of commonality between testing.T and logging.TLogger +// Allows most library functions to be shared +// Simplifies coexistance with TLogger + +package test + +type T interface { + Name() string + Helper() + SkipNow() + Log(args ...interface{}) + Error(args ...interface{}) +} + +type TLegacy interface { + T + Logf(fmt string, args ...interface{}) // It gets passed to things in logstream + Fatal(args ...interface{}) +} diff --git a/test/vendor/knative.dev/pkg/test/zipkin/util.go b/test/vendor/knative.dev/pkg/test/zipkin/util.go index 81533f1fe2..f02f437035 100644 --- a/test/vendor/knative.dev/pkg/test/zipkin/util.go +++ b/test/vendor/knative.dev/pkg/test/zipkin/util.go @@ -20,6 +20,7 @@ package zipkin import ( "encoding/json" + "fmt" "io/ioutil" "net/http" "sync" @@ -126,47 +127,40 @@ func CleanupZipkinTracingSetup(logf logging.FormatLogger) { }) } -// CheckZipkinPortAvailability checks to see if Zipkin Port is available on the machine. -// returns error if the port is not available. -func CheckZipkinPortAvailability() error { - return monitoring.CheckPortAvailability(ZipkinPort) -} - // JSONTrace returns a trace for the given traceID. It will continually try to get the trace. If the // trace it gets has the expected number of spans, then it will be returned. If not, it will try // again. If it reaches timeout, then it returns everything it has so far with an error. 
func JSONTrace(traceID string, expected int, timeout time.Duration) (trace []model.SpanModel, err error) { t := time.After(timeout) - for ; len(trace) != expected; { + for len(trace) != expected { select { case <-t: - return trace, &TimeoutError{} + return trace, &TimeoutError{ + lastErr: err, + } default: trace, err = jsonTrace(traceID) - if err != nil { - return trace, err - } } } - return trace, nil + return trace, err } // TimeoutError is an error returned by JSONTrace if it times out before getting the expected number // of traces. -type TimeoutError struct {} -func (*TimeoutError) Error() string { - return "timeout getting JSONTrace" +type TimeoutError struct { + lastErr error +} + +func (t *TimeoutError) Error() string { + return fmt.Sprintf("timeout getting JSONTrace, most recent error: %v", t.lastErr) } -// jsonTrace gets a trace from Zipkin and returns it. +// jsonTrace gets a trace from Zipkin and returns it. Errors returned from this function should be +// retried, as they are likely caused by random problems communicating with Zipkin, or Zipkin +// communicating with its data store. 
func jsonTrace(traceID string) ([]model.SpanModel, error) { var empty []model.SpanModel - // Check if zipkin port forwarding is setup correctly - if err := CheckZipkinPortAvailability(); err == nil { - return empty, err - } - resp, err := http.Get(ZipkinTraceEndpoint + traceID) if err != nil { return empty, err @@ -181,7 +175,7 @@ func jsonTrace(traceID string) ([]model.SpanModel, error) { var models []model.SpanModel err = json.Unmarshal(body, &models) if err != nil { - return empty, err + return empty, fmt.Errorf("got an error in unmarshalling JSON %q: %v", body, err) } return models, nil } diff --git a/test/vendor/knative.dev/pkg/tracker/doc.go b/test/vendor/knative.dev/pkg/tracker/doc.go new file mode 100644 index 0000000000..5e0dfba60b --- /dev/null +++ b/test/vendor/knative.dev/pkg/tracker/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package tracker defines a utility to enable Reconcilers to trigger +// reconciliations when objects that are cross-referenced change, so +// that the level-based reconciliation can react to the change. The +// prototypical cross-reference in Kubernetes is corev1.ObjectReference. 
+package tracker diff --git a/test/vendor/knative.dev/pkg/tracker/enqueue.go b/test/vendor/knative.dev/pkg/tracker/enqueue.go new file mode 100644 index 0000000000..4a40a129ea --- /dev/null +++ b/test/vendor/knative.dev/pkg/tracker/enqueue.go @@ -0,0 +1,263 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tracker + +import ( + "fmt" + "sort" + "strings" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" + + "knative.dev/pkg/kmeta" +) + +// New returns an implementation of Interface that lets a Reconciler +// register a particular resource as watching an ObjectReference for +// a particular lease duration. This watch must be refreshed +// periodically (e.g. by a controller resync) or it will expire. +// +// When OnChanged is called by the informer for a particular +// GroupVersionKind, the provided callback is called with the "key" +// of each object actively watching the changed object. +func New(callback func(types.NamespacedName), lease time.Duration) Interface { + return &impl{ + leaseDuration: lease, + cb: callback, + } +} + +type impl struct { + m sync.Mutex + // exact maps from an object reference to the set of + // keys for objects watching it. 
+ exact map[Reference]set + // inexact maps from a partial object reference (no name/selector) to + // a map from watcher keys to the compiled selector and expiry. + inexact map[Reference]matchers + + // The amount of time that an object may watch another + // before having to renew the lease. + leaseDuration time.Duration + + cb func(types.NamespacedName) +} + +// Check that impl implements Interface. +var _ Interface = (*impl)(nil) + +// set is a map from keys to expirations +type set map[types.NamespacedName]time.Time + +// matchers maps the tracker's key to the matcher. +type matchers map[types.NamespacedName]matcher + +// matcher holds the selector and expiry for matching tracked objects. +type matcher struct { + // The selector to complete the match. + selector labels.Selector + + // When this lease expires. + expiry time.Time +} + +// Track implements Interface. +func (i *impl) Track(ref corev1.ObjectReference, obj interface{}) error { + return i.TrackReference(Reference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Namespace: ref.Namespace, + Name: ref.Name, + }, obj) +} + +func (i *impl) TrackReference(ref Reference, obj interface{}) error { + invalidFields := map[string][]string{ + "APIVersion": validation.IsQualifiedName(ref.APIVersion), + "Kind": validation.IsCIdentifier(ref.Kind), + "Namespace": validation.IsDNS1123Label(ref.Namespace), + } + var selector labels.Selector + fieldErrors := []string{} + switch { + case ref.Selector != nil && ref.Name != "": + fieldErrors = append(fieldErrors, "cannot provide both Name and Selector") + case ref.Name != "": + invalidFields["Name"] = validation.IsDNS1123Subdomain(ref.Name) + case ref.Selector != nil: + ls, err := metav1.LabelSelectorAsSelector(ref.Selector) + if err != nil { + invalidFields["Selector"] = []string{err.Error()} + } + selector = ls + default: + fieldErrors = append(fieldErrors, "must provide either Name or Selector") + } + for k, v := range invalidFields { + for _, msg := range v { + 
fieldErrors = append(fieldErrors, fmt.Sprintf("%s: %s", k, msg)) + } + } + if len(fieldErrors) > 0 { + sort.Strings(fieldErrors) + return fmt.Errorf("invalid Reference:\n%s", strings.Join(fieldErrors, "\n")) + } + + // Determine the key of the object tracking this reference. + object, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + return err + } + key := types.NamespacedName{Namespace: object.GetNamespace(), Name: object.GetName()} + + i.m.Lock() + defer i.m.Unlock() + if i.exact == nil { + i.exact = make(map[Reference]set) + } + if i.inexact == nil { + i.inexact = make(map[Reference]matchers) + } + + // If the reference uses Name then it is an exact match. + if selector == nil { + l, ok := i.exact[ref] + if !ok { + l = set{} + } + + if expiry, ok := l[key]; !ok || isExpired(expiry) { + // When covering an uncovered key, immediately call the + // registered callback to ensure that the following pattern + // doesn't create problems: + // foo, err := lister.Get(key) + // // Later... + // err := tracker.Track(fooRef, parent) + // In this example, "Later" represents a window where "foo" may + // have changed or been created while the Track is not active. + // The simplest way of eliminating such a window is to call the + // callback to "catch up" immediately following new + // registrations. + i.cb(key) + } + // Overwrite the key with a new expiration. + l[key] = time.Now().Add(i.leaseDuration) + + i.exact[ref] = l + return nil + } + + // Otherwise, it is an inexact match by selector. + partialRef := Reference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Namespace: ref.Namespace, + // Exclude the selector. + } + l, ok := i.inexact[partialRef] + if !ok { + l = matchers{} + } + + if m, ok := l[key]; !ok || isExpired(m.expiry) { + // When covering an uncovered key, immediately call the + // registered callback to ensure that the following pattern + // doesn't create problems: + // foo, err := lister.Get(key) + // // Later... 
+ // err := tracker.Track(fooRef, parent) + // In this example, "Later" represents a window where "foo" may + // have changed or been created while the Track is not active. + // The simplest way of eliminating such a window is to call the + // callback to "catch up" immediately following new + // registrations. + i.cb(key) + } + // Overwrite the key with a new expiration. + l[key] = matcher{ + selector: selector, + expiry: time.Now().Add(i.leaseDuration), + } + + i.inexact[partialRef] = l + return nil +} + +func isExpired(expiry time.Time) bool { + return time.Now().After(expiry) +} + +// OnChanged implements Interface. +func (i *impl) OnChanged(obj interface{}) { + item, err := kmeta.DeletionHandlingAccessor(obj) + if err != nil { + return + } + + or := kmeta.ObjectReference(item) + ref := Reference{ + APIVersion: or.APIVersion, + Kind: or.Kind, + Namespace: or.Namespace, + Name: or.Name, + } + + i.m.Lock() + defer i.m.Unlock() + + // Handle exact matches. + s, ok := i.exact[ref] + if ok { + for key, expiry := range s { + // If the expiration has lapsed, then delete the key. + if isExpired(expiry) { + delete(s, key) + continue + } + i.cb(key) + } + if len(s) == 0 { + delete(i.exact, ref) + } + } + + // Handle inexact matches. + ref.Name = "" + ms, ok := i.inexact[ref] + if ok { + ls := labels.Set(item.GetLabels()) + for key, m := range ms { + // If the expiration has lapsed, then delete the key. 
+			if isExpired(m.expiry) {
+				delete(ms, key)
+				continue
+			}
+			if m.selector.Matches(ls) {
+				i.cb(key)
+			}
+		}
+		if len(ms) == 0 {
+			delete(i.inexact, ref)
+		}
+	}
+}
+ // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` +} + +// Interface defines the interface through which an object can register +// that it is tracking another object by reference. +type Interface interface { + // Track tells us that "obj" is tracking changes to the + // referenced object. + // DEPRECATED: use TrackReference + Track(ref corev1.ObjectReference, obj interface{}) error + + // Track tells us that "obj" is tracking changes to the + // referenced object. + TrackReference(ref Reference, obj interface{}) error + + // OnChanged is a callback to register with the InformerFactory + // so that we are notified for appropriate object changes. + OnChanged(obj interface{}) +} + +// GroupVersionKind returns the GroupVersion of the object referenced. +func (ref *Reference) GroupVersionKind() schema.GroupVersionKind { + gv, _ := schema.ParseGroupVersion(ref.APIVersion) + return schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: ref.Kind, + } +} + +// ObjectReference returns the tracker Reference as an ObjectReference. +func (ref *Reference) ObjectReference() corev1.ObjectReference { + return corev1.ObjectReference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Namespace: ref.Namespace, + Name: ref.Name, + } +} + +// ValidateObjectReference validates that the Reference uses a subset suitable for +// translation to a corev1.ObjectReference. This helper is intended to simplify +// validating a particular (narrow) use of tracker.Reference. 
+func (ref *Reference) ValidateObjectReference(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + // Required fields + if ref.APIVersion == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")) + } else if verrs := validation.IsQualifiedName(ref.APIVersion); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "apiVersion")) + } + if ref.Kind == "" { + errs = errs.Also(apis.ErrMissingField("kind")) + } else if verrs := validation.IsCIdentifier(ref.Kind); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "kind")) + } + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) + } else if verrs := validation.IsDNS1123Label(ref.Name); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "name")) + } + if ref.Namespace == "" { + errs = errs.Also(apis.ErrMissingField("namespace")) + } else if verrs := validation.IsDNS1123Label(ref.Namespace); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "namespace")) + } + + // Disallowed fields in ObjectReference-compatible context. 
+ if ref.Selector != nil { + errs = errs.Also(apis.ErrDisallowedFields("selector")) + } + + return errs +} + +func (ref *Reference) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + // Required fields + if ref.APIVersion == "" { + errs = errs.Also(apis.ErrMissingField("apiVersion")) + } else if verrs := validation.IsQualifiedName(ref.APIVersion); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "apiVersion")) + } + if ref.Kind == "" { + errs = errs.Also(apis.ErrMissingField("kind")) + } else if verrs := validation.IsCIdentifier(ref.Kind); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "kind")) + } + if ref.Namespace == "" { + errs = errs.Also(apis.ErrMissingField("namespace")) + } else if verrs := validation.IsDNS1123Label(ref.Namespace); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "namespace")) + } + + switch { + case ref.Selector != nil && ref.Name != "": + errs = errs.Also(apis.ErrMultipleOneOf("selector", "name")) + case ref.Selector != nil: + _, err := metav1.LabelSelectorAsSelector(ref.Selector) + if err != nil { + errs = errs.Also(apis.ErrInvalidValue(err.Error(), "selector")) + } + + case ref.Name != "": + if verrs := validation.IsDNS1123Label(ref.Name); len(verrs) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(verrs, ", "), "name")) + } + default: + errs = errs.Also(apis.ErrMissingOneOf("selector", "name")) + } + + return errs + +} diff --git a/test/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go b/test/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9494168c6f --- /dev/null +++ b/test/vendor/knative.dev/pkg/tracker/zz_generated.deepcopy.go @@ -0,0 +1,46 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package tracker + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Reference) DeepCopyInto(out *Reference) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference. +func (in *Reference) DeepCopy() *Reference { + if in == nil { + return nil + } + out := new(Reference) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/.gitattributes b/test/vendor/knative.dev/serving/.gitattributes new file mode 100644 index 0000000000..2aa7c69c17 --- /dev/null +++ b/test/vendor/knative.dev/serving/.gitattributes @@ -0,0 +1,15 @@ +# This file is documented at https://git-scm.com/docs/gitattributes. +# Linguist-specific attributes are documented at +# https://github.com/github/linguist. + +**/zz_generated.*.go linguist-generated=true +/pkg/client/** linguist-generated=true +/test/client/** linguist-generated=true + +# coverage-excluded is an attribute used to explicitly exclude a path from being included in code +# coverage. 
If a path is marked as linguist-generated already, it will be implicitly excluded and +# there is no need to add the coverage-excluded attribute +/pkg/**/testing/** coverage-excluded=true +/vendor/** coverage-excluded=true +/test/** coverage-excluded=true +/cmd/**/main.go coverage-excluded=true diff --git a/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/ask-question.md b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/ask-question.md new file mode 100644 index 0000000000..d975ebdf40 --- /dev/null +++ b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/ask-question.md @@ -0,0 +1,28 @@ +--- +name: Question +about: Ask a question about knative/serving +title: '' +labels: kind/question +assignees: '' + +--- + + + + +## Ask your question here: + diff --git a/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/bug-report.md b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000..c5595f0411 --- /dev/null +++ b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,48 @@ +--- +name: Bug report +about: Report a bug in knative/serving +title: '' +labels: kind/bug +assignees: '' + +--- + + + + +## What version of Knative? 
+ + + +> 0.9.x +> 0.10.x +> 0.11.x +> Output of `git describe --dirty` + +## Expected Behavior + + + + +## Actual Behavior + + + + +## Steps to Reproduce the Problem + + diff --git a/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/feature-request.md b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000..df93041012 --- /dev/null +++ b/test/vendor/knative.dev/serving/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,28 @@ +--- +name: Feature Request +about: Create a feature request for knative/serving +title: '' +labels: kind/feature +assignees: '' + +--- + + + + +## Describe the feature + diff --git a/test/vendor/knative.dev/serving/.github/issue-template.md b/test/vendor/knative.dev/serving/.github/issue-template.md new file mode 100644 index 0000000000..76796a05ff --- /dev/null +++ b/test/vendor/knative.dev/serving/.github/issue-template.md @@ -0,0 +1,37 @@ + + +## Expected Behavior + +## Actual Behavior + +## Steps to Reproduce the Problem + +1. +2. +3. 
+ +## Additional Info diff --git a/test/vendor/knative.dev/serving/.github/pull-request-template.md b/test/vendor/knative.dev/serving/.github/pull-request-template.md new file mode 100644 index 0000000000..99ad38da07 --- /dev/null +++ b/test/vendor/knative.dev/serving/.github/pull-request-template.md @@ -0,0 +1,23 @@ + + +Fixes # + +## Proposed Changes + +* +* +* + +**Release Note** + + + +```release-note + +``` diff --git a/test/vendor/knative.dev/serving/.gitignore b/test/vendor/knative.dev/serving/.gitignore new file mode 100644 index 0000000000..85baa82ae0 --- /dev/null +++ b/test/vendor/knative.dev/serving/.gitignore @@ -0,0 +1,11 @@ +# Operating system temporary files +.DS_Store + +# Editor/IDE specific settings +.idea +.vscode/ +*.iml + +# Temporary output of build tools +bazel-* +*.out diff --git a/test/vendor/knative.dev/serving/.ko.yaml b/test/vendor/knative.dev/serving/.ko.yaml new file mode 100644 index 0000000000..5e5fb741d1 --- /dev/null +++ b/test/vendor/knative.dev/serving/.ko.yaml @@ -0,0 +1,4 @@ +# Use :nonroot base image for all containers +defaultBaseImage: gcr.io/distroless/static:nonroot +baseImageOverrides: + knative.dev/serving/vendor/github.com/tsenart/vegeta: ubuntu:latest diff --git a/test/vendor/github.com/knative/serving/AUTHORS b/test/vendor/knative.dev/serving/AUTHORS similarity index 96% rename from test/vendor/github.com/knative/serving/AUTHORS rename to test/vendor/knative.dev/serving/AUTHORS index 5ab90824ca..f42714c4ec 100644 --- a/test/vendor/github.com/knative/serving/AUTHORS +++ b/test/vendor/knative.dev/serving/AUTHORS @@ -8,3 +8,4 @@ Pivotal Software, Inc. IBM Corp Red Hat, Inc. Cisco Systems, Inc. +VMware, Inc. diff --git a/test/vendor/knative.dev/serving/CONTRIBUTING.md b/test/vendor/knative.dev/serving/CONTRIBUTING.md new file mode 100644 index 0000000000..eaf329d9ef --- /dev/null +++ b/test/vendor/knative.dev/serving/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contribution guidelines + +So you want to hack on Knative Serving? 
Yay! Please refer to Knative's overall +[contribution guidelines](https://www.knative.dev/contributing/) to find out how +you can help. diff --git a/test/vendor/knative.dev/serving/DEVELOPMENT.md b/test/vendor/knative.dev/serving/DEVELOPMENT.md new file mode 100644 index 0000000000..79e1082243 --- /dev/null +++ b/test/vendor/knative.dev/serving/DEVELOPMENT.md @@ -0,0 +1,305 @@ +# Development + +This doc explains how to setup a development environment so you can get started +[contributing](https://www.knative.dev/contributing/) to `Knative Serving`. Also +take a look at: + +- [The pull request workflow](https://www.knative.dev/contributing/contributing/#pull-requests) +- [How to add and run tests](./test/README.md) +- [Iterating](#iterating) + +## Prerequisites + +Follow the instructions below to set up your development environment. Once you +meet these requirements, you can make changes and +[deploy your own version of Knative Serving](#starting-knative-serving)! + +Before submitting a PR, see also [CONTRIBUTING.md](./CONTRIBUTING.md). + +### Sign up for GitHub + +Start by creating [a GitHub account](https://github.com/join), then setup +[GitHub access via SSH](https://help.github.com/articles/connecting-to-github-with-ssh/). + +### Install requirements + +You must install these tools: + +1. [`go`](https://golang.org/doc/install): The language `Knative Serving` is + built in (1.13 or later) +1. [`git`](https://help.github.com/articles/set-up-git/): For source control +1. [`dep`](https://github.com/golang/dep): For managing external Go + dependencies. +1. [`ko`](https://github.com/google/ko): For development. +1. [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/): For + managing development environments. + +### Create a cluster and a repo + +1. 
[Set up a kubernetes cluster](https://www.knative.dev/docs/install/) + - Follow an install guide up through "Creating a Kubernetes Cluster" + - You do _not_ need to install Istio or Knative using the instructions in the + guide. Simply create the cluster and come back here. + - If you _did_ install Istio/Knative following those instructions, that's + fine too, you'll just redeploy over them, below. +1. Set up a docker repository for pushing images. You can use any container + image registry by adjusting the authentication methods and repository paths + mentioned in the sections below. + - [Google Container Registry quickstart](https://cloud.google.com/container-registry/docs/pushing-and-pulling) + - [Docker Hub quickstart](https://docs.docker.com/docker-hub/) + +**Note**: You'll need to be authenticated with your `KO_DOCKER_REPO` before +pushing images. Run `gcloud auth configure-docker` if you are using Google +Container Registry or `docker login` if you are using Docker Hub. + +### Setup your environment + +To start your environment you'll need to set these environment variables (we +recommend adding them to your `.bashrc`): + +1. `GOPATH`: If you don't have one, simply pick a directory and add + `export GOPATH=...` +1. `$GOPATH/bin` on `PATH`: This is so that tooling installed via `go get` will + work properly. +1. `KO_DOCKER_REPO`: The docker repository to which developer images should be + pushed (e.g. `gcr.io/[gcloud-project]`). + +- **Note**: if you are using docker hub to store your images your + `KO_DOCKER_REPO` variable should be `docker.io/`. +- **Note**: Currently Docker Hub doesn't let you create subdirs under your + username. 
+ +`.bashrc` example: + +```shell +export GOPATH="$HOME/go" +export PATH="${PATH}:${GOPATH}/bin" +export KO_DOCKER_REPO='gcr.io/my-gcloud-project-id' +``` + +### Checkout your fork + +The Go tools require that you clone the repository to the +`src/knative.dev/serving` directory in your +[`GOPATH`](https://github.com/golang/go/wiki/SettingGOPATH). + +To check out this repository: + +1. Create your own + [fork of this repo](https://help.github.com/articles/fork-a-repo/) +1. Clone it to your machine: + +```shell +mkdir -p ${GOPATH}/src/knative.dev +cd ${GOPATH}/src/knative.dev +git clone git@github.com:${YOUR_GITHUB_USERNAME}/serving.git +cd serving +git remote add upstream git@github.com:knative/serving.git +git remote set-url --push upstream no_push +``` + +_Adding the `upstream` remote sets you up nicely for regularly +[syncing your fork](https://help.github.com/articles/syncing-a-fork/)._ + +Once you reach this point you are ready to do a full build and deploy as +described below. + +## Starting Knative Serving + +Once you've [setup your development environment](#prerequisites), stand up +`Knative Serving`. Note that if you already installed Knative to your cluster, +redeploying the new version should work fine, but if you run into trouble, you +can easily [clean your cluster up](#clean-up) and try again. + +### Setup cluster admin + +Your user must be a cluster admin to perform the setup needed for Knative. + +The value you use depends on +[your cluster setup](https://www.knative.dev/docs/install/): when using Minikube +or Kubernetes on Docker Desktop, the user is your local user; when using GKE, +the user is your GCP user. 
+ +```shell +# For GCP +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user=$(gcloud config get-value core/account) + +# For minikube or Kubernetes on Docker Desktop +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user=$USER +``` + +### Resource allocation for Kubernetes + +Please allocate sufficient resources for Kubernetes, especially when you run a +Kubernetes cluster on your local machine. We recommend allocating at least 6 +CPUs and 8G memory assuming a single node Kubernetes installation, and +allocating at least 4 CPUs and 8G memory for each node assuming a 3-node +Kubernetes installation. Please go back to +[your cluster setup](https://www.knative.dev/docs/install/) to reconfigure your +Kubernetes cluster in your designated environment, if necessary. + +### Deploy Istio + +```shell +kubectl apply -f ./third_party/istio-1.3-latest/istio-crds.yaml +while [[ $(kubectl get crd gateways.networking.istio.io -o jsonpath='{.status.conditions[?(@.type=="Established")].status}') != 'True' ]]; do + echo "Waiting on Istio CRDs"; sleep 1 +done +kubectl apply -f ./third_party/istio-1.3-latest/istio-minimal.yaml +``` + +Follow the +[instructions](https://www.knative.dev/docs/serving/gke-assigning-static-ip-address/) +if you need to set up static IP for Ingresses in the cluster. + +If you want to adopt preinstalled Istio, please check whether +the `cluster-local-gateway` Service is deployed in namespace `istio-system` or not +(you can check by running `kubectl get service cluster-local-gateway -n istio-system`). If it's not +installed, please install it with following command. You could also adjust +parameters if needed. + +```shell +kubectl apply -f ./third_party/istio-1.3-latest/istio-knative-extras.yaml +``` + +> If you want to customize the `istio*.yaml` files you can refer to +> `third_party/istio--latest/download-istio.sh` how these templates +> were generated. 
+ +### Deploy cert-manager + +1. Deploy `cert-manager` CRDs + + ```shell + kubectl apply -f ./third_party/cert-manager-0.9.1/cert-manager-crds.yaml + while [[ $(kubectl get crd certificates.certmanager.k8s.io -o jsonpath='{.status.conditions[?(@.type=="Established")].status}') != 'True' ]]; do + echo "Waiting on Cert-Manager CRDs"; sleep 1 + done + ``` + +1. Deploy `cert-manager` + + If you want to use the feature of automatically provisioning TLS for Knative + services, you need to install the full cert-manager. + + ```shell + # For kubernetes version 1.13 or above, --validate=false is not needed. + kubectl apply -f ./third_party/cert-manager-0.9.1/cert-manager.yaml --validate=false + ``` + +### Deploy Knative Serving + +This step includes building Knative Serving, creating and pushing developer +images and deploying them to your Kubernetes cluster. + +Run: + +```shell +ko apply -f config/ + +# Optional steps + +# Run post-install job to setup nice XIP.IO domain name. This only works +# if your Kubernetes LoadBalancer has an IPv4 address. +ko delete -f config/post-install --ignore-not-found +ko apply -f config/post-install +``` + +The above step is equivalent to applying the `serving.yaml` for released +versions of Knative Serving. 
+ +You can see things running with: + +```console +kubectl -n knative-serving get pods +NAME READY STATUS RESTARTS AGE +activator-5b87795885-f8t7k 2/2 Running 0 18m +autoscaler-6495f7f79d-86jsr 2/2 Running 0 18m +controller-5fd7fddc58-klmt4 1/1 Running 0 18m +default-domain-6hs98 0/1 Completed 0 13s +networking-istio-6755db495d-wtj4d 1/1 Running 0 18m +webhook-84b8c9886d-dsqqv 1/1 Running 0 18m +``` + +You can access the Knative Serving Controller's logs with: + +```shell +kubectl -n knative-serving logs $(kubectl -n knative-serving get pods -l app=controller -o name) +``` + +If you're using a GCP project to host your Kubernetes cluster, it's good to +check the +[Discovery & load balancing](http://console.developers.google.com/kubernetes/discovery) +page to ensure that all services are up and running (and not blocked by a quota +issue, for example). + +### Install logging and monitoring backends + +Run: + +```shell +kubectl apply -R -f config/monitoring/100-namespace.yaml \ + -f third_party/config/monitoring/logging/elasticsearch \ + -f config/monitoring/logging/elasticsearch \ + -f third_party/config/monitoring/metrics/prometheus \ + -f config/monitoring/metrics/prometheus \ + -f config/monitoring/tracing/zipkin +``` + +## Iterating + +As you make changes to the code-base, there are two special cases to be aware +of: + +- **If you change an input to generated code**, then you must run + [`./hack/update-codegen.sh`](./hack/update-codegen.sh). Inputs include: + + - API type definitions in + [pkg/apis/serving/v1alpha1/](./pkg/apis/serving/v1alpha1/.), + - Types definitions annotated with `// +k8s:deepcopy-gen=true`. + +- **If you change a package's deps** (including adding external dep), then you + must run [`./hack/update-deps.sh`](./hack/update-deps.sh). + +These are both idempotent, and we expect that running these at `HEAD` to have no +diffs. Code generation and dependencies are automatically checked to produce no +diffs for each pull request. 
+ +update-deps.sh runs "dep ensure" command. In some cases, if newer dependencies +are required, you need to run "dep ensure -update package-name" manually. + +Once the codegen and dependency information is correct, redeploying the +controller is simply: + +```shell +ko apply -f config/controller.yaml +``` + +Or you can [clean it up completely](./DEVELOPMENT.md#clean-up) and +[completely redeploy `Knative Serving`](./DEVELOPMENT.md#starting-knative-serving). + +## Clean up + +You can delete all of the service components with: + +```shell +ko delete --ignore-not-found=true \ + -f config/monitoring/100-namespace.yaml \ + -f config/ \ + -f ./third_party/istio-1.3-latest/istio-minimal.yaml \ + -f ./third_party/istio-1.3-latest/istio-crds.yaml \ + -f ./third_party/cert-manager-0.9.1/cert-manager-crds.yaml \ + -f ./third_party/cert-manager-0.9.1/cert-manager.yaml +``` + +## Telemetry + +To access Telemetry see: + +- [Accessing Metrics](https://www.knative.dev/docs/serving/accessing-metrics/) +- [Accessing Logs](https://www.knative.dev/docs/serving/accessing-logs/) +- [Accessing Traces](https://www.knative.dev/docs/serving/accessing-traces/) diff --git a/test/vendor/knative.dev/serving/Gopkg.lock b/test/vendor/knative.dev/serving/Gopkg.lock new file mode 100644 index 0000000000..1f6fa6e66b --- /dev/null +++ b/test/vendor/knative.dev/serving/Gopkg.lock @@ -0,0 +1,2121 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:4f2265c5832962455472837497a7e477cf488202605f5010a2ee3e6817a3f821" + name = "cloud.google.com/go" + packages = [ + "compute/metadata", + "container/apiv1", + "iam", + "internal", + "internal/optional", + "internal/trace", + "internal/version", + "monitoring/apiv3", + "storage", + "trace/apiv2", + ] + pruneopts = "NUT" + revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" + version = "v0.34.0" + +[[projects]] + digest = "1:0a936e17f4fc0c65615913199ce0a387a67a4c72e017747b6f38c898168b1b75" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "NUT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:642cf8e80572f9dc0677b0f241c8ab2e715c9dccc215270ea873c86ddca0062c" + name = "contrib.go.opencensus.io/exporter/prometheus" + packages = ["."] + pruneopts = "NUT" + revision = "f4a2c1e53ec45636355d35fb9022b64e4bdd4a91" + version = "v0.1.0" + +[[projects]] + digest = "1:c3fd5ddaad733530174bba5dd787d98a45d181851a95a0b7362be7bce7144f56" + name = "contrib.go.opencensus.io/exporter/stackdriver" + packages = [ + ".", + "monitoredresource", + ] + pruneopts = "NUT" + revision = "59d068f8d8ff5b653916aa30cdc4e13c7f15d56e" + +[[projects]] + digest = "1:7b5f423f5b0dd3dfa32a19a6183b0ab9129bff371ebf3f9efae32f87e4986d8f" + name = "contrib.go.opencensus.io/exporter/zipkin" + packages = ["."] + pruneopts = "NUT" + revision = "30f9fad5db2c8944c21d223496e2543aeb445d4c" + version = "v0.1.1" + +[[projects]] + digest = "1:042dd95511f495f6e7c0ad6a9398b8c5c77bf940efa6e568cbac9f818401f36a" + name = "github.com/Azure/azure-sdk-for-go" + packages = ["arm/containerregistry"] + pruneopts = "NUT" + revision = "2d1d76c9013c4feb6695a2346f0e66ea0ef77aa6" + version = "v11.3.0-beta" + +[[projects]] + digest = "1:90df11ad9349a69d46e08211d47eb8db80311bf985a447dd88cb30d5b5f54add" + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + 
"autorest/date", + "autorest/to", + "autorest/validation", + "logger", + "version", + ] + pruneopts = "NUT" + revision = "1ffcc8896ef6dfe022d90a4317d866f925cf0f9e" + version = "v11.1.2" + +[[projects]] + digest = "1:632e5f740de6cafce1c08f659bfa10e75f7e7938dfaeb3d99c82fcbf484484e2" + name = "github.com/NYTimes/gziphandler" + packages = ["."] + pruneopts = "NUT" + revision = "dd0439581c7657cb652dfe5c71d7d48baf39541d" + version = "v1.1.1" + +[[projects]] + digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6" + name = "github.com/PuerkitoBio/purell" + packages = ["."] + pruneopts = "NUT" + revision = "44968752391892e1b0d0b821ee79e9a85fa13049" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" + name = "github.com/PuerkitoBio/urlesc" + packages = ["."] + pruneopts = "NUT" + revision = "de5bf2ad457846296e2031421a34e2568e304e35" + +[[projects]] + digest = "1:de17997fdba5b694eb3528204adba0f089ebd2ceef54a346ad2313e6ca138df5" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/sdkio", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/xml/xmlutil", + "service/ecr", + "service/sts", + ] + pruneopts = "NUT" + revision = "c4e0e593d5f18b478f17ff0e86b56d41a70ced2d" + version = "v1.16.2" + +[[projects]] + branch = "master" + digest = 
"1:cb0535f5823b47df7dcb9768ebb6c000b79ad115472910c70efe93c9ed9b2315" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "NUT" + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + +[[projects]] + digest = "1:45c41cd27a8d986998680bfc86da0bbff5fa4f90d0f446c00636c8b099028ffe" + name = "github.com/blang/semver" + packages = ["."] + pruneopts = "NUT" + revision = "ba2c2ddd89069b46a7011d4106f6868f17ee1705" + version = "v3.6.1" + +[[projects]] + branch = "master" + digest = "1:2755397479777565ae0c383f63be30e8975da63805281283bdde46d73bed9a7d" + name = "github.com/c2h5oh/datasize" + packages = ["."] + pruneopts = "NUT" + revision = "4eba002a5eaea69cf8d235a388fc6b65ae68d2dd" + +[[projects]] + digest = "1:fdb4ed936abeecb46a8c27dcac83f75c05c87a46d9ec7711411eb785c213fa02" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "NUT" + revision = "a105b96453fe85139acc07b68de48f2cbdd71249" + version = "v0.2.0" + +[[projects]] + digest = "1:3cfafea3a01d537771da31732b081c357f02b92a420efc03507b2aff4d96f046" + name = "github.com/coreos/etcd" + packages = [ + "auth/authpb", + "clientv3", + "etcdserver/api/v3rpc/rpctypes", + "etcdserver/etcdserverpb", + "mvcc/mvccpb", + "pkg/tlsutil", + "pkg/transport", + "pkg/types", + ] + pruneopts = "NUT" + revision = "98d308426819d892e149fe45f6fd542464cb1f9d" + version = "v3.3.13" + +[[projects]] + digest = "1:1da3a221f0bc090792d3a2a080ff09008427c0e0f0533a4ed6abd8994421da73" + name = "github.com/coreos/go-systemd" + packages = ["daemon"] + pruneopts = "NUT" + revision = "95778dfbb74eb7e4dbaf43bf7d71809650ef8076" + version = "v19" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "NUT" + revision = 
"8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "NUT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:740dc3cccfcdb302c323d17da5f4f2dfa65f7b8c666e0a9ac8bc64f560fb2974" + name = "github.com/docker/docker" + packages = [ + "api/types", + "api/types/blkiodev", + "api/types/container", + "api/types/filters", + "api/types/mount", + "api/types/network", + "api/types/registry", + "api/types/strslice", + "api/types/swarm", + "api/types/versions", + ] + pruneopts = "NUT" + revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" + version = "v1.13.1" + +[[projects]] + digest = "1:ade935c55cd6d0367c843b109b09c9d748b1982952031414740750fdf94747eb" + name = "github.com/docker/go-connections" + packages = ["nat"] + pruneopts = "NUT" + revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55" + version = "v0.4.0" + +[[projects]] + digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482" + name = "github.com/docker/go-units" + packages = ["."] + pruneopts = "NUT" + revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" + version = "v0.3.3" + +[[projects]] + digest = "1:4f48083a56b5ac1f38cfbd8c0ccc19273079c97f9e4967a63f130c4f4b8bcfc5" + name = "github.com/emicklei/go-restful" + packages = [ + ".", + "log", + ] + pruneopts = "NUT" + revision = "b9bbc5664f49b6deec52393bd68f39830687a347" + version = "v2.9.3" + +[[projects]] + digest = "1:27e00683ae7700cf7b286011dcd6ace672d542d236590dcc51bd10669cfb3366" + name = "github.com/emicklei/go-restful-swagger12" + packages = ["."] + pruneopts = "NUT" + revision = "dcef7f55730566d41eae5db10e7d6981829720f6" + version = "1.0.1" + +[[projects]] + digest = "1:db115eee0ae265dab922fecbb7966c3fcdc4eae3c2e9caae302fdcba3ba422c7" + name = "github.com/evanphx/json-patch" + packages = ["."] + pruneopts = 
"NUT" + revision = "5858425f75500d40c52783dce87d085a483ce135" + +[[projects]] + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" + name = "github.com/ghodss/yaml" + packages = ["."] + pruneopts = "NUT" + revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" + version = "v1.0.0" + +[[projects]] + digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" + name = "github.com/go-openapi/jsonpointer" + packages = ["."] + pruneopts = "NUT" + revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" + version = "v0.19.0" + +[[projects]] + digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" + name = "github.com/go-openapi/jsonreference" + packages = ["."] + pruneopts = "NUT" + revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" + version = "v0.19.0" + +[[projects]] + digest = "1:8f80caf2fa31f78a035f33981c9685013033073b53f344f579e60fa69f0c6670" + name = "github.com/go-openapi/spec" + packages = ["."] + pruneopts = "NUT" + revision = "53d776530bf78a11b03a7b52dd8a083086b045e5" + version = "v0.19.0" + +[[projects]] + digest = "1:076ebf43e6e70f18ef9d079a685ede59a0f4dc9247256c209cf57407f959cef9" + name = "github.com/go-openapi/swag" + packages = ["."] + pruneopts = "NUT" + revision = "b3e2804c8535ee0d1b89320afd98474d5b8e9e3b" + version = "v0.19.0" + +[[projects]] + digest = "1:acaf854c7e302a7e13a665d00e6579fb72ead2ecd4932286deeac94ad3070fe7" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NUT" + revision = "047ecc927cd0b7d27bab83eb948e120fb7d1ea68" + version = "v1.6.5" + +[[projects]] + digest = "1:b38ab1b8cf42d089d9f31d4918c57c2dd47d42f3e7fc50e12142140228cf81a9" + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "jsonpb", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types", + ] + pruneopts = "NUT" + revision = "5628607bb4c51c3157aacc3a50f0ab707582b805" + version = "v1.3.1" + +[[projects]] + branch = "master" + digest = 
"1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "NUT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + branch = "master" + digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "NUT" + revision = "84a468cf14b4376def5d68c722b139b881c450a4" + +[[projects]] + digest = "1:a677057cef8b68b66003c2775ed1126bbd7e9e372b54b7c1a7c5201a2f1f3eb0" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/empty", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "NUT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:010d46ea3c1e730897e53058d1013a963f3f987675dda87df64f891b945281db" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/flags", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "NUT" + revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b" + +[[projects]] + digest = "1:244554add7692579a59f70199e2a09b217c21a328279a71b05ecf261b32aaf03" + name = "github.com/google/go-containerregistry" + packages = [ + "pkg/authn", + "pkg/authn/k8schain", + "pkg/internal/retry", + "pkg/internal/retry/wait", + "pkg/logs", + "pkg/name", + "pkg/v1", + "pkg/v1/empty", + "pkg/v1/mutate", + "pkg/v1/partial", + "pkg/v1/random", + "pkg/v1/remote", + "pkg/v1/remote/transport", + "pkg/v1/stream", + "pkg/v1/tarball", + "pkg/v1/types", + "pkg/v1/v1util", + ] + pruneopts = "NUT" + revision = "b02d448a3705facf11018efff34f1d2830be5724" + +[[projects]] + digest = "1:91099c6f78b1e7bdf9ed06eb4cb7f017174293a3689d76b995a06f4c8d64a7f0" + name = "github.com/google/go-github" + packages = ["github"] + pruneopts = 
"NUT" + revision = "9686ff0746200cf521ce225525b421e13b4eac1a" + version = "v28.1.1" + +[[projects]] + digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690" + name = "github.com/google/go-querystring" + packages = ["query"] + pruneopts = "NUT" + revision = "44c6ddd0a2342c386950e880b658017258da92fc" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "NUT" + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + branch = "master" + digest = "1:0d5e3798bfa2642ac268341c96710b8def1f3cbc3bc803c421d90704d72107d8" + name = "github.com/google/licenseclassifier" + packages = [ + ".", + "internal/sets", + "stringclassifier", + "stringclassifier/internal/pq", + "stringclassifier/searchset", + "stringclassifier/searchset/tokenizer", + ] + pruneopts = "NUT" + revision = "e979a0b10eebe748549c702a25e997c556349da6" + +[[projects]] + digest = "1:d7736b4372fa7b6d447118aca5a9a6d90a61d4e0d467316ac631e4ba58458ae4" + name = "github.com/google/mako" + packages = [ + "clients/proto/analyzers/threshold_analyzer_go_proto", + "clients/proto/analyzers/utest_analyzer_go_proto", + "clients/proto/analyzers/window_deviation_go_proto", + "go/quickstore", + "internal/go/common", + "internal/quickstore_microservice/proto/quickstore_go_proto", + "proto/quickstore/quickstore_go_proto", + "spec/proto/mako_go_proto", + ] + pruneopts = "NUT" + revision = "122f8dcef9e3906310e7dba05849cedb5be43b24" + version = "0.1.0" + +[[projects]] + digest = "1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53" + name = "github.com/google/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" + version = "v1.1.1" + +[[projects]] + digest = "1:5cfcdb1646a889af81a01e9a83c7ccbb45e9b14c314eaf17552459d2b797e557" + name = "github.com/googleapis/gax-go" + packages = [ + ".", + 
"v2", + ] + pruneopts = "NUT" + revision = "b001040cd31805261cbd978842099e326dfa857b" + version = "v2.0.2" + +[[projects]] + digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "NUT" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" + +[[projects]] + digest = "1:4a0c072e44da763409da72d41492373a034baf2e6d849c76d239b4abdfbb6c49" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "NUT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + digest = "1:5872c7f130f62fc34bfda20babad36be6309c00b5c9207717f7cd2a51536fff4" + name = "github.com/grpc-ecosystem/go-grpc-prometheus" + packages = ["."] + pruneopts = "NUT" + revision = "c225b8c3b01faf2899099b768856a9e916e5087b" + version = "v1.2.0" + +[[projects]] + digest = "1:e9c0bf861ea69dd6de1ff7ac2a0485e84743e163853ca973177fba7a50d1b769" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "NUT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + branch = "master" + digest = "1:892e13370cbfcda090d8f7676ef67b50cb2ead5460b72f3a1c2bb1c19e9a57de" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "NUT" + revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" + +[[projects]] + digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" + version = "v0.3.6" + +[[projects]] + branch = "master" + digest = "1:aa0434674a14402891f31f393b4dd53792bc67eee05158748d999096a8826ed3" + name = "github.com/influxdata/tdigest" + packages = ["."] + pruneopts = "NUT" + revision = 
"bf2b5ad3c0a925c44a0d2842c5d8182113cd248e" + +[[projects]] + digest = "1:1e59759b895f57302df03c5632dc6bbedcdf6782a2b6259246ff653dd214ac45" + name = "github.com/jetstack/cert-manager" + packages = [ + "pkg/apis/acme", + "pkg/apis/acme/v1alpha2", + "pkg/apis/certmanager", + "pkg/apis/certmanager/v1alpha2", + "pkg/apis/meta", + "pkg/apis/meta/v1", + ] + pruneopts = "NUT" + revision = "" + version = "v0.12.0" + +[[projects]] + digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "NUT" + revision = "c2b33e84" + +[[projects]] + digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NUT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "NUT" + revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" + +[[projects]] + digest = "1:08c58ac78a8c1f61e9a96350066d30fe194b8779799bd932a79932a5166a173f" + name = "github.com/kelseyhightower/envconfig" + packages = ["."] + pruneopts = "NUT" + revision = "0b417c4ec4a8a82eecc22a1459a504aa55163d61" + version = "v1.4.0" + +[[projects]] + digest = "1:9cf7e8e00acb7270c993e3116b6841a1c885374e81e7b1d4e62b13ee1e62c2d2" + name = "github.com/kubernetes-incubator/custom-metrics-apiserver" + packages = [ + "pkg/apiserver", + "pkg/apiserver/endpoints/handlers", + "pkg/apiserver/installer", + "pkg/apiserver/registry/rest", + "pkg/cmd", + "pkg/cmd/server", + "pkg/dynamicmapper", + "pkg/provider", + "pkg/registry/custom_metrics", + "pkg/registry/external_metrics", + ] + pruneopts = "NUT" + revision = "3d9be26a50eb64531fc40eb31a5f3e6720956dc6" + +[[projects]] + branch = "master" + digest = "1:06268133e9584afdd4ec94092076965820dbd23ecbe3b1c19bb156e501a14972" + name = 
"github.com/mailru/easyjson" + packages = [ + ".", + "buffer", + "jlexer", + "jwriter", + ] + pruneopts = "NUT" + revision = "1ea4449da9834f4d333f1cc461c374aea217d249" + +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NUT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + +[[projects]] + branch = "master" + digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5" + name = "github.com/mattbaird/jsonpatch" + packages = ["."] + pruneopts = "NUT" + revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" + +[[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "NUT" + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "NUT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "NUT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + branch = "master" + digest = "1:1604410fccff1fa79d0978bf9271d52d9ea1c8c06618d5c624dabba9cfe9cc83" + name = "github.com/munnerz/goautoneg" + packages = ["."] + pruneopts = "NUT" + revision = "2ae31c8b6b30d2f4c8100c20d527b571e9c433bb" + +[[projects]] + digest = "1:ba4312bc8a900105f0c5b85d23b289d594dc88af6608ce971c8696435f43ae85" + name = "github.com/openzipkin/zipkin-go" + packages = [ + ".", + "idgenerator", + "model", + "propagation", + "reporter", + "reporter/http", + "reporter/recorder", + ] + pruneopts = "NUT" + 
revision = "c29478e51bfb2e9c93e0e9f5e015e5993a490399" + version = "v0.2.2" + +[[projects]] + digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" + name = "github.com/pborman/uuid" + packages = ["."] + pruneopts = "NUT" + revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" + version = "v1.2" + +[[projects]] + digest = "1:e411054f54a332ecadd5fe5a4a68d092f36583e554037587da86da298cffc72d" + name = "github.com/prometheus/client_golang" + packages = [ + "api", + "api/prometheus/v1", + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "NUT" + revision = "505eaef017263e299324067d40ca2c48f6a2cf50" + version = "v0.9.2" + +[[projects]] + branch = "master" + digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "NUT" + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + digest = "1:4a6bb73eb5d7b8e67f25d22d87c84116cc69dfa80299dfd06622b8eee47917a8" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "NUT" + revision = "89604d197083d4781071d3c65855d24ecfb0a563" + +[[projects]] + branch = "master" + digest = "1:b2f81139ed70ba4358171bc3024a5f2a3a9d2148ccc3fff03e3a011afe9b7543" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "NUT" + revision = "282c8707aa210456a825798969cc27edda34992a" + +[[projects]] + digest = "1:7008ec21a0ce773bd21deefdb7d17fab8cb9f1ae0bc44026cdd2b658ab1582ca" + name = "github.com/rogpeppe/go-internal" + packages = ["semver"] + pruneopts = "NUT" + revision = "438578804ca6f31be148c27683afc419ce47c06e" + version = "v1.3.0" + +[[projects]] + digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + pruneopts = "NUT" + revision 
= "1744e2970ca51c86172c8190fadad617561ed6e7" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:68cecd4c1137f77dfa2db1f13a0ceef2b5606d6b69d66fa69f7df1b2f2fe9afc" + name = "github.com/shurcooL/httpfs" + packages = ["vfsutil"] + pruneopts = "NUT" + revision = "6a4d4a70508bfeac7afc814c5e4345e1c640b6b2" + +[[projects]] + branch = "master" + digest = "1:9ad746e784d9616f68a0ea56566a01b23dff758b8c864eaf3e69dfe86d6c3c89" + name = "github.com/shurcooL/vfsgen" + packages = ["."] + pruneopts = "NUT" + revision = "6a9ea43bcacdf716a5c1b38efff722c07adf0069" + +[[projects]] + digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "NUT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + branch = "master" + digest = "1:a889ff8426d462ae8b9a91ef29eb1bcd0d9a9ad792c55f9f32f6f912d57b9b8c" + name = "github.com/tsenart/go-tsz" + packages = [ + ".", + "testdata", + ] + pruneopts = "NUT" + revision = "0bd30b3df1c328dfc87ed857cdf695c3c7d2cb09" + +[[projects]] + branch = "master" + digest = "1:d6264baf1274b5857bc7f36eeb1a5772bf90727cc828008876e0f3a68d72ecfa" + name = "github.com/tsenart/vegeta" + packages = [ + ".", + "internal/resolver", + "lib", + "lib/lttb", + "lib/plot", + ] + pruneopts = "NUT" + revision = "ab06ddb56e2f6097bba8c5a6d168621088867949" + +[[projects]] + digest = "1:0e3fd52087079d1289983e4fef32268ca965973f5370b69204e2934185527baa" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricexport", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "resource/resourcekeys", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "NUT" + revision = "9c377598961b706d1542bd2d84d538b5094d596e" + version = 
"v0.22.0" + +[[projects]] + digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "NUT" + revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "NUT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:5ab79d2a36037de1ab2908733a2cab0c08b12f6956e3e1eab07cd1b2abf7b903" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "internal/ztest", + "zapcore", + "zaptest", + ] + pruneopts = "NUT" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + +[[projects]] + branch = "master" + digest = "1:a140dd5e8f7d56830f1d4e32f721bdcc63c39f83277d111b02e1c3643e119400" + name = "golang.org/x/crypto" + packages = [ + "cast5", + "cryptobyte", + "cryptobyte/asn1", + "nacl/secretbox", + "openpgp", + "openpgp/armor", + "openpgp/elgamal", + "openpgp/errors", + "openpgp/packet", + "openpgp/s2k", + "pkcs12", + "pkcs12/internal/rc2", + "poly1305", + "salsa20/salsa", + "ssh/terminal", + ] + pruneopts = "NUT" + revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac" + +[[projects]] + branch = "master" + digest = "1:9d8a7d1a24150e97da9aa3d5cac964615297357a311837421bb6180f36ab6fdc" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/h2c", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + "websocket", + ] + pruneopts = "NUT" + revision = "aaf60122140d3fcf75376d319f0554393160eb50" + +[[projects]] + branch = "master" + digest = "1:dcb89c032286a9c3c5118a1496f8e0e237c1437f5356ac9602f6fdef560a5c21" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "NUT" + revision = 
"c57b0facaced709681d9f90397429b9430a74754" + +[[projects]] + branch = "master" + digest = "1:c313aef534e493304f3666fbd24dca5932ebf776a82b7a40f961c9355794a1b1" + name = "golang.org/x/sync" + packages = [ + "errgroup", + "semaphore", + ] + pruneopts = "NUT" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:c84eea70554fcd678159b8c088e12bec7255d43f1c25183455cef7198565fcdd" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "NUT" + revision = "8469e314837c2e2471561de5c47bbf8bfd0d9099" + +[[projects]] + branch = "master" + digest = "1:e33513a825fcd765e97b5de639a2f7547542d1a8245df0cef18e1fd390b778a9" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + "width", + ] + pruneopts = "NUT" + revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" + +[[projects]] + branch = "master" + digest = "1:51a479a09b7ed06b7be5a854e27fcc328718ae0e5ad159f9ddeef12d0326c2e7" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "NUT" + revision = "26559e0f760e39c24d730d3224364aef164ee23f" + +[[projects]] + branch = "master" + digest = "1:2c57a52b1792fad8b668be56c76a9a4959f7b665b8474cfa188c652ef1a2b82d" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports", + ] + pruneopts = "NUT" + revision = "fbec762f837dc349b73d1eaa820552e2ad177942" + +[[projects]] + branch = "master" + digest = "1:316acdd3d33465ea10eb1a5a10d4de4918f0a2373f87233d6dee80e8aba14c91" + name = "google.golang.org/api" + packages = [ + "container/v1beta1", + "googleapi", + "googleapi/transport", + "internal", + "internal/gensupport", + "internal/third_party/uritemplates", + "iterator", + "option", + "storage/v1", + "support/bundler", + "transport", + 
"transport/grpc", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "NUT" + revision = "997ecb25550d593f7f4c1ebcc4ba33f50efc6a4b" + +[[projects]] + digest = "1:b400989fa9ecf8f825e9092d41c9779444afeda5d5ce7f5c9d4a9ca9d05c949c" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/socket", + "internal/urlfetch", + "socket", + "urlfetch", + ] + pruneopts = "NUT" + revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" + version = "v1.3.0" + +[[projects]] + digest = "1:0a17eda92a03c3e0843832503aae4610abf8d8ed4c3a7adfc72015cf6b688502" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api", + "googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/httpbody", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/container/v1", + "googleapis/devtools/cloudtrace/v2", + "googleapis/iam/v1", + "googleapis/monitoring/v3", + "googleapis/rpc/code", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "357c62f0e4bbba7e6cc403ae09edcf3e2b9028fe" + +[[projects]] + digest = "1:480952990437229d2348e7fd7f32897bfc2da15a769e954f307ff861040124f9" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/grpclb", + "balancer/grpclb/grpc_lb_v1", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/alts", + "credentials/alts/internal", + "credentials/alts/internal/authinfo", + "credentials/alts/internal/conn", + "credentials/alts/internal/handshaker", + "credentials/alts/internal/handshaker/service", + "credentials/alts/internal/proto/grpc_gcp", + "credentials/google", + "credentials/internal", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + 
"health/grpc_health_v1", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "f6d0f9ee430895e87ef1ceb5ac8f39725bafceef" + version = "v1.24.0" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "NUT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:80c34337e8a734e190f2d1b716cae774cca74db98315166f92074434e9af0227" + name = "gopkg.in/natefinch/lumberjack.v2" + packages = ["."] + pruneopts = "NUT" + revision = "a96e63847dc3c67d17befa69c303767e2f84e54f" + version = "v2.1" + +[[projects]] + digest = "1:accc3bfe4e404aa53ac3621470e7cf9fce1efe48f0fabcfe6d12a72579d9d91f" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "NUT" + revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5" + version = "v2.2.4" + +[[projects]] + digest = "1:9579dd8b1ccd8cf54ed576af68438b69588cc136eac90b10aa7c8d1a1c8334ff" + name = "istio.io/api" + packages = ["networking/v1alpha3"] + pruneopts = "NUT" + revision = "e1a1952e5b81ed914b06c1814fd3a0c92c750e37" + version = "1.4.1" + +[[projects]] + digest = "1:57e8c0255e33ab72e09219665d5ed7733a694e0092ab4a69b97100aa27bcbbb4" + name = "istio.io/client-go" + packages = ["pkg/apis/networking/v1alpha3"] + pruneopts = "NUT" + revision = "26c62a04cdbc1fb52dd71a9194b0d49ce297e836" + version = "1.4.1" + +[[projects]] + digest = "1:9c9f11af1b1f2ae03d897ba5de27103ec1c9c43605663ccdef67831d6a462a30" + name = "istio.io/gogo-genproto" + packages = ["googleapis/google/api"] + pruneopts = "NUT" + revision = 
"f7d19ec0141d49ac9efc83b5e61fa81ba103b445" + version = "1.4.0-beta.4" + +[[projects]] + digest = "1:9b3e533ee96d9d3c0090385d65de0d507243ae5e32be4e623829b68e37f2aaad" + name = "k8s.io/api" + packages = [ + "admission/v1", + "admission/v1beta1", + "admissionregistration/v1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "auditregistration/v1alpha1", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1", + "coordination/v1beta1", + "core/v1", + "discovery/v1alpha1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "networking/v1beta1", + "node/v1alpha1", + "node/v1beta1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + "storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "NUT" + revision = "2a24ef1ce092b529249c747df24d66a55fe97ad0" + version = "kubernetes-1.16.4" + +[[projects]] + digest = "1:636417ff03ed51f0045e4a65b563a3cc338db010e910e33617872a70bc5a4983" + name = "k8s.io/apiextensions-apiserver" + packages = [ + "pkg/apis/apiextensions", + "pkg/apis/apiextensions/v1beta1", + ] + pruneopts = "NUT" + revision = "bbf2aa1c9d0f69788f9cbd29ad5520574f91133c" + version = "kubernetes-1.16.4" + +[[projects]] + digest = "1:bfbb9cb6545607642906234410d39b7bae81812f360d7649d16bebdacaede4cf" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/equality", + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/api/validation", + "pkg/api/validation/path", + "pkg/apis/meta/internalversion", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1/validation", + "pkg/apis/meta/v1beta1", + "pkg/apis/meta/v1beta1/validation", + "pkg/conversion", + 
"pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/cache", + "pkg/util/clock", + "pkg/util/diff", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/mergepatch", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/sets/types", + "pkg/util/strategicpatch", + "pkg/util/uuid", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/waitgroup", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/json", + "third_party/forked/golang/reflect", + ] + pruneopts = "NUT" + revision = "72ed19daf4bb788ae595ae4103c404cb0fa09c84" + version = "kubernetes-1.16.4" + +[[projects]] + digest = "1:1d6d4ffe676009b21056127701b04b97ace2951fc6244fedf0526b9c61d5498e" + name = "k8s.io/apiserver" + packages = [ + "pkg/admission", + "pkg/admission/configuration", + "pkg/admission/initializer", + "pkg/admission/metrics", + "pkg/admission/plugin/namespace/lifecycle", + "pkg/admission/plugin/webhook", + "pkg/admission/plugin/webhook/config", + "pkg/admission/plugin/webhook/config/apis/webhookadmission", + "pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1", + "pkg/admission/plugin/webhook/errors", + "pkg/admission/plugin/webhook/generic", + "pkg/admission/plugin/webhook/mutating", + "pkg/admission/plugin/webhook/namespace", + "pkg/admission/plugin/webhook/object", + "pkg/admission/plugin/webhook/request", + "pkg/admission/plugin/webhook/rules", + "pkg/admission/plugin/webhook/validating", + "pkg/apis/apiserver", + "pkg/apis/apiserver/install", + "pkg/apis/apiserver/v1alpha1", + "pkg/apis/audit", + 
"pkg/apis/audit/install", + "pkg/apis/audit/v1", + "pkg/apis/audit/v1alpha1", + "pkg/apis/audit/v1beta1", + "pkg/apis/audit/validation", + "pkg/apis/config", + "pkg/apis/config/v1", + "pkg/audit", + "pkg/audit/event", + "pkg/audit/policy", + "pkg/audit/util", + "pkg/authentication/authenticator", + "pkg/authentication/authenticatorfactory", + "pkg/authentication/group", + "pkg/authentication/request/anonymous", + "pkg/authentication/request/bearertoken", + "pkg/authentication/request/headerrequest", + "pkg/authentication/request/union", + "pkg/authentication/request/websocket", + "pkg/authentication/request/x509", + "pkg/authentication/serviceaccount", + "pkg/authentication/token/cache", + "pkg/authentication/token/tokenfile", + "pkg/authentication/user", + "pkg/authorization/authorizer", + "pkg/authorization/authorizerfactory", + "pkg/authorization/path", + "pkg/authorization/union", + "pkg/endpoints", + "pkg/endpoints/discovery", + "pkg/endpoints/filters", + "pkg/endpoints/handlers", + "pkg/endpoints/handlers/fieldmanager", + "pkg/endpoints/handlers/fieldmanager/internal", + "pkg/endpoints/handlers/negotiation", + "pkg/endpoints/handlers/responsewriters", + "pkg/endpoints/metrics", + "pkg/endpoints/openapi", + "pkg/endpoints/request", + "pkg/features", + "pkg/registry/generic", + "pkg/registry/generic/registry", + "pkg/registry/rest", + "pkg/server", + "pkg/server/egressselector", + "pkg/server/filters", + "pkg/server/healthz", + "pkg/server/httplog", + "pkg/server/mux", + "pkg/server/options", + "pkg/server/options/encryptionconfig", + "pkg/server/resourceconfig", + "pkg/server/routes", + "pkg/server/storage", + "pkg/storage", + "pkg/storage/cacher", + "pkg/storage/errors", + "pkg/storage/etcd3", + "pkg/storage/etcd3/metrics", + "pkg/storage/names", + "pkg/storage/storagebackend", + "pkg/storage/storagebackend/factory", + "pkg/storage/value", + "pkg/storage/value/encrypt/aes", + "pkg/storage/value/encrypt/envelope", + 
"pkg/storage/value/encrypt/envelope/v1beta1", + "pkg/storage/value/encrypt/identity", + "pkg/storage/value/encrypt/secretbox", + "pkg/util/dryrun", + "pkg/util/feature", + "pkg/util/flushwriter", + "pkg/util/openapi", + "pkg/util/webhook", + "pkg/util/wsstream", + "plugin/pkg/audit/buffered", + "plugin/pkg/audit/dynamic", + "plugin/pkg/audit/dynamic/enforced", + "plugin/pkg/audit/log", + "plugin/pkg/audit/truncate", + "plugin/pkg/audit/webhook", + "plugin/pkg/authenticator/token/webhook", + "plugin/pkg/authorizer/webhook", + ] + pruneopts = "NUT" + revision = "45540e023360193661c0ec8e5afd746edf45ec83" + version = "kubernetes-1.16.4" + +[[projects]] + digest = "1:db0d7ba21a4bffa411a4f3cfa8b8a6b4bcf401a6062d6dbf1b7c13191590c1bc" + name = "k8s.io/client-go" + packages = [ + "discovery", + "discovery/fake", + "dynamic", + "dynamic/fake", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/coordination", + "informers/coordination/v1", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/discovery", + "informers/discovery/v1alpha1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/networking/v1beta1", + "informers/node", + "informers/node/v1alpha1", + "informers/node/v1beta1", + "informers/policy", + 
"informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", + "informers/scheduling/v1", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", + "kubernetes", + "kubernetes/fake", + "kubernetes/scheme", + "kubernetes/typed/admissionregistration/v1", + "kubernetes/typed/admissionregistration/v1/fake", + "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", + "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", + "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", + "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", + "kubernetes/typed/auditregistration/v1alpha1", + "kubernetes/typed/auditregistration/v1alpha1/fake", + "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", + "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", + "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", + "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", + "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", + "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", + "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", + "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", + "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", + "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", + "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1", + 
"kubernetes/typed/coordination/v1/fake", + "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", + "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", + "kubernetes/typed/discovery/v1alpha1", + "kubernetes/typed/discovery/v1alpha1/fake", + "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", + "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", + "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", + "kubernetes/typed/networking/v1beta1", + "kubernetes/typed/networking/v1beta1/fake", + "kubernetes/typed/node/v1alpha1", + "kubernetes/typed/node/v1alpha1/fake", + "kubernetes/typed/node/v1beta1", + "kubernetes/typed/node/v1beta1/fake", + "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", + "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", + "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1alpha1/fake", + "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", + "kubernetes/typed/scheduling/v1", + "kubernetes/typed/scheduling/v1/fake", + "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", + "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", + "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", + "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", + "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", + "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", + "listers/admissionregistration/v1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", 
+ "listers/certificates/v1beta1", + "listers/coordination/v1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/discovery/v1alpha1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/networking/v1beta1", + "listers/node/v1alpha1", + "listers/node/v1beta1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "plugin/pkg/client/auth/gcp", + "plugin/pkg/client/auth/oidc", + "rest", + "rest/watch", + "restmapper", + "testing", + "third_party/forked/golang/template", + "tools/auth", + "tools/cache", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "tools/pager", + "tools/record", + "tools/record/util", + "tools/reference", + "transport", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/jsonpath", + "util/keyutil", + "util/retry", + "util/workqueue", + ] + pruneopts = "NUT" + revision = "20ea64f01e4d4026bce06d61d697368390aeda4e" + version = "kubernetes-1.16.4" + +[[projects]] + digest = "1:a6aa236db5d07dff9ea1160ef316b17e131d2b1b2ad7d177431206bf389d241e" + name = "k8s.io/code-generator" + packages = [ + "cmd/client-gen", + "cmd/client-gen/args", + "cmd/client-gen/generators", + "cmd/client-gen/generators/fake", + "cmd/client-gen/generators/scheme", + "cmd/client-gen/generators/util", + "cmd/client-gen/path", + "cmd/client-gen/types", + "cmd/deepcopy-gen", + "cmd/deepcopy-gen/args", + "cmd/defaulter-gen", + "cmd/defaulter-gen/args", + "cmd/informer-gen", + 
"cmd/informer-gen/args", + "cmd/informer-gen/generators", + "cmd/lister-gen", + "cmd/lister-gen/args", + "cmd/lister-gen/generators", + "pkg/namer", + "pkg/util", + ] + pruneopts = "T" + revision = "8e001e5d18949be7e823ccb9cfe9b60026e7bda0" + version = "kubernetes-1.16.4" + +[[projects]] + branch = "master" + digest = "1:665624cd875baf59506da205c7c3d703976aea45ff51922c59d08add213578aa" + name = "k8s.io/component-base" + packages = [ + "cli/flag", + "featuregate", + "logs", + "metrics", + "metrics/legacyregistry", + "version", + ] + pruneopts = "NUT" + revision = "3f8a0e53a4bc6e4944cbfbb28da0b971ceba0380" + +[[projects]] + branch = "master" + digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15" + name = "k8s.io/gengo" + packages = [ + "args", + "examples/deepcopy-gen/generators", + "examples/defaulter-gen/generators", + "examples/set-gen/sets", + "generator", + "namer", + "parser", + "types", + ] + pruneopts = "NUT" + revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985" + +[[projects]] + digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NUT" + revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" + version = "v0.2.0" + +[[projects]] + digest = "1:6267b439cfa16676669e82402d6d7bf209f2c55a198206ef28eee97e0ccda60c" + name = "k8s.io/kube-openapi" + packages = [ + "pkg/builder", + "pkg/common", + "pkg/handler", + "pkg/schemaconv", + "pkg/util", + "pkg/util/proto", + ] + pruneopts = "NUT" + revision = "743ec37842bffe49dd4221d9026f30fb1d5adbc4" + +[[projects]] + digest = "1:82f9b2deb157470451063cea72fe0a7155f68c97bd43c6f5e671ba347d72e4a0" + name = "k8s.io/kubernetes" + packages = [ + "pkg/cloudprovider/providers/azure/auth", + "pkg/credentialprovider", + "pkg/credentialprovider/aws", + "pkg/credentialprovider/azure", + "pkg/credentialprovider/gcp", + "pkg/credentialprovider/secrets", + "pkg/version", + ] + pruneopts = "NUT" + revision = 
"81753b10df112992bf51bbc2c2f85208aad78335" + version = "v1.10.2" + +[[projects]] + digest = "1:281a4b2b38d55df26339f669931486a38836671606e8f84703a61beefbb5797b" + name = "k8s.io/metrics" + packages = [ + "pkg/apis/custom_metrics", + "pkg/apis/custom_metrics/install", + "pkg/apis/custom_metrics/v1beta1", + "pkg/apis/custom_metrics/v1beta2", + "pkg/apis/external_metrics", + "pkg/apis/external_metrics/install", + "pkg/apis/external_metrics/v1beta1", + ] + pruneopts = "NUT" + revision = "fba067ca6a9c551119bdfd7b14e26890d6a3f62e" + version = "kubernetes-1.16.4" + +[[projects]] + branch = "master" + digest = "1:bf4880ebdf96696ad8b81e12017aff06c779b6b0c1e2299ad7aa0512b8fab066" + name = "k8s.io/utils" + packages = [ + "buffer", + "integer", + "path", + "pointer", + "trace", + ] + pruneopts = "NUT" + revision = "3d4f5b7dea0b7c63c77d7fb1f0ee433ad8d54667" + +[[projects]] + branch = "master" + digest = "1:fc8e3e15886097a2e3715671acaeefebf939dc7977e4ee46f1c3f490dfdcec10" + name = "knative.dev/caching" + packages = [ + "pkg/apis/caching", + "pkg/apis/caching/v1alpha1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/fake", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/caching/v1alpha1", + "pkg/client/clientset/versioned/typed/caching/v1alpha1/fake", + "pkg/client/informers/externalversions", + "pkg/client/informers/externalversions/caching", + "pkg/client/informers/externalversions/caching/v1alpha1", + "pkg/client/informers/externalversions/internalinterfaces", + "pkg/client/injection/client", + "pkg/client/injection/client/fake", + "pkg/client/injection/informers/caching/v1alpha1/image", + "pkg/client/injection/informers/caching/v1alpha1/image/fake", + "pkg/client/injection/informers/factory", + "pkg/client/injection/informers/factory/fake", + "pkg/client/listers/caching/v1alpha1", + ] + pruneopts = "T" + revision = "67bca2c83dfa6f2ff5e60672d531fed47604eb11" + +[[projects]] + branch = "release-0.12" + digest = 
"1:048b68ed41f274d9f9c663076f987bb4be938273ba13bc68088910ad634a354b" + name = "knative.dev/pkg" + packages = [ + "apis", + "apis/duck", + "apis/duck/v1", + "apis/duck/v1alpha1", + "apis/duck/v1beta1", + "apis/testing/v1", + "changeset", + "client/injection/kube/client", + "client/injection/kube/client/fake", + "client/injection/kube/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration", + "client/injection/kube/informers/admissionregistration/v1beta1/validatingwebhookconfiguration", + "client/injection/kube/informers/apps/v1/deployment", + "client/injection/kube/informers/apps/v1/deployment/fake", + "client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler", + "client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler/fake", + "client/injection/kube/informers/core/v1/configmap", + "client/injection/kube/informers/core/v1/configmap/fake", + "client/injection/kube/informers/core/v1/endpoints", + "client/injection/kube/informers/core/v1/endpoints/fake", + "client/injection/kube/informers/core/v1/namespace", + "client/injection/kube/informers/core/v1/namespace/fake", + "client/injection/kube/informers/core/v1/pod", + "client/injection/kube/informers/core/v1/pod/fake", + "client/injection/kube/informers/core/v1/secret", + "client/injection/kube/informers/core/v1/secret/fake", + "client/injection/kube/informers/core/v1/service", + "client/injection/kube/informers/core/v1/service/fake", + "client/injection/kube/informers/factory", + "client/injection/kube/informers/factory/fake", + "codegen/cmd/injection-gen", + "codegen/cmd/injection-gen/args", + "codegen/cmd/injection-gen/generators", + "configmap", + "configmap/testing", + "controller", + "injection", + "injection/clients/dynamicclient", + "injection/clients/dynamicclient/fake", + "injection/sharedmain", + "kmeta", + "kmp", + "logging", + "logging/logkey", + "logging/testing", + "metrics", + "metrics/metricskey", + "metrics/metricstest", + "metrics/testing", + 
"network", + "network/prober", + "profiling", + "ptr", + "reconciler/testing", + "signals", + "system", + "system/testing", + "test", + "test/ghutil", + "test/gke", + "test/helpers", + "test/ingress", + "test/logging", + "test/logstream", + "test/mako", + "test/mako/alerter", + "test/mako/alerter/github", + "test/mako/alerter/slack", + "test/mako/config", + "test/monitoring", + "test/prometheus", + "test/slackutil", + "test/spoof", + "test/vegeta/pacers", + "test/webhook-apicoverage/coveragecalculator", + "test/webhook-apicoverage/resourcetree", + "test/webhook-apicoverage/tools", + "test/webhook-apicoverage/view", + "test/webhook-apicoverage/webhook", + "test/zipkin", + "testutils/clustermanager/perf-tests", + "testutils/clustermanager/perf-tests/pkg", + "tracing", + "tracing/config", + "tracing/testing", + "tracker", + "version", + "webhook", + "webhook/certificates", + "webhook/certificates/resources", + "webhook/configmaps", + "webhook/resourcesemantics", + "webhook/resourcesemantics/defaulting", + "webhook/resourcesemantics/validation", + "websocket", + ] + pruneopts = "T" + revision = "b8dc5fbc6d2f4717a69d15382921a51f93ab4cbb" + +[[projects]] + branch = "master" + digest = "1:057975fb4fbfbdc136c8ddb4367275ad9287fbf2fa5ccc0a1a6bb1e143af8a8e" + name = "knative.dev/test-infra" + packages = [ + "scripts", + "shared/common", + "shared/gcs", + "shared/junit", + "shared/performance", + "shared/prow", + "shared/testgrid", + "tools/dep-collector", + ] + pruneopts = "UT" + revision = "d5990f0e5a05d5819a40ad3b4de6227406850b48" + +[[projects]] + digest = "1:92b88da51692abe195601cb17d35bbb9b6bc2011237a2f234fedba7411ed8122" + name = "sigs.k8s.io/structured-merge-diff" + packages = [ + "fieldpath", + "merge", + "schema", + "typed", + "value", + ] + pruneopts = "NUT" + revision = "442c558739770972beae46a7f64f19fe34a2c9af" + version = "v1.0.1" + +[[projects]] + digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" + name = "sigs.k8s.io/yaml" + packages 
= ["."] + pruneopts = "NUT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/davecgh/go-spew/spew", + "github.com/ghodss/yaml", + "github.com/gogo/protobuf/proto", + "github.com/gogo/protobuf/types", + "github.com/golang/protobuf/proto", + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/go-containerregistry/pkg/authn/k8schain", + "github.com/google/go-containerregistry/pkg/name", + "github.com/google/go-containerregistry/pkg/v1", + "github.com/google/go-containerregistry/pkg/v1/partial", + "github.com/google/go-containerregistry/pkg/v1/random", + "github.com/google/go-containerregistry/pkg/v1/remote", + "github.com/google/go-containerregistry/pkg/v1/remote/transport", + "github.com/google/go-containerregistry/pkg/v1/types", + "github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto", + "github.com/google/mako/go/quickstore", + "github.com/google/mako/spec/proto/mako_go_proto", + "github.com/gorilla/websocket", + "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2", + "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2", + "github.com/jetstack/cert-manager/pkg/apis/meta/v1", + "github.com/kelseyhightower/envconfig", + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd", + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider", + "github.com/mattbaird/jsonpatch", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", + "github.com/spf13/pflag", + "github.com/tsenart/vegeta", + "github.com/tsenart/vegeta/lib", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/stats", + "go.opencensus.io/stats/view", + "go.opencensus.io/tag", + "go.opencensus.io/trace", + "go.uber.org/atomic", + 
"go.uber.org/zap", + "go.uber.org/zap/zapcore", + "go.uber.org/zap/zaptest", + "golang.org/x/net/context", + "golang.org/x/net/http2", + "golang.org/x/net/http2/h2c", + "golang.org/x/sync/errgroup", + "google.golang.org/grpc", + "istio.io/api/networking/v1alpha3", + "istio.io/client-go/pkg/apis/networking/v1alpha3", + "k8s.io/api/apps/v1", + "k8s.io/api/authentication/v1", + "k8s.io/api/autoscaling/v2beta1", + "k8s.io/api/core/v1", + "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/api/validation", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/selection", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/dynamic", + "k8s.io/client-go/dynamic/fake", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/core/v1", + "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/listers/apps/v1", + "k8s.io/client-go/listers/autoscaling/v2beta1", + "k8s.io/client-go/listers/core/v1", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/client-go/plugin/pkg/client/auth/oidc", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + 
"k8s.io/client-go/tools/record", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/retry", + "k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/informer-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/kubernetes/pkg/version", + "k8s.io/metrics/pkg/apis/custom_metrics", + "knative.dev/caching/pkg/apis/caching", + "knative.dev/caching/pkg/apis/caching/v1alpha1", + "knative.dev/caching/pkg/client/clientset/versioned", + "knative.dev/caching/pkg/client/clientset/versioned/fake", + "knative.dev/caching/pkg/client/injection/client", + "knative.dev/caching/pkg/client/injection/client/fake", + "knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image", + "knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake", + "knative.dev/caching/pkg/client/listers/caching/v1alpha1", + "knative.dev/pkg/apis", + "knative.dev/pkg/apis/duck", + "knative.dev/pkg/apis/duck/v1", + "knative.dev/pkg/apis/duck/v1alpha1", + "knative.dev/pkg/apis/duck/v1beta1", + "knative.dev/pkg/apis/testing/v1", + "knative.dev/pkg/client/injection/kube/client", + "knative.dev/pkg/client/injection/kube/client/fake", + "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment", + "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/fake", + "knative.dev/pkg/client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler", + "knative.dev/pkg/client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler/fake", + "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap", + "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake", + "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints", + "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake", + 
"knative.dev/pkg/client/injection/kube/informers/core/v1/namespace", + "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/fake", + "knative.dev/pkg/client/injection/kube/informers/core/v1/pod", + "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake", + "knative.dev/pkg/client/injection/kube/informers/core/v1/secret", + "knative.dev/pkg/client/injection/kube/informers/core/v1/secret/fake", + "knative.dev/pkg/client/injection/kube/informers/core/v1/service", + "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake", + "knative.dev/pkg/client/injection/kube/informers/factory", + "knative.dev/pkg/codegen/cmd/injection-gen", + "knative.dev/pkg/configmap", + "knative.dev/pkg/configmap/testing", + "knative.dev/pkg/controller", + "knative.dev/pkg/injection", + "knative.dev/pkg/injection/clients/dynamicclient", + "knative.dev/pkg/injection/clients/dynamicclient/fake", + "knative.dev/pkg/injection/sharedmain", + "knative.dev/pkg/kmeta", + "knative.dev/pkg/kmp", + "knative.dev/pkg/logging", + "knative.dev/pkg/logging/logkey", + "knative.dev/pkg/logging/testing", + "knative.dev/pkg/metrics", + "knative.dev/pkg/metrics/metricskey", + "knative.dev/pkg/metrics/metricstest", + "knative.dev/pkg/metrics/testing", + "knative.dev/pkg/network", + "knative.dev/pkg/network/prober", + "knative.dev/pkg/profiling", + "knative.dev/pkg/ptr", + "knative.dev/pkg/reconciler/testing", + "knative.dev/pkg/signals", + "knative.dev/pkg/system", + "knative.dev/pkg/system/testing", + "knative.dev/pkg/test", + "knative.dev/pkg/test/helpers", + "knative.dev/pkg/test/ingress", + "knative.dev/pkg/test/logging", + "knative.dev/pkg/test/logstream", + "knative.dev/pkg/test/mako", + "knative.dev/pkg/test/prometheus", + "knative.dev/pkg/test/spoof", + "knative.dev/pkg/test/vegeta/pacers", + "knative.dev/pkg/test/webhook-apicoverage/coveragecalculator", + "knative.dev/pkg/test/webhook-apicoverage/resourcetree", + "knative.dev/pkg/test/webhook-apicoverage/tools", + 
"knative.dev/pkg/test/webhook-apicoverage/view", + "knative.dev/pkg/test/webhook-apicoverage/webhook", + "knative.dev/pkg/testutils/clustermanager/perf-tests", + "knative.dev/pkg/tracing", + "knative.dev/pkg/tracing/config", + "knative.dev/pkg/tracing/testing", + "knative.dev/pkg/tracker", + "knative.dev/pkg/version", + "knative.dev/pkg/webhook", + "knative.dev/pkg/webhook/certificates", + "knative.dev/pkg/webhook/configmaps", + "knative.dev/pkg/webhook/resourcesemantics", + "knative.dev/pkg/webhook/resourcesemantics/defaulting", + "knative.dev/pkg/webhook/resourcesemantics/validation", + "knative.dev/pkg/websocket", + "knative.dev/test-infra/scripts", + "knative.dev/test-infra/shared/junit", + "knative.dev/test-infra/shared/performance", + "knative.dev/test-infra/shared/prow", + "knative.dev/test-infra/shared/testgrid", + "knative.dev/test-infra/tools/dep-collector", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/test/vendor/knative.dev/serving/Gopkg.toml b/test/vendor/knative.dev/serving/Gopkg.toml new file mode 100644 index 0000000000..44e82cd2b5 --- /dev/null +++ b/test/vendor/knative.dev/serving/Gopkg.toml @@ -0,0 +1,171 @@ +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. + +required = [ + "k8s.io/apimachinery/pkg/util/sets/types", + "k8s.io/code-generator/cmd/deepcopy-gen", + "k8s.io/code-generator/cmd/defaulter-gen", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/lister-gen", + "k8s.io/code-generator/cmd/informer-gen", + "knative.dev/pkg/codegen/cmd/injection-gen", + # TODO(#4549): Drop this when we drop our patches. + "k8s.io/kubernetes/pkg/version", + "knative.dev/caching/pkg/apis/caching", + # For cluster management in performance testing. + "knative.dev/pkg/testutils/clustermanager/perf-tests", + "knative.dev/test-infra/scripts", + "knative.dev/test-infra/tools/dep-collector", + # For load testing. 
+ "github.com/tsenart/vegeta" +] + +[[constraint]] + name = "github.com/tsenart/vegeta" + branch = "master" + +[[override]] + name = "gopkg.in/yaml.v2" + version = "v2.2.4" + +[[override]] + name = "knative.dev/pkg" + branch = "release-0.12" + +[[constraint]] + name = "knative.dev/caching" + branch = "master" + +[[override]] + name = "github.com/google/mako" + version = "v0.1.0" + +[[override]] + name = "go.uber.org/zap" + revision = "67bc79d13d155c02fd008f721863ff8cc5f30659" + +[[constraint]] + name = "github.com/google/go-containerregistry" + # HEAD as of 2019-09-10 + revision = "b02d448a3705facf11018efff34f1d2830be5724" + +[[constraint]] + name = "github.com/jetstack/cert-manager" + version = "v0.12.0" + +[[constraint]] + name = "github.com/gogo/protobuf" + version = "v1.3.1" + +[[constraint]] + name = "istio.io/api" + version = "1.4.1" + +[[constraint]] + name = "istio.io/client-go" + version = "1.4.1" + +[[override]] + name = "k8s.io/api" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/code-generator" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/client-go" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/apiserver" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/metrics" + version = "kubernetes-1.16.4" + +[[override]] + name = "k8s.io/kube-openapi" + # This is the version at which k8s.io/apiserver depends on this at its 1.16.4 tag. + revision = "743ec37842bffe49dd4221d9026f30fb1d5adbc4" + +[[override]] + name = "sigs.k8s.io/structured-merge-diff" + # This is the version at which k8s.io/apiserver depends on this at its 1.16.4 tag. 
+ version = "1.0.1" + +[[override]] + name = "github.com/rogpeppe/go-internal" + version = "1.3.0" + +[[override]] + name = "github.com/kelseyhightower/envconfig" + version = "v1.4.0" + +# Added for the custom-metrics-apiserver specifically +[[override]] + name = "github.com/kubernetes-incubator/custom-metrics-apiserver" + revision = "3d9be26a50eb64531fc40eb31a5f3e6720956dc6" + +[[override]] + name = "bitbucket.org/ww/goautoneg" + source = "github.com/munnerz/goautoneg" + +[prune] + go-tests = true + unused-packages = true + non-go = true + +[[prune.project]] + name = "k8s.io/code-generator" + unused-packages = false + non-go = false + +[[prune.project]] + name = "knative.dev/test-infra" + non-go = false + +[[prune.project]] + name = "knative.dev/pkg" + unused-packages = false + non-go = false + +[[prune.project]] + name = "knative.dev/caching" + unused-packages = false + non-go = false + + +# The dependencies below are required for opencensus. +[[override]] + name = "google.golang.org/genproto" + revision = "357c62f0e4bbba7e6cc403ae09edcf3e2b9028fe" + +[[override]] + name = "contrib.go.opencensus.io/exporter/prometheus" + version = "0.1.0" + +[[override]] + name = "contrib.go.opencensus.io/exporter/zipkin" + version = "0.1.1" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[override]] + name = "github.com/census-instrumentation/opencensus-proto" + version = "0.2.0" + +[[override]] + name="github.com/golang/protobuf" + version = "1.3.2" diff --git a/test/vendor/knative.dev/serving/LICENSE b/test/vendor/knative.dev/serving/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/knative.dev/serving/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/knative.dev/serving/Makefile b/test/vendor/knative.dev/serving/Makefile new file mode 100644 index 0000000000..efd098a360 --- /dev/null +++ b/test/vendor/knative.dev/serving/Makefile @@ -0,0 +1,42 @@ +#This makefile is used by ci-operator + +CGO_ENABLED=0 +GOOS=linux +CORE_IMAGES=./cmd/activator ./cmd/autoscaler ./cmd/autoscaler-hpa ./cmd/controller ./cmd/queue ./cmd/webhook ./cmd/networking/istio ./cmd/networking/certmanager ./cmd/networking/nscert +TEST_IMAGES=$(shell find ./test/test_images -mindepth 1 -maxdepth 1 -type d) + +install: + for img in $(CORE_IMAGES); do \ + go install $$img ; \ + done +.PHONY: install + +test-install: + for img in $(TEST_IMAGES); do \ + go install $$img ; \ + done +.PHONY: test-install + +test-e2e: + ./openshift/e2e-tests-openshift.sh +.PHONY: test-e2e + +# Generate Dockerfiles for core and test images used by ci-operator. The files need to be committed manually. +generate-dockerfiles: + ./openshift/ci-operator/generate-dockerfiles.sh openshift/ci-operator/knative-images $(CORE_IMAGES) + ./openshift/ci-operator/generate-dockerfiles.sh openshift/ci-operator/knative-test-images $(TEST_IMAGES) +.PHONY: generate-dockerfiles + +generate-p12n-dockerfiles: + ./openshift/productization/generate-dockerfiles/gen_dockerfiles.sh openshift/productization/dist-git +.PHONY: generate-p12n-dockerfiles + +# Generates a ci-operator configuration for a specific branch. 
+generate-ci-config: + ./openshift/ci-operator/generate-ci-config.sh $(BRANCH) > ci-operator-config.yaml +.PHONY: generate-ci-config + +# Generate an aggregated knative yaml file with replaced image references +generate-release: + ./openshift/release/generate-release.sh $(RELEASE) +.PHONY: generate-release diff --git a/test/vendor/knative.dev/serving/OWNERS b/test/vendor/knative.dev/serving/OWNERS new file mode 100644 index 0000000000..6f5414d602 --- /dev/null +++ b/test/vendor/knative.dev/serving/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-approvers + +reviewers: +- serving-reviewers diff --git a/test/vendor/knative.dev/serving/OWNERS_ALIASES b/test/vendor/knative.dev/serving/OWNERS_ALIASES new file mode 100644 index 0000000000..fa13003bf2 --- /dev/null +++ b/test/vendor/knative.dev/serving/OWNERS_ALIASES @@ -0,0 +1,162 @@ +aliases: + serving-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + serving-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + serving-api-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + serving-api-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + autoscaling-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + autoscaling-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + monitoring-approvers: + - alanfx + - mgencur + - 
mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + monitoring-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + productivity-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + productivity-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + networking-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + networking-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + + build-approvers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + build-reviewers: + - alanfx + - mgencur + - mvinkler + - bbrowning + - jcrossley3 + - bobmcwhirter + - markusthoemmes + - vdemeester + - evanchooly + - arilivigni + diff --git a/test/vendor/knative.dev/serving/README.md b/test/vendor/knative.dev/serving/README.md new file mode 100644 index 0000000000..61755b07c5 --- /dev/null +++ b/test/vendor/knative.dev/serving/README.md @@ -0,0 +1,29 @@ +# Knative Serving + +[![GoDoc](https://godoc.org/github.com/knative/serving?status.svg)](https://godoc.org/github.com/knative/serving) +[![Go Report Card](https://goreportcard.com/badge/knative/serving)](https://goreportcard.com/report/knative/serving) +[![Releases](https://img.shields.io/github/release-pre/knative/serving.svg)](https://github.com/knative/serving/releases) 
+[![LICENSE](https://img.shields.io/github/license/knative/serving.svg)](https://github.com/knative/serving/blob/master/LICENSE) +[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://knative.slack.com) + +Knative Serving builds on Kubernetes and Istio to support deploying and serving +of serverless applications and functions. Serving is easy to get started with +and scales to support advanced scenarios. + +The Knative Serving project provides middleware primitives that enable: + +- Rapid deployment of serverless containers +- Automatic scaling up and down to zero +- Routing and network programming for Istio components +- Point-in-time snapshots of deployed code and configurations + +For documentation on using Knative Serving, see the +[serving section](https://www.knative.dev/docs/serving/) of the +[Knative documentation site](https://www.knative.dev/docs). + +For documentation on the Knative Serving specification, see the +[docs](https://github.com/knative/serving/tree/master/docs) folder of this +repository. + +If you are interested in contributing, see [CONTRIBUTING.md](./CONTRIBUTING.md) +and [DEVELOPMENT.md](./DEVELOPMENT.md). diff --git a/test/vendor/knative.dev/serving/ci b/test/vendor/knative.dev/serving/ci new file mode 100644 index 0000000000..e7e743ed5f --- /dev/null +++ b/test/vendor/knative.dev/serving/ci @@ -0,0 +1 @@ +Thu Feb 6 16:57:23 CET 2020 diff --git a/test/vendor/knative.dev/serving/cmd/activator/OWNERS b/test/vendor/knative.dev/serving/cmd/activator/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/activator/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/github.com/knative/serving/cmd/activator/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/activator/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/activator/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/activator/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/activator/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/activator/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/activator/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/activator/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/activator/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/activator/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/activator/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/activator/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/activator/kodata/refs b/test/vendor/knative.dev/serving/cmd/activator/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/activator/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/activator/main.go b/test/vendor/knative.dev/serving/cmd/activator/main.go new file mode 100644 index 0000000000..cf98003fd2 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/activator/main.go @@ -0,0 +1,332 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "net" + "net/http" + "os" + "strconv" + "sync" + "time" + + "github.com/kelseyhightower/envconfig" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + + // Injection related imports. + kubeclient "knative.dev/pkg/client/injection/kube/client" + "knative.dev/pkg/injection" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + + "k8s.io/apimachinery/pkg/util/wait" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/injection/sharedmain" + pkglogging "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + "knative.dev/pkg/metrics" + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/profiling" + "knative.dev/pkg/signals" + "knative.dev/pkg/system" + "knative.dev/pkg/tracing" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/pkg/version" + "knative.dev/pkg/websocket" + "knative.dev/serving/pkg/activator" + activatorconfig "knative.dev/serving/pkg/activator/config" + activatorhandler "knative.dev/serving/pkg/activator/handler" + activatornet "knative.dev/serving/pkg/activator/net" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/autoscaler" + pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/logging" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" +) + +const ( + component = "activator" + + // Add enough buffer to not block request serving on stats collection + requestCountingQueueLength = 100 + + // The number of requests that are queued 
on the breaker before the 503s are sent. + // The value must be adjusted depending on the actual production requirements. + breakerQueueDepth = 10000 + + // The upper bound for concurrent requests sent to the revision. + // As new endpoints show up, the Breakers concurrency increases up to this value. + breakerMaxConcurrency = 1000 + + // The port on which autoscaler WebSocket server listens. + autoscalerPort = ":8080" +) + +var ( + masterURL = flag.String("master", "", "The address of the Kubernetes API server. "+ + "Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") +) + +func statReporter(statSink *websocket.ManagedConnection, stopCh <-chan struct{}, + statChan <-chan []autoscaler.StatMessage, logger *zap.SugaredLogger) { + for { + select { + case sm := <-statChan: + go func() { + for _, msg := range sm { + if err := statSink.Send(msg); err != nil { + logger.Errorw("Error while sending stat", zap.Error(err)) + } + } + }() + case <-stopCh: + // It's a sending connection, so no drainage required. + statSink.Shutdown() + return + } + } +} + +type config struct { + PodName string `split_words:"true" required:"true"` + PodIP string `split_words:"true" required:"true"` +} + +func main() { + flag.Parse() + + // Set up a context that we can cancel to tell informers and other subprocesses to stop. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Report stats on Go memory usage every 30 seconds. 
+ msp := metrics.NewMemStatsAll() + msp.Start(ctx, 30*time.Second) + if err := view.Register(msp.DefaultViews()...); err != nil { + log.Fatalf("Error exporting go memstats view: %v", err) + } + + cfg, err := sharedmain.GetConfig(*masterURL, *kubeconfig) + if err != nil { + log.Fatal("Error building kubeconfig:", err) + } + + log.Printf("Registering %d clients", len(injection.Default.GetClients())) + log.Printf("Registering %d informer factories", len(injection.Default.GetInformerFactories())) + log.Printf("Registering %d informers", len(injection.Default.GetInformers())) + + ctx, informers := injection.Default.SetupInformers(ctx, cfg) + + var env config + if err := envconfig.Process("", &env); err != nil { + log.Fatalf("Failed to process env: %v", err) + } + + kubeClient := kubeclient.Get(ctx) + + // We sometimes startup faster than we can reach kube-api. Poll on failure to prevent us terminating + if perr := wait.PollImmediate(time.Second, 60*time.Second, func() (bool, error) { + if err = version.CheckMinimumVersion(kubeClient.Discovery()); err != nil { + log.Printf("Failed to get k8s version %v", err) + } + return err == nil, nil + }); perr != nil { + log.Fatal("Timed out attempting to get k8s version: ", err) + } + + // Set up our logger. + loggingConfig, err := sharedmain.GetLoggingConfig(ctx) + if err != nil { + log.Fatal("Error loading/parsing logging configuration: ", err) + } + + logger, atomicLevel := pkglogging.NewLoggerFromConfig(loggingConfig, component) + logger = logger.With(zap.String(logkey.ControllerType, component), + zap.String(logkey.Pod, env.PodName)) + ctx = pkglogging.WithLogger(ctx, logger) + defer flush(logger) + + // Run informers instead of starting them from the factory to prevent the sync hanging because of empty handler. 
+ if err := controller.StartInformers(ctx.Done(), informers...); err != nil { + logger.Fatalw("Failed to start informers", zap.Error(err)) + } + + logger.Info("Starting the knative activator") + + reporter, err := activator.NewStatsReporter(env.PodName) + if err != nil { + logger.Fatalw("Failed to create stats reporter", zap.Error(err)) + } + + statCh := make(chan []autoscaler.StatMessage) + defer close(statCh) + + reqCh := make(chan activatorhandler.ReqEvent, requestCountingQueueLength) + defer close(reqCh) + + params := queue.BreakerParams{QueueDepth: breakerQueueDepth, MaxConcurrency: breakerMaxConcurrency, InitialCapacity: 0} + + // Start throttler. + throttler := activatornet.NewThrottler(ctx, params, + // We want to join host port since that will be our search space in the Throttler. + net.JoinHostPort(env.PodIP, strconv.Itoa(networking.BackendHTTPPort))) + go throttler.Run(ctx) + + oct := tracing.NewOpenCensusTracer(tracing.WithExporter(networking.ActivatorServiceName, logger)) + + tracerUpdater := configmap.TypeFilter(&tracingconfig.Config{})(func(name string, value interface{}) { + cfg := value.(*tracingconfig.Config) + if err := oct.ApplyConfig(cfg); err != nil { + logger.Errorw("Unable to apply open census tracer config", zap.Error(err)) + return + } + }) + + // Set up our config store + configMapWatcher := configmap.NewInformedWatcher(kubeClient, system.Namespace()) + configStore := activatorconfig.NewStore(logger, tracerUpdater) + configStore.WatchConfigs(configMapWatcher) + + // Open a WebSocket connection to the autoscaler. 
+ autoscalerEndpoint := fmt.Sprintf("ws://%s.%s.svc.%s%s", "autoscaler", system.Namespace(), pkgnet.GetClusterDomainName(), autoscalerPort) + logger.Info("Connecting to Autoscaler at ", autoscalerEndpoint) + statSink := websocket.NewDurableSendingConnection(autoscalerEndpoint, logger) + go statReporter(statSink, ctx.Done(), statCh, logger) + + // Create and run our concurrency reporter + reportTicker := time.NewTicker(time.Second) + defer reportTicker.Stop() + cr := activatorhandler.NewConcurrencyReporter(ctx, env.PodName, reqCh, + reportTicker.C, statCh, reporter) + go cr.Run(ctx.Done()) + + // Create activation handler chain + // Note: innermost handlers are specified first, ie. the last handler in the chain will be executed first + var ah http.Handler = activatorhandler.New( + ctx, + throttler, + reporter) + ah = activatorhandler.NewRequestEventHandler(reqCh, ah) + ah = tracing.HTTPSpanMiddleware(ah) + ah = configStore.HTTPMiddleware(ah) + reqLogHandler, err := pkghttp.NewRequestLogHandler(ah, logging.NewSyncFileWriter(os.Stdout), "", + requestLogTemplateInputGetter(revisioninformer.Get(ctx).Lister()), false /*enableProbeRequestLog*/) + if err != nil { + logger.Fatalw("Unable to create request log handler", zap.Error(err)) + } + ah = reqLogHandler + + // NOTE: MetricHandler is being used as the outermost handler of the meaty bits. We're not interested in measuring + // the healthchecks or probes. + ah = activatorhandler.NewMetricHandler(ctx, reporter, ah) + ah = activatorhandler.NewContextHandler(ctx, ah) + + // Network probe handlers. + ah = &activatorhandler.ProbeHandler{NextHandler: ah} + ah = network.NewProbeHandler(ah) + + // Set up our health check based on the health of stat sink and environmental factors. 
+ sigCtx, sigCancel := context.WithCancel(context.Background()) + hc := newHealthCheck(sigCtx, logger, statSink) + ah = &activatorhandler.HealthHandler{HealthCheck: hc, NextHandler: ah, Logger: logger} + + profilingHandler := profiling.NewHandler(logger, false) + // Watch the logging config map and dynamically update logging levels. + configMapWatcher.Watch(pkglogging.ConfigMapName(), pkglogging.UpdateLevelFromConfigMap(logger, atomicLevel, component)) + + // Watch the observability config map + configMapWatcher.Watch(metrics.ConfigMapName(), + metrics.UpdateExporterFromConfigMap(component, logger), + updateRequestLogFromConfigMap(logger, reqLogHandler), + profilingHandler.UpdateFromConfigMap) + + if err = configMapWatcher.Start(ctx.Done()); err != nil { + logger.Fatalw("Failed to start configuration manager", zap.Error(err)) + } + + servers := map[string]*http.Server{ + "http1": pkgnet.NewServer(":"+strconv.Itoa(networking.BackendHTTPPort), ah), + "h2c": pkgnet.NewServer(":"+strconv.Itoa(networking.BackendHTTP2Port), ah), + "profile": profiling.NewServer(profilingHandler), + } + + errCh := make(chan error, len(servers)) + for name, server := range servers { + go func(name string, s *http.Server) { + // Don't forward ErrServerClosed as that indicates we're already shutting down. + if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("%s server failed: %w", name, err) + } + }(name, server) + } + + sigCh := signals.SetupSignalHandler() + + // Wait for the signal to drain. + select { + case <-sigCh: + logger.Info("Received SIGTERM") + // Send a signal to let readiness probes start failing. + sigCancel() + case err := <-errCh: + logger.Errorw("Failed to run HTTP server", zap.Error(err)) + } + + // The drain has started (we are now failing readiness probes). Let the effects of this + // propagate so that new requests are no longer routed our way. 
+ time.Sleep(30 * time.Second) + logger.Info("Done waiting, shutting down servers.") + + // Drain outstanding requests, and stop accepting new ones. + for _, server := range servers { + server.Shutdown(context.Background()) + } + logger.Info("Servers shutdown.") +} + +func newHealthCheck(sigCtx context.Context, logger *zap.SugaredLogger, statSink *websocket.ManagedConnection) func() error { + once := sync.Once{} + return func() error { + select { + // When we get SIGTERM (sigCtx done), let readiness probes start failing. + case <-sigCtx.Done(): + once.Do(func() { + logger.Info("Signal context canceled") + }) + return errors.New("received SIGTERM from kubelet") + default: + logger.Debug("No signal yet.") + return statSink.Status() + } + } +} + +func flush(logger *zap.SugaredLogger) { + logger.Sync() + os.Stdout.Sync() + os.Stderr.Sync() + metrics.FlushExporter() +} diff --git a/test/vendor/knative.dev/serving/cmd/activator/request_log.go b/test/vendor/knative.dev/serving/cmd/activator/request_log.go new file mode 100644 index 0000000000..cc524ce52d --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/activator/request_log.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "net/http" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/serving" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + pkghttp "knative.dev/serving/pkg/http" +) + +func updateRequestLogFromConfigMap(logger *zap.SugaredLogger, h *pkghttp.RequestLogHandler) func(configMap *corev1.ConfigMap) { + return func(configMap *corev1.ConfigMap) { + newTemplate := configMap.Data["logging.request-log-template"] + if err := h.SetTemplate(newTemplate); err != nil { + logger.Errorw("Failed to update the request log template.", zap.Error(err), "template", newTemplate) + } else { + logger.Infow("Updated the request log template.", "template", newTemplate) + } + } +} + +func requestLogTemplateInputGetter(revisionLister servinglisters.RevisionLister) pkghttp.RequestLogTemplateInputGetter { + return func(req *http.Request, resp *pkghttp.RequestLogResponse) *pkghttp.RequestLogTemplateInput { + namespace := pkghttp.LastHeaderValue(req.Header, activator.RevisionHeaderNamespace) + name := pkghttp.LastHeaderValue(req.Header, activator.RevisionHeaderName) + revInfo := &pkghttp.RequestLogRevision{ + Namespace: namespace, + Name: name, + } + + revision, err := revisionLister.Revisions(namespace).Get(name) + if err == nil && revision.Labels != nil { + revInfo.Configuration = revision.Labels[serving.ConfigurationLabelKey] + revInfo.Service = revision.Labels[serving.ServiceLabelKey] + } + + return &pkghttp.RequestLogTemplateInput{ + Request: req, + Response: resp, + Revision: revInfo, + } + } +} diff --git a/test/vendor/knative.dev/serving/cmd/activator/request_log_test.go b/test/vendor/knative.dev/serving/cmd/activator/request_log_test.go new file mode 100644 index 0000000000..9708012921 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/activator/request_log_test.go @@ -0,0 +1,197 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + + testing2 "knative.dev/pkg/logging/testing" + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + pkghttp "knative.dev/serving/pkg/http" + + corev1 "k8s.io/api/core/v1" +) + +const ( + testRevisionName = "testRevision" + testNamespaceName = "testNs" + testServiceName = "testSvc" + testConfigName = "testConfig" +) + +func TestUpdateRequestLogFromConfigMap(t *testing.T) { + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + buf := bytes.NewBufferString("") + handler, err := pkghttp.NewRequestLogHandler(baseHandler, buf, "", + requestLogTemplateInputGetter(revisionLister(t, true)), false /*enableProbeRequestLog*/) + if err != nil { + t.Fatalf("want: no error, got: %v", err) + } + + tests := []struct { + name string + url string + body string + template string + want string + }{{ + name: "empty template", + url: "http://example.com/testpage", + body: "test", + template: "", + want: "", + }, { + name: "template 
with new line", + url: "http://example.com/testpage", + body: "test", + template: "{{.Request.URL}}\n", + want: "http://example.com/testpage\n", + }, { + name: "invalid template", + url: "http://example.com", + body: "test", + template: "{{}}", + want: "http://example.com\n", + }, { + name: "revision info", + url: "http://example.com", + body: "test", + template: "{{.Revision.Name}}, {{.Revision.Namespace}}, {{.Revision.Service}}, {{.Revision.Configuration}}, {{.Revision.PodName}}, {{.Revision.PodIP}}", + want: "testRevision, testNs, testSvc, testConfig, , \n", + }, { + name: "empty template 2", + url: "http://example.com/testpage", + body: "test", + template: "", + want: "", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + buf.Reset() + cm := &corev1.ConfigMap{} + cm.Data = map[string]string{"logging.request-log-template": test.template} + (updateRequestLogFromConfigMap(testing2.TestLogger(t), handler))(cm) + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, test.url, bytes.NewBufferString(test.body)) + req.Header = map[string][]string{ + activator.RevisionHeaderName: {testRevisionName}, + activator.RevisionHeaderNamespace: {testNamespaceName}, + } + handler.ServeHTTP(resp, req) + + got := buf.String() + if got != test.want { + t.Errorf("got '%v', want '%v'", got, test.want) + } + }) + } +} + +func TestRequestLogTemplateInputGetter(t *testing.T) { + tests := []struct { + name string + getter pkghttp.RequestLogTemplateInputGetter + request *http.Request + response *pkghttp.RequestLogResponse + want pkghttp.RequestLogRevision + }{{ + name: "success", + getter: requestLogTemplateInputGetter(revisionLister(t, true)), + request: &http.Request{Header: map[string][]string{ + activator.RevisionHeaderName: {testRevisionName}, + activator.RevisionHeaderNamespace: {testNamespaceName}, + }}, + response: &pkghttp.RequestLogResponse{Code: http.StatusAlreadyReported}, + want: pkghttp.RequestLogRevision{ + Namespace: 
testNamespaceName, + Name: testRevisionName, + Configuration: testConfigName, + Service: testServiceName, + }, + }, { + name: "revision not found", + getter: requestLogTemplateInputGetter(revisionLister(t, true)), + request: &http.Request{Header: map[string][]string{ + activator.RevisionHeaderName: {"foo"}, + activator.RevisionHeaderNamespace: {"bar"}, + }}, + response: &pkghttp.RequestLogResponse{Code: http.StatusAlreadyReported}, + want: pkghttp.RequestLogRevision{ + Name: "foo", + Namespace: "bar", + }, + }, { + name: "labels not found", + getter: requestLogTemplateInputGetter(revisionLister(t, false)), + request: &http.Request{Header: map[string][]string{ + activator.RevisionHeaderName: {testRevisionName}, + activator.RevisionHeaderNamespace: {testNamespaceName}, + }}, + response: &pkghttp.RequestLogResponse{Code: http.StatusAlreadyReported}, + want: pkghttp.RequestLogRevision{ + Namespace: testNamespaceName, + Name: testRevisionName, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.getter(test.request, test.response) + if !cmp.Equal(*got.Revision, test.want) { + t.Errorf("Got = %v, want: %v, diff: %s", got.Revision, test.want, cmp.Diff(got.Revision, test.want)) + } + if got.Request != test.request { + t.Errorf("Got = %v, want: %v", got.Request, test.request) + } + if got.Response != test.response { + t.Errorf("Got = %v, want: %v", got.Response, test.response) + } + }) + } +} + +func revisionLister(t *testing.T, addLabels bool) servinglisters.RevisionLister { + rev := &v1alpha1.Revision{} + rev.Name = testRevisionName + rev.Namespace = testNamespaceName + if addLabels { + rev.Labels = map[string]string{ + serving.ConfigurationLabelKey: testConfigName, + serving.ServiceLabelKey: testServiceName, + } + } + + ctx, _ := rtesting.SetupFakeContext(t) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespaceName).Create(rev) + ri := fakerevisioninformer.Get(ctx) + ri.Informer().GetIndexer().Add(rev) + return 
ri.Lister() +} diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/OWNERS b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/OWNERS new file mode 100644 index 0000000000..690ff0e48e --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/autoscaler/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/refs b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/main.go b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/main.go new file mode 100644 index 0000000000..bfd9f75f7c --- 
/dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler-hpa/main.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + // The set of controllers this controller process runs. + "knative.dev/serving/pkg/reconciler/autoscaling/hpa" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("hpaautoscaler", hpa.NewController) +} diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler/OWNERS b/test/vendor/knative.dev/serving/cmd/autoscaler/OWNERS new file mode 100644 index 0000000000..690ff0e48e --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/github.com/knative/serving/cmd/controller/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/controller/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/autoscaler/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/controller/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/controller/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/autoscaler/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/controller/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/controller/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/autoscaler/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/refs b/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler/main.go b/test/vendor/knative.dev/serving/cmd/autoscaler/main.go new file mode 100644 index 0000000000..437c2c008d --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler/main.go @@ -0,0 +1,227 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Multitenant autoscaler executable. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net/http" + "time" + + basecmd "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd" + "github.com/spf13/pflag" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/rest" + kubeclient "knative.dev/pkg/client/injection/kube/client" + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/injection" + "knative.dev/pkg/injection/sharedmain" + "knative.dev/pkg/logging" + "knative.dev/pkg/metrics" + "knative.dev/pkg/profiling" + "knative.dev/pkg/signals" + "knative.dev/pkg/system" + "knative.dev/pkg/version" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/autoscaler/statserver" + "knative.dev/serving/pkg/reconciler/autoscaling/kpa" + "knative.dev/serving/pkg/reconciler/metric" + "knative.dev/serving/pkg/resources" +) + +const ( + statsServerAddr = ":8080" + statsBufferLen = 1000 + component = "autoscaler" + controllerNum = 2 +) + +var ( + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. 
Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") +) + +func main() { + // Initialize early to get access to flags and merge them with the autoscaler flags. + customMetricsAdapter := &basecmd.AdapterBase{} + customMetricsAdapter.Flags().AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + // Set up signals so we handle the first shutdown signal gracefully. + ctx := signals.NewContext() + + // Report stats on Go memory usage every 30 seconds. + msp := metrics.NewMemStatsAll() + msp.Start(ctx, 30*time.Second) + if err := view.Register(msp.DefaultViews()...); err != nil { + log.Fatalf("Error exporting go memstats view: %v", err) + } + + cfg, err := sharedmain.GetConfig(*masterURL, *kubeconfig) + if err != nil { + log.Fatal("Error building kubeconfig:", err) + } + + log.Printf("Registering %d clients", len(injection.Default.GetClients())) + log.Printf("Registering %d informer factories", len(injection.Default.GetInformerFactories())) + log.Printf("Registering %d informers", len(injection.Default.GetInformers())) + log.Printf("Registering %d controllers", controllerNum) + + // Adjust our client's rate limits based on the number of controller's we are running. + cfg.QPS = controllerNum * rest.DefaultQPS + cfg.Burst = controllerNum * rest.DefaultBurst + + ctx, informers := injection.Default.SetupInformers(ctx, cfg) + + kubeClient := kubeclient.Get(ctx) + + // We sometimes startup faster than we can reach kube-api. Poll on failure to prevent us terminating + if perr := wait.PollImmediate(time.Second, 60*time.Second, func() (bool, error) { + if err = version.CheckMinimumVersion(kubeClient.Discovery()); err != nil { + log.Printf("Failed to get k8s version %v", err) + } + return err == nil, nil + }); perr != nil { + log.Fatal("Timed out attempting to get k8s version: ", err) + } + + // Set up our logger. 
+ loggingConfig, err := sharedmain.GetLoggingConfig(ctx) + if err != nil { + log.Fatal("Error loading/parsing logging configuration:", err) + } + logger, atomicLevel := logging.NewLoggerFromConfig(loggingConfig, component) + defer flush(logger) + ctx = logging.WithLogger(ctx, logger) + + // statsCh is the main communication channel between the stats server and multiscaler. + statsCh := make(chan autoscaler.StatMessage, statsBufferLen) + defer close(statsCh) + + profilingHandler := profiling.NewHandler(logger, false) + + cmw := configmap.NewInformedWatcher(kubeclient.Get(ctx), system.Namespace()) + // Watch the logging config map and dynamically update logging levels. + cmw.Watch(logging.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, component)) + // Watch the observability config map + cmw.Watch(metrics.ConfigMapName(), + metrics.UpdateExporterFromConfigMap(component, logger), + profilingHandler.UpdateFromConfigMap) + + endpointsInformer := endpointsinformer.Get(ctx) + + collector := autoscaler.NewMetricCollector(statsScraperFactoryFunc(endpointsInformer.Lister()), logger) + customMetricsAdapter.WithCustomMetrics(autoscaler.NewMetricProvider(collector)) + + // Set up scalers. + // uniScalerFactory depends endpointsInformer to be set. + multiScaler := autoscaler.NewMultiScaler(ctx.Done(), uniScalerFactoryFunc(endpointsInformer, collector), logger) + + controllers := []*controller.Impl{ + kpa.NewController(ctx, cmw, multiScaler), + metric.NewController(ctx, cmw, collector), + } + + // Set up a statserver. + statsServer := statserver.New(statsServerAddr, statsCh, logger) + + // Start watching the configs. + if err := cmw.Start(ctx.Done()); err != nil { + logger.Fatalw("Failed to start watching configs", zap.Error(err)) + } + + // Start all of the informers and wait for them to sync. 
+ if err := controller.StartInformers(ctx.Done(), informers...); err != nil { + logger.Fatalw("Failed to start informers", zap.Error(err)) + } + + go controller.StartAll(ctx.Done(), controllers...) + + go func() { + for sm := range statsCh { + collector.Record(sm.Key, sm.Stat) + multiScaler.Poke(sm.Key, sm.Stat) + } + }() + + profilingServer := profiling.NewServer(profilingHandler) + + eg, egCtx := errgroup.WithContext(ctx) + eg.Go(func() error { + return customMetricsAdapter.Run(ctx.Done()) + }) + eg.Go(statsServer.ListenAndServe) + eg.Go(profilingServer.ListenAndServe) + + // This will block until either a signal arrives or one of the grouped functions + // returns an error. + <-egCtx.Done() + + statsServer.Shutdown(5 * time.Second) + profilingServer.Shutdown(context.Background()) + // Don't forward ErrServerClosed as that indicates we're already shutting down. + if err := eg.Wait(); err != nil && err != http.ErrServerClosed { + logger.Errorw("Error while running server", zap.Error(err)) + } +} + +func uniScalerFactoryFunc(endpointsInformer corev1informers.EndpointsInformer, + metricClient autoscaler.MetricClient) autoscaler.UniScalerFactory { + return func(decider *autoscaler.Decider) (autoscaler.UniScaler, error) { + if v, ok := decider.Labels[serving.ConfigurationLabelKey]; !ok || v == "" { + return nil, fmt.Errorf("label %q not found or empty in Decider %s", serving.ConfigurationLabelKey, decider.Name) + } + if decider.Spec.ServiceName == "" { + return nil, fmt.Errorf("%s decider has empty ServiceName", decider.Name) + } + + serviceName := decider.Labels[serving.ServiceLabelKey] // This can be empty. + configName := decider.Labels[serving.ConfigurationLabelKey] + + // Create a stats reporter which tags statistics by PA namespace, configuration name, and PA name. 
+ reporter, err := autoscaler.NewStatsReporter(decider.Namespace, serviceName, configName, decider.Name) + if err != nil { + return nil, err + } + + return autoscaler.New(decider.Namespace, decider.Name, metricClient, endpointsInformer.Lister(), &decider.Spec, reporter) + } +} + +func statsScraperFactoryFunc(endpointsLister corev1listers.EndpointsLister) autoscaler.StatsScraperFactory { + return func(metric *av1alpha1.Metric) (autoscaler.StatsScraper, error) { + podCounter := resources.NewScopedEndpointsCounter( + endpointsLister, metric.Namespace, metric.Spec.ScrapeTarget) + return autoscaler.NewServiceScraper(metric, podCounter) + } +} + +func flush(logger *zap.SugaredLogger) { + logger.Sync() + metrics.FlushExporter() +} diff --git a/test/vendor/knative.dev/serving/cmd/autoscaler/main_test.go b/test/vendor/knative.dev/serving/cmd/autoscaler/main_test.go new file mode 100644 index 0000000000..1e68a2e87f --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/autoscaler/main_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + fakeK8s "k8s.io/client-go/kubernetes/fake" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/autoscaler" + autoscalerfake "knative.dev/serving/pkg/autoscaler/fake" +) + +const ( + testNamespace = "test-namespace" + testRevision = "test-Revision" +) + +var ( + kubeClient = fakeK8s.NewSimpleClientset() + kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) +) + +func TestUniscalerFactoryFailures(t *testing.T) { + tests := []struct { + name string + labels map[string]string + want string + }{{ + "nil labels", nil, fmt.Sprintf("label %q not found or empty in Decider", serving.ConfigurationLabelKey), + }, { + "empty labels", map[string]string{}, fmt.Sprintf("label %q not found or empty in Decider", serving.ConfigurationLabelKey), + }, { + "config missing", map[string]string{ + "some-unimportant-label": "lo-digo", + }, + fmt.Sprintf("label %q not found or empty in Decider", serving.ConfigurationLabelKey), + }, { + "values not ascii", map[string]string{ + serving.ServiceLabelKey: "la", + serving.ConfigurationLabelKey: "verité", + }, "invalid value: only ASCII characters accepted", + }, { + "too long of a value", map[string]string{ + serving.ServiceLabelKey: "cat is ", + serving.ConfigurationLabelKey: "l" + strings.Repeat("o", 253) + "ng", + }, "max length must be 255 characters", + }} + + uniScalerFactory := getTestUniScalerFactory() + decider := &autoscaler.Decider{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + }, + Spec: autoscaler.DeciderSpec{ + ServiceName: "wholesome-service", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + decider.Labels = test.labels + + _, err := uniScalerFactory(decider) + if err == nil { + t.Fatal("No error was returned") + } + if got, want := 
err.Error(), test.want; !strings.Contains(got, want) { + t.Errorf("Error = %q, want to contain = %q", got, want) + } + }) + } + + // Now blank out service name and give correct labels. + decider.Spec.ServiceName = "" + decider.Labels = map[string]string{ + serving.RevisionLabelKey: testRevision, + serving.ServiceLabelKey: "some-nice-service", + serving.ConfigurationLabelKey: "test-config", + } + + _, err := uniScalerFactory(decider) + if err == nil { + t.Fatal("No error was returned") + } + if got, want := err.Error(), "decider has empty ServiceName"; !strings.Contains(got, want) { + t.Errorf("Error = %q, want to contain = %q", got, want) + } +} + +func endpoints(ns, n string) { + ep := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: n, + }, + Subsets: []corev1.EndpointSubset{{}}, + } + kubeClient.CoreV1().Endpoints(ns).Create(ep) + kubeInformer.Core().V1().Endpoints().Informer().GetIndexer().Add(ep) +} + +func TestUniScalerFactoryFunc(t *testing.T) { + endpoints(testNamespace, "magic-services-offered") + uniScalerFactory := getTestUniScalerFactory() + for _, srv := range []string{"some", ""} { + decider := &autoscaler.Decider{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Labels: map[string]string{ + serving.RevisionLabelKey: testRevision, + serving.ServiceLabelKey: srv, + serving.ConfigurationLabelKey: "test-config", + }, + }, + Spec: autoscaler.DeciderSpec{ + ServiceName: "magic-services-offered", + }, + } + + if _, err := uniScalerFactory(decider); err != nil { + t.Errorf("got error from uniScalerFactory: %v", err) + } + } +} + +func getTestUniScalerFactory() func(decider *autoscaler.Decider) (autoscaler.UniScaler, error) { + return uniScalerFactoryFunc(kubeInformer.Core().V1().Endpoints(), &autoscalerfake.StaticMetricClient) +} diff --git a/test/vendor/knative.dev/serving/cmd/controller/OWNERS b/test/vendor/knative.dev/serving/cmd/controller/OWNERS new file mode 100644 index 
0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/controller/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/github.com/knative/serving/cmd/queue/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/controller/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/queue/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/controller/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/queue/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/controller/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/queue/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/controller/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/queue/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/controller/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/queue/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/controller/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/controller/kodata/refs b/test/vendor/knative.dev/serving/cmd/controller/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/controller/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/controller/main.go b/test/vendor/knative.dev/serving/cmd/controller/main.go new file mode 100644 index 0000000000..c3bd0d8877 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/controller/main.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + // The set of controllers this controller process runs. + "knative.dev/serving/pkg/reconciler/configuration" + "knative.dev/serving/pkg/reconciler/gc" + "knative.dev/serving/pkg/reconciler/labeler" + "knative.dev/serving/pkg/reconciler/revision" + "knative.dev/serving/pkg/reconciler/route" + "knative.dev/serving/pkg/reconciler/serverlessservice" + "knative.dev/serving/pkg/reconciler/service" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("controller", + configuration.NewController, + labeler.NewController, + revision.NewController, + route.NewController, + serverlessservice.NewController, + service.NewController, + gc.NewController, + ) +} diff --git a/test/vendor/knative.dev/serving/cmd/default-domain/main.go b/test/vendor/knative.dev/serving/cmd/default-domain/main.go new file mode 100644 index 0000000000..22a551eca1 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/default-domain/main.go @@ -0,0 +1,216 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "net/http" + "os" + "strings" + "time" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/logging" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/client/clientset/versioned" + "knative.dev/serving/pkg/network" + routecfg "knative.dev/serving/pkg/reconciler/route/config" +) + +var ( + masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") + magicDNS = flag.String("magic-dns", "", "The hostname for the magic DNS service, e.g. xip.io or nip.io") +) + +const ( + // Interval to poll for objects. + pollInterval = 10 * time.Second + // How long to wait for objects. 
+ waitTimeout = 20 * time.Minute + appName = "default-domain" +) + +func clientsFromFlags() (*kubernetes.Clientset, *versioned.Clientset, error) { + cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig) + if err != nil { + return nil, nil, fmt.Errorf("error building kubeconfig: %w", err) + } + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, nil, fmt.Errorf("error building kube clientset: %w", err) + } + client, err := versioned.NewForConfig(cfg) + if err != nil { + return nil, nil, fmt.Errorf("error building serving clientset: %w", err) + } + return kubeClient, client, nil +} + +func lookupConfigMap(kubeClient *kubernetes.Clientset, name string) (*corev1.ConfigMap, error) { + return kubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(name, metav1.GetOptions{}) +} + +func findGatewayAddress(kubeclient *kubernetes.Clientset, client *versioned.Clientset) (*corev1.LoadBalancerIngress, error) { + netCM, err := lookupConfigMap(kubeclient, network.ConfigName) + if err != nil { + return nil, err + } + netCfg, err := network.NewConfigFromConfigMap(netCM) + if err != nil { + return nil, err + } + + // Create a KIngress that points at that Service + ing, err := client.NetworkingV1alpha1().Ingresses(system.Namespace()).Create(&v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "default-domain-", + Namespace: system.Namespace(), + Annotations: map[string]string{ + networking.IngressClassAnnotationKey: netCfg.DefaultIngressClass, + }, + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{os.Getenv("POD_NAME") + ".default-domain.invalid"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: "default-domain-service", + ServiceNamespace: system.Namespace(), + ServicePort: intstr.FromInt(80), + }, + }}, + }}, + 
}, + }}, + }, + }) + if err != nil { + return nil, err + } + defer client.NetworkingV1alpha1().Ingresses(system.Namespace()).Delete(ing.Name, &metav1.DeleteOptions{}) + + // Wait for the Ingress to be Ready. + if err := wait.PollImmediate(pollInterval, waitTimeout, func() (done bool, err error) { + ing, err = client.NetworkingV1alpha1().Ingresses(system.Namespace()).Get( + ing.Name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return ing.Status.IsReady(), nil + }); err != nil { + return nil, err + } + if len(ing.Status.PublicLoadBalancer.Ingress) == 0 { + return nil, errors.New("ingress has no public load balancers in status") + } + + // We expect an ingress LB with the form foo.bar.svc.cluster.local (though + // we aren't strictly sensitive to the suffix, this is just illustrative). + internalDomain := ing.Status.PublicLoadBalancer.Ingress[0].DomainInternal + parts := strings.SplitN(internalDomain, ".", 3) + if len(parts) < 3 { + return nil, fmt.Errorf("ingress public load balancer had unexpected shape: %q", internalDomain) + } + name, namespace := parts[0], parts[1] + + // Wait for the Ingress Service to have an external IP. + var svc *corev1.Service + if err := wait.PollImmediate(pollInterval, waitTimeout, func() (done bool, err error) { + svc, err = kubeclient.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return len(svc.Status.LoadBalancer.Ingress) != 0, nil + }); err != nil { + return nil, err + } + return &svc.Status.LoadBalancer.Ingress[0], nil +} + +func main() { + flag.Parse() + logger := logging.FromContext(context.Background()).Named(appName) + defer logger.Sync() + + kubeClient, client, err := clientsFromFlags() + if err != nil { + logger.Fatalw("Error building kube clientset", zap.Error(err)) + } + + // Fetch and parse the domain ConfigMap from the system namespace. 
+ domainCM, err := lookupConfigMap(kubeClient, routecfg.DomainConfigName) + if err != nil { + logger.Fatalw("Error getting ConfigMap", zap.Error(err)) + } + domainConfig, err := routecfg.NewDomainFromConfigMap(domainCM) + if err != nil { + logger.Fatalw("Error parsing ConfigMap", zap.Error(err)) + } + // If there is a catch-all domain configured, then bail out (successfully) here. + defaultDomain := domainConfig.LookupDomainForLabels(map[string]string{}) + if defaultDomain != routecfg.DefaultDomain { + logger.Infof("Domain is configured as: %v", defaultDomain) + return + } + + // Start an HTTP Server + h := network.NewProbeHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + server := http.Server{Addr: ":8080", Handler: h} + go server.ListenAndServe() + + // Determine the address of the gateway service. + address, err := findGatewayAddress(kubeClient, client) + if err != nil { + logger.Fatalw("Error finding gateway address", zap.Error(err)) + } + if address.IP == "" { + logger.Info("Gateway has a domain instead of IP address -- leaving default domain config intact") + return + } + + // Use the IP (assumes IPv4) to set up a magic DNS name under a top-level Magic + // DNS service like xip.io or nip.io, where: + // 1.2.3.4.xip.io ===(magically resolves to)===> 1.2.3.4 + // Add this magic DNS name without a label selector to the ConfigMap, + // and send it back to the API server. 
+ domain := fmt.Sprintf("%s.%s", address.IP, *magicDNS) + domainCM.Data[domain] = "" + if _, err = kubeClient.CoreV1().ConfigMaps(system.Namespace()).Update(domainCM); err != nil { + logger.Fatalw("Error updating ConfigMap", zap.Error(err)) + } + + logger.Infof("Updated default domain to: %s", domain) + server.Shutdown(context.Background()) +} diff --git a/test/vendor/knative.dev/serving/cmd/networking/OWNERS b/test/vendor/knative.dev/serving/cmd/networking/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/certmanager/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/refs 
b/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/refs new file mode 120000 index 0000000000..fe164fe40f --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/certmanager/kodata/refs @@ -0,0 +1 @@ +../../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/networking/certmanager/main.go b/test/vendor/knative.dev/serving/cmd/networking/certmanager/main.go new file mode 100644 index 0000000000..caa6f0a627 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/certmanager/main.go @@ -0,0 +1,29 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "knative.dev/serving/pkg/reconciler/certificate" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("certcontroller", + certificate.NewController) +} diff --git a/test/vendor/knative.dev/serving/cmd/networking/doc.go b/test/vendor/knative.dev/serving/cmd/networking/doc.go new file mode 100644 index 0000000000..8972140f43 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package networking diff --git a/test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/networking/istio/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/networking/istio/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/networking/istio/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/networking/istio/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/refs b/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/refs new file mode 120000 index 0000000000..fe164fe40f --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/istio/kodata/refs @@ -0,0 +1 @@ +../../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/networking/istio/main.go b/test/vendor/knative.dev/serving/cmd/networking/istio/main.go new file mode 100644 index 
0000000000..f690be3396 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/istio/main.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "knative.dev/serving/pkg/reconciler/ingress" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("istiocontroller", ingress.NewController) +} diff --git a/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/HEAD new file mode 120000 index 0000000000..481bd4eff4 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/HEAD @@ -0,0 +1 @@ +../../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/LICENSE new file mode 120000 index 0000000000..1477615432 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/LICENSE @@ -0,0 +1 @@ +../../../../LICENSE \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/VENDOR-LICENSE new file mode 120000 index 0000000000..7322c09d95 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/nscert/kodata/VENDOR-LICENSE @@ -0,0 +1 @@ 
+../../../../third_party/VENDOR-LICENSE \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/networking/nscert/main.go b/test/vendor/knative.dev/serving/cmd/networking/nscert/main.go new file mode 100644 index 0000000000..8ef7988cf0 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/networking/nscert/main.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "knative.dev/serving/pkg/reconciler/nscert" + + // This defines the shared main for injected controllers. + "knative.dev/pkg/injection/sharedmain" +) + +func main() { + sharedmain.Main("nscontroller", nscert.NewController) +} diff --git a/test/vendor/knative.dev/serving/cmd/queue/OWNERS b/test/vendor/knative.dev/serving/cmd/queue/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/queue/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/github.com/knative/serving/cmd/webhook/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/queue/kodata/HEAD similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/webhook/kodata/HEAD rename to test/vendor/knative.dev/serving/cmd/queue/kodata/HEAD diff --git a/test/vendor/github.com/knative/serving/cmd/webhook/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/queue/kodata/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/webhook/kodata/LICENSE rename to test/vendor/knative.dev/serving/cmd/queue/kodata/LICENSE diff --git a/test/vendor/github.com/knative/serving/cmd/webhook/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/queue/kodata/VENDOR-LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/cmd/webhook/kodata/VENDOR-LICENSE rename to test/vendor/knative.dev/serving/cmd/queue/kodata/VENDOR-LICENSE diff --git a/test/vendor/knative.dev/serving/cmd/queue/kodata/refs b/test/vendor/knative.dev/serving/cmd/queue/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/queue/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/queue/main.go b/test/vendor/knative.dev/serving/cmd/queue/main.go new file mode 100644 index 0000000000..e924881fc2 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/queue/main.go @@ -0,0 +1,628 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/kelseyhightower/envconfig" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + pkglogging "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + "knative.dev/pkg/metrics" + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/profiling" + "knative.dev/pkg/signals" + "knative.dev/pkg/tracing" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/activator" + activatorutil "knative.dev/serving/pkg/activator/util" + "knative.dev/serving/pkg/apis/networking" + pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/logging" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/queue/health" + "knative.dev/serving/pkg/queue/readiness" + queuestats "knative.dev/serving/pkg/queue/stats" +) + +const ( + // Add enough buffer to not block request serving on stats collection + requestCountingQueueLength = 100 + + // Duration the /quitquitquit handler should wait before returning. + // This is to give Istio a little bit more time to remove the pod + // from its configuration and propagate that to all istio-proxies + // in the mesh. 
+ quitSleepDuration = 20 * time.Second + + badProbeTemplate = "unexpected probe header value: %s" + + // Metrics' names (without component prefix). + requestCountN = "request_count" + responseTimeInMsecN = "request_latencies" + appRequestCountN = "app_request_count" + appResponseTimeInMsecN = "app_request_latencies" + queueDepthN = "queue_depth" + + healthURLTemplate = "http://127.0.0.1:%d" + // The 25 millisecond retry interval is an unscientific compromise between wanting to get + // started as early as possible while still wanting to give the container some breathing + // room to get up and running. + aggressivePollInterval = 25 * time.Millisecond + // reportingPeriod is the interval of time between reporting stats by queue proxy. + reportingPeriod = 1 * time.Second +) + +var ( + logger *zap.SugaredLogger + + // Metric counters. + requestCountM = stats.Int64( + requestCountN, + "The number of requests that are routed to queue-proxy", + stats.UnitDimensionless) + responseTimeInMsecM = stats.Float64( + responseTimeInMsecN, + "The response time in millisecond", + stats.UnitMilliseconds) + appRequestCountM = stats.Int64( + appRequestCountN, + "The number of requests that are routed to user-container", + stats.UnitDimensionless) + appResponseTimeInMsecM = stats.Float64( + appResponseTimeInMsecN, + "The response time in millisecond", + stats.UnitMilliseconds) + queueDepthM = stats.Int64( + queueDepthN, + "The current number of items in the serving and waiting queue, or not reported if unlimited concurrency.", + stats.UnitDimensionless) + + readinessProbeTimeout = flag.Int("probe-period", -1, "run readiness probe with given timeout") +) + +type config struct { + ContainerConcurrency int `split_words:"true" required:"true"` + QueueServingPort int `split_words:"true" required:"true"` + UserPort int `split_words:"true" required:"true"` + RevisionTimeoutSeconds int `split_words:"true" required:"true"` + ServingReadinessProbe string `split_words:"true" required:"true"` + 
EnableProfiling bool `split_words:"true"` // optional + + // Logging configuration + ServingLoggingConfig string `split_words:"true" required:"true"` + ServingLoggingLevel string `split_words:"true" required:"true"` + ServingRequestLogTemplate string `split_words:"true"` // optional + ServingEnableProbeRequestLog bool `split_words:"true"` // optional + + // Metrics configuration + ServingNamespace string `split_words:"true" required:"true"` + ServingRevision string `split_words:"true" required:"true"` + ServingConfiguration string `split_words:"true" required:"true"` + ServingPodIP string `split_words:"true" required:"true"` + ServingPod string `split_words:"true" required:"true"` + ServingService string `split_words:"true"` // optional + ServingRequestMetricsBackend string `split_words:"true"` // optional + + // /var/log configuration + EnableVarLogCollection bool `split_words:"true"` // optional + UserContainerName string `split_words:"true"` // optional + VarLogVolumeName string `split_words:"true"` // optional + InternalVolumePath string `split_words:"true"` // optional + + // Tracing configuration + TracingConfigDebug bool `split_words:"true"` // optional + TracingConfigBackend tracingconfig.BackendType `split_words:"true"` // optional + TracingConfigSampleRate float64 `split_words:"true"` // optional + TracingConfigZipkinEndpoint string `split_words:"true"` // optional + TracingConfigStackdriverProjectID string `split_words:"true"` // optional +} + +// Make handler a closure for testing. +func proxyHandler(reqChan chan queue.ReqEvent, breaker *queue.Breaker, tracingEnabled bool, next http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if network.IsKubeletProbe(r) { + next.ServeHTTP(w, r) + return + } + + if tracingEnabled { + proxyCtx, proxySpan := trace.StartSpan(r.Context(), "proxy") + r = r.WithContext(proxyCtx) + defer proxySpan.End() + } + + // Metrics for autoscaling. 
+ in, out := queue.ReqIn, queue.ReqOut + if activator.Name == network.KnativeProxyHeader(r) { + in, out = queue.ProxiedIn, queue.ProxiedOut + } + reqChan <- queue.ReqEvent{Time: time.Now(), EventType: in} + defer func() { + reqChan <- queue.ReqEvent{Time: time.Now(), EventType: out} + }() + network.RewriteHostOut(r) + + // Enforce queuing and concurrency limits. + if breaker != nil { + if err := breaker.Maybe(r.Context(), func() { + next.ServeHTTP(w, r) + }); err != nil { + switch err { + case context.DeadlineExceeded, queue.ErrRequestQueueFull: + http.Error(w, err.Error(), http.StatusServiceUnavailable) + default: + w.WriteHeader(http.StatusInternalServerError) + } + } + } else { + next.ServeHTTP(w, r) + } + } +} + +func knativeProbeHandler(healthState *health.State, prober func() bool, isAggressive bool, tracingEnabled bool, next http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + ph := network.KnativeProbeHeader(r) + + if ph == "" { + next.ServeHTTP(w, r) + return + } + + var probeSpan *trace.Span + if tracingEnabled { + _, probeSpan = trace.StartSpan(r.Context(), "probe") + defer probeSpan.End() + } + + if ph != queue.Name { + http.Error(w, fmt.Sprintf(badProbeTemplate, ph), http.StatusBadRequest) + probeSpan.Annotate([]trace.Attribute{ + trace.StringAttribute("queueproxy.probe.error", fmt.Sprintf(badProbeTemplate, ph))}, "error") + return + } + + if prober == nil { + http.Error(w, "no probe", http.StatusInternalServerError) + probeSpan.Annotate([]trace.Attribute{ + trace.StringAttribute("queueproxy.probe.error", "no probe")}, "error") + return + } + + healthState.HandleHealthProbe(func() bool { + if !prober() { + probeSpan.Annotate([]trace.Attribute{ + trace.StringAttribute("queueproxy.probe.error", "container not ready")}, "error") + return false + } + return true + }, isAggressive, w) + } +} + +func probeQueueHealthPath(port int, timeoutSeconds int) error { + if port <= 0 { + return fmt.Errorf("port must be a positive 
value, got %d", port) + } + + url := fmt.Sprintf(healthURLTemplate, port) + timeoutDuration := readiness.PollTimeout + if timeoutSeconds != 0 { + timeoutDuration = time.Duration(timeoutSeconds) * time.Second + } + httpClient := &http.Client{ + Transport: &http.Transport{ + // Do not use the cached connection + DisableKeepAlives: true, + }, + Timeout: timeoutDuration, + } + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) + defer cancel() + stopCh := ctx.Done() + + var lastErr error + // Using PollImmediateUntil instead of PollImmediate because if timeout is reached while waiting for first + // invocation of conditionFunc, it exits immediately without trying for a second time. + timeoutErr := wait.PollImmediateUntil(aggressivePollInterval, func() (bool, error) { + var req *http.Request + req, lastErr = http.NewRequest(http.MethodGet, url, nil) + if lastErr != nil { + // Return nil error for retrying + return false, nil + } + // Add the header to indicate this is a probe request. + req.Header.Add(network.ProbeHeaderName, queue.Name) + req.Header.Add(network.UserAgentKey, network.QueueProxyUserAgent) + res, lastErr := httpClient.Do(req) + if lastErr != nil { + // Return nil error for retrying + return false, nil + } + defer res.Body.Close() + return health.IsHTTPProbeReady(res), nil + }, stopCh) + + if lastErr != nil { + return fmt.Errorf("failed to probe: %w", lastErr) + } + + // An http.StatusOK was never returned during probing + if timeoutErr != nil { + return errors.New("probe returned not ready") + } + + return nil +} + +func main() { + flag.Parse() + + // Parse the environment. + var env config + if err := envconfig.Process("", &env); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + // If this is set, we run as a standalone binary to probe the queue-proxy. 
+ if *readinessProbeTimeout >= 0 { + if err := probeQueueHealthPath(env.QueueServingPort, *readinessProbeTimeout); err != nil { + // used instead of the logger to produce a concise event message + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + } + + // Setup the logger. + logger, _ = pkglogging.NewLogger(env.ServingLoggingConfig, env.ServingLoggingLevel) + logger = logger.Named("queueproxy") + defer flush(logger) + + logger = logger.With( + zap.String(logkey.Key, types.NamespacedName{ + Namespace: env.ServingNamespace, + Name: env.ServingRevision, + }.String()), + zap.String(logkey.Pod, env.ServingPod)) + + if err := validateEnv(env); err != nil { + logger.Fatal(err.Error()) + } + + // Report stats on Go memory usage every 30 seconds. + msp := metrics.NewMemStatsAll() + msp.Start(context.Background(), 30*time.Second) + if err := view.Register(msp.DefaultViews()...); err != nil { + logger.Fatalw("Error exporting go memstats view", zap.Error(err)) + } + + // Setup reporters and processes to handle stat reporting. + promStatReporter, err := queue.NewPrometheusStatsReporter( + env.ServingNamespace, env.ServingConfiguration, env.ServingRevision, + env.ServingPod, reportingPeriod) + if err != nil { + logger.Fatalw("Failed to create stats reporter", zap.Error(err)) + } + + reqChan := make(chan queue.ReqEvent, requestCountingQueueLength) + defer close(reqChan) + + reportTicker := time.NewTicker(reportingPeriod) + defer reportTicker.Stop() + + queue.NewStats(time.Now(), reqChan, reportTicker.C, promStatReporter.Report) + + // Setup probe to run for checking user-application healthiness. 
+ probe := buildProbe(env.ServingReadinessProbe) + healthState := &health.State{} + + server := buildServer(env, healthState, probe, reqChan, logger) + adminServer := buildAdminServer(healthState) + metricsServer := buildMetricsServer(promStatReporter) + + servers := map[string]*http.Server{ + "main": server, + "admin": adminServer, + "metrics": metricsServer, + } + + if env.EnableProfiling { + servers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) + } + + errCh := make(chan error, len(servers)) + for name, server := range servers { + go func(name string, s *http.Server) { + // Don't forward ErrServerClosed as that indicates we're already shutting down. + if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("%s server failed: %w", name, err) + } + }(name, server) + } + + // Setup /var/log. + // Logic that isn't required to be executed before the critical path + // and should be started last to not impact start up latency + go func() { + if env.EnableVarLogCollection { + createVarLogLink(env) + } + }() + + // Blocks until we actually receive a TERM signal or one of the servers + // exit unexpectedly. We fold both signals together because we only want + // to act on the first of those to reach here. + select { + case err := <-errCh: + logger.Errorw("Failed to bring up queue-proxy, shutting down.", zap.Error(err)) + flush(logger) + os.Exit(1) + case <-signals.SetupSignalHandler(): + logger.Info("Received TERM signal, attempting to gracefully shutdown servers.") + healthState.Shutdown(func() { + // Give Istio time to sync our "not ready" state. + time.Sleep(quitSleepDuration) + + // Calling server.Shutdown() allows pending requests to + // complete, while no new work is accepted. + if err := server.Shutdown(context.Background()); err != nil { + logger.Errorw("Failed to shutdown proxy server", zap.Error(err)) + } + // Removing the main server from the shutdown logic as we've already shut it down. 
+ delete(servers, "main") + }) + + flush(logger) + for serverName, srv := range servers { + if err := srv.Shutdown(context.Background()); err != nil { + logger.Errorw("Failed to shutdown server", zap.String("server", serverName), zap.Error(err)) + } + } + } +} + +func validateEnv(env config) error { + if !env.EnableVarLogCollection { + return nil + } + + if env.VarLogVolumeName == "" { + return errors.New("VAR_LOG_VOLUME_NAME must be specified when ENABLE_VAR_LOG_COLLECTION is true") + } + if env.InternalVolumePath == "" { + return errors.New("INTERNAL_VOLUME_PATH must be specified when ENABLE_VAR_LOG_COLLECTION is true") + } + + return nil +} + +func buildProbe(probeJSON string) *readiness.Probe { + coreProbe, err := readiness.DecodeProbe(probeJSON) + if err != nil { + logger.Fatalw("Queue container failed to parse readiness probe", zap.Error(err)) + } + return readiness.NewProbe(coreProbe) +} + +func buildServer(env config, healthState *health.State, rp *readiness.Probe, reqChan chan queue.ReqEvent, + logger *zap.SugaredLogger) *http.Server { + target := &url.URL{ + Scheme: "http", + Host: net.JoinHostPort("127.0.0.1", strconv.Itoa(env.UserPort)), + } + + httpProxy := httputil.NewSingleHostReverseProxy(target) + httpProxy.Transport = buildTransport(env, logger) + httpProxy.ErrorHandler = pkgnet.ErrorHandler(logger) + httpProxy.BufferPool = network.NewBufferPool() + httpProxy.FlushInterval = -1 + activatorutil.SetupHeaderPruning(httpProxy) + + breaker := buildBreaker(env) + metricsSupported := supportsMetrics(env, logger) + tracingEnabled := env.TracingConfigBackend != tracingconfig.None + + // Create queue handler chain. + // Note: innermost handlers are specified first, ie. the last handler in the chain will be executed first. 
+ var composedHandler http.Handler = httpProxy + if metricsSupported { + composedHandler = pushRequestMetricHandler(httpProxy, appRequestCountM, appResponseTimeInMsecM, + queueDepthM, breaker, env) + } + composedHandler = proxyHandler(reqChan, breaker, tracingEnabled, composedHandler) + composedHandler = queue.ForwardedShimHandler(composedHandler) + composedHandler = queue.TimeToFirstByteTimeoutHandler(composedHandler, + time.Duration(env.RevisionTimeoutSeconds)*time.Second, "request timeout") + composedHandler = pushRequestLogHandler(composedHandler, env) + + if metricsSupported { + composedHandler = pushRequestMetricHandler(composedHandler, requestCountM, responseTimeInMsecM, + nil /*queueDepthM*/, nil /*breaker*/, env) + } + composedHandler = tracing.HTTPSpanMiddleware(composedHandler) + + composedHandler = knativeProbeHandler(healthState, rp.ProbeContainer, rp.IsAggressive(), tracingEnabled, composedHandler) + composedHandler = network.NewProbeHandler(composedHandler) + + return pkgnet.NewServer(":"+strconv.Itoa(env.QueueServingPort), composedHandler) +} + +func buildTransport(env config, logger *zap.SugaredLogger) http.RoundTripper { + if env.TracingConfigBackend == tracingconfig.None { + return pkgnet.AutoTransport + } + + oct := tracing.NewOpenCensusTracer(tracing.WithExporter(env.ServingPod, logger)) + oct.ApplyConfig(&tracingconfig.Config{ + Backend: env.TracingConfigBackend, + Debug: env.TracingConfigDebug, + ZipkinEndpoint: env.TracingConfigZipkinEndpoint, + StackdriverProjectID: env.TracingConfigStackdriverProjectID, + SampleRate: env.TracingConfigSampleRate, + }) + + return &ochttp.Transport{ + Base: pkgnet.AutoTransport, + } +} + +func buildBreaker(env config) *queue.Breaker { + if env.ContainerConcurrency < 1 { + return nil + } + + // We set the queue depth to be equal to the container concurrency * 10 to + // allow the autoscaler time to react. 
+ queueDepth := env.ContainerConcurrency * 10 + params := queue.BreakerParams{QueueDepth: queueDepth, MaxConcurrency: env.ContainerConcurrency, InitialCapacity: env.ContainerConcurrency} + logger.Infof("Queue container is starting with %#v", params) + + return queue.NewBreaker(params) +} + +func supportsMetrics(env config, logger *zap.SugaredLogger) bool { + // Setup request metrics reporting for end-user metrics. + if env.ServingRequestMetricsBackend == "" { + return false + } + + if err := setupMetricsExporter(env.ServingRequestMetricsBackend); err != nil { + logger.Errorw("Error setting up request metrics exporter. Request metrics will be unavailable.", zap.Error(err)) + return false + } + + return true +} + +func buildAdminServer(healthState *health.State) *http.Server { + adminMux := http.NewServeMux() + adminMux.HandleFunc(queue.RequestQueueDrainPath, healthState.DrainHandlerFunc()) + + return &http.Server{ + Addr: ":" + strconv.Itoa(networking.QueueAdminPort), + Handler: adminMux, + } +} + +func buildMetricsServer(promStatReporter *queue.PrometheusStatsReporter) *http.Server { + metricsMux := http.NewServeMux() + metricsMux.Handle("/metrics", promStatReporter.Handler()) + return &http.Server{ + Addr: ":" + strconv.Itoa(networking.AutoscalingQueueMetricsPort), + Handler: metricsMux, + } +} + +// createVarLogLink creates a symlink allowing the fluentd daemon set to capture the +// logs from the user container /var/log. See fluentd config for more details. +func createVarLogLink(env config) { + link := strings.Join([]string{env.ServingNamespace, env.ServingPod, env.UserContainerName}, "_") + target := path.Join("..", env.VarLogVolumeName) + source := path.Join(env.InternalVolumePath, link) + if err := os.Symlink(target, source); err != nil { + logger.Errorw("Failed to create /var/log symlink. 
Log collection will not work.", zap.Error(err)) + } +} + +func pushRequestLogHandler(currentHandler http.Handler, env config) http.Handler { + if env.ServingRequestLogTemplate == "" { + return currentHandler + } + + revInfo := &pkghttp.RequestLogRevision{ + Name: env.ServingRevision, + Namespace: env.ServingNamespace, + Service: env.ServingService, + Configuration: env.ServingConfiguration, + PodName: env.ServingPod, + PodIP: env.ServingPodIP, + } + handler, err := pkghttp.NewRequestLogHandler(currentHandler, logging.NewSyncFileWriter(os.Stdout), env.ServingRequestLogTemplate, + pkghttp.RequestLogTemplateInputGetterFromRevision(revInfo), env.ServingEnableProbeRequestLog) + + if err != nil { + logger.Errorw("Error setting up request logger. Request logs will be unavailable.", zap.Error(err)) + return currentHandler + } + return handler +} + +func pushRequestMetricHandler(currentHandler http.Handler, countMetric *stats.Int64Measure, + latencyMetric *stats.Float64Measure, queueDepthMetric *stats.Int64Measure, breaker *queue.Breaker, env config) http.Handler { + r, err := queuestats.NewStatsReporter(env.ServingNamespace, env.ServingService, env.ServingConfiguration, env.ServingRevision, env.ServingPod, countMetric, latencyMetric, queueDepthMetric) + if err != nil { + logger.Errorw("Error setting up request metrics reporter. Request metrics will be unavailable.", zap.Error(err)) + return currentHandler + } + + handler, err := queue.NewRequestMetricHandler(currentHandler, r, breaker) + if err != nil { + logger.Errorw("Error setting up request metrics handler. Request metrics will be unavailable.", zap.Error(err)) + return currentHandler + } + return handler +} + +func setupMetricsExporter(backend string) error { + // Set up OpenCensus exporter. + // NOTE: We use revision as the component instead of queue because queue is + // implementation specific. The current metrics are request relative. Using + // revision is reasonable. 
+ // TODO(yanweiguo): add the ability to emit metrics with names not combined + // to component. + ops := metrics.ExporterOptions{ + Domain: metrics.Domain(), + Component: "revision", + PrometheusPort: networking.UserQueueMetricsPort, + ConfigMap: map[string]string{ + metrics.BackendDestinationKey: backend, + }, + } + return metrics.UpdateExporter(ops, logger) +} + +func flush(logger *zap.SugaredLogger) { + logger.Sync() + os.Stdout.Sync() + os.Stderr.Sync() + metrics.FlushExporter() +} diff --git a/test/vendor/knative.dev/serving/cmd/queue/main_test.go b/test/vendor/knative.dev/serving/cmd/queue/main_test.go new file mode 100644 index 0000000000..df8fb04bff --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/queue/main_test.go @@ -0,0 +1,452 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/plugin/ochttp" + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + "knative.dev/pkg/tracing" + tracingconfig "knative.dev/pkg/tracing/config" + tracetesting "knative.dev/pkg/tracing/testing" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/queue/health" +) + +const wantHost = "a-better-host.com" + +func TestHandlerReqEvent(t *testing.T) { + var httpHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(activator.RevisionHeaderName) != "" { + w.WriteHeader(http.StatusBadRequest) + return + } + + if r.Header.Get(activator.RevisionHeaderNamespace) != "" { + w.WriteHeader(http.StatusBadRequest) + return + } + + if got, want := r.Host, wantHost; got != want { + t.Errorf("Host header = %q, want: %q", got, want) + } + if got, want := r.Header.Get(network.OriginalHostHeader), ""; got != want { + t.Errorf("%s header was preserved", network.OriginalHostHeader) + } + + w.WriteHeader(http.StatusOK) + } + + server := httptest.NewServer(httpHandler) + serverURL, _ := url.Parse(server.URL) + + defer server.Close() + proxy := httputil.NewSingleHostReverseProxy(serverURL) + + params := queue.BreakerParams{QueueDepth: 10, MaxConcurrency: 10, InitialCapacity: 10} + breaker := queue.NewBreaker(params) + reqChan := make(chan queue.ReqEvent, 10) + h := proxyHandler(reqChan, breaker, true /*tracingEnabled*/, proxy) + + writer := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + + // Verify the Original host header processing. 
+ req.Host = "nimporte.pas" + req.Header.Set(network.OriginalHostHeader, wantHost) + + req.Header.Set(network.ProxyHeaderName, activator.Name) + h(writer, req) + select { + case e := <-reqChan: + if e.EventType != queue.ProxiedIn { + t.Errorf("Want: %v, got: %v\n", queue.ReqIn, e.EventType) + } + case <-time.After(5 * time.Second): + t.Fatal("Timed out waiting for an event to be intercepted") + } +} + +func TestProbeHandler(t *testing.T) { + testcases := []struct { + name string + prober func() bool + wantCode int + wantBody string + requestHeader string + }{{ + name: "unexpected probe header", + prober: func() bool { return true }, + wantCode: http.StatusBadRequest, + wantBody: fmt.Sprintf(badProbeTemplate, "test-probe"), + requestHeader: "test-probe", + }, { + name: "true probe function", + prober: func() bool { return true }, + wantCode: http.StatusOK, + wantBody: queue.Name, + requestHeader: queue.Name, + }, { + name: "nil probe function", + prober: nil, + wantCode: http.StatusInternalServerError, + wantBody: "no probe", + requestHeader: queue.Name, + }, { + name: "false probe function", + prober: func() bool { return false }, + wantCode: http.StatusServiceUnavailable, + wantBody: "queue not ready", + requestHeader: queue.Name, + }} + + healthState := &health.State{} + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + writer := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + req.Header.Set(network.ProbeHeaderName, tc.requestHeader) + + h := knativeProbeHandler(healthState, tc.prober, true /* isAggresive*/, true /*tracingEnabled*/, nil) + h(writer, req) + + if got, want := writer.Code, tc.wantCode; got != want { + t.Errorf("probe status = %v, want: %v", got, want) + } + if got, want := strings.TrimSpace(writer.Body.String()), tc.wantBody; got != want { + // \r\n might be inserted, etc. 
+ t.Errorf("probe body = %q, want: %q, diff: %s", got, want, cmp.Diff(got, want)) + } + }) + } +} + +func TestCreateVarLogLink(t *testing.T) { + dir, err := ioutil.TempDir("", "TestCreateVarLogLink") + if err != nil { + t.Errorf("Failed to created temporary directory: %v", err) + } + defer os.RemoveAll(dir) + var env = config{ + ServingNamespace: "default", + ServingPod: "service-7f97f9465b-5kkm5", + UserContainerName: "user-container", + VarLogVolumeName: "knative-var-log", + InternalVolumePath: dir, + } + createVarLogLink(env) + + source := path.Join(dir, "default_service-7f97f9465b-5kkm5_user-container") + want := "../knative-var-log" + got, err := os.Readlink(source) + if err != nil { + t.Errorf("Failed to read symlink: %v", err) + } + if got != want { + t.Errorf("Incorrect symlink = %q, want %q, diff: %s", got, want, cmp.Diff(got, want)) + } +} + +func TestProbeQueueInvalidPort(t *testing.T) { + const port = 0 // invalid port + + if err := probeQueueHealthPath(port, 1); err == nil { + t.Error("Expected error, got nil") + } else if diff := cmp.Diff(err.Error(), "port must be a positive value, got 0"); diff != "" { + t.Errorf("Unexpected not ready message: %s", diff) + } +} + +func TestProbeQueueConnectionFailure(t *testing.T) { + port := 12345 // some random port (that's not listening) + + if err := probeQueueHealthPath(port, 1); err == nil { + t.Error("Expected error, got nil") + } +} + +func TestProbeQueueNotReady(t *testing.T) { + queueProbed := ptr.Int32(0) + ts := newProbeTestServer(func(w http.ResponseWriter) { + atomic.AddInt32(queueProbed, 1) + w.WriteHeader(http.StatusBadRequest) + }) + + defer ts.Close() + + u, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("%s is not a valid URL: %v", ts.URL, err) + } + + port, err := strconv.Atoi(u.Port()) + if err != nil { + t.Fatalf("Failed to convert port(%s) to int: %v", u.Port(), err) + } + + err = probeQueueHealthPath(port, 1) + + if diff := cmp.Diff(err.Error(), "probe returned not ready"); diff != "" 
{ + t.Errorf("Unexpected not ready message: %s", diff) + } + + if atomic.LoadInt32(queueProbed) == 0 { + t.Errorf("Expected the queue proxy server to be probed") + } +} + +func TestProbeQueueReady(t *testing.T) { + queueProbed := ptr.Int32(0) + ts := newProbeTestServer(func(w http.ResponseWriter) { + atomic.AddInt32(queueProbed, 1) + w.WriteHeader(http.StatusOK) + }) + + defer ts.Close() + + u, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("%s is not a valid URL: %v", ts.URL, err) + } + + port, err := strconv.Atoi(u.Port()) + if err != nil { + t.Fatalf("Failed to convert port(%s) to int: %v", u.Port(), err) + } + + if err = probeQueueHealthPath(port, 1); err != nil { + t.Errorf("probeQueueHealthPath(%d, 1s) = %s", port, err) + } + + if atomic.LoadInt32(queueProbed) == 0 { + t.Errorf("Expected the queue proxy server to be probed") + } +} + +func TestProbeQueueTimeout(t *testing.T) { + queueProbed := ptr.Int32(0) + ts := newProbeTestServer(func(w http.ResponseWriter) { + atomic.AddInt32(queueProbed, 1) + time.Sleep(2 * time.Second) + w.WriteHeader(http.StatusOK) + }) + + defer ts.Close() + + u, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("%s is not a valid URL: %v", ts.URL, err) + } + + port, err := strconv.Atoi(u.Port()) + if err != nil { + t.Fatalf("failed to convert port(%s) to int", u.Port()) + } + + timeout := 1 + if err = probeQueueHealthPath(port, timeout); err == nil { + t.Errorf("Expected probeQueueHealthPath(%d, %v) to return timeout error", port, timeout) + } + + ts.Close() + + if atomic.LoadInt32(queueProbed) == 0 { + t.Errorf("Expected the queue proxy server to be probed") + } +} + +func TestProbeQueueDelayedReady(t *testing.T) { + count := ptr.Int32(0) + ts := newProbeTestServer(func(w http.ResponseWriter) { + if atomic.AddInt32(count, 1) < 9 { + w.WriteHeader(http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) + }) + + defer ts.Close() + + u, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("%s is not a valid URL: 
%v", ts.URL, err) + } + + port, err := strconv.Atoi(u.Port()) + if err != nil { + t.Fatalf("Failed to convert port(%s) to int: %v", u.Port(), err) + } + + timeout := 0 + if err := probeQueueHealthPath(port, timeout); err != nil { + t.Errorf("probeQueueHealthPath(%d) = %s", port, err) + } +} + +func TestQueueTraceSpans(t *testing.T) { + testcases := []struct { + name string + prober func() bool + wantSpans int + requestHeader string + probeWillFail bool + probeTrace bool + enableTrace bool + }{{ + name: "proxy trace", + prober: func() bool { return true }, + wantSpans: 2, + requestHeader: "", + probeWillFail: false, + probeTrace: false, + enableTrace: true, + }, { + name: "true prober function with probe trace", + prober: func() bool { return true }, + wantSpans: 1, + requestHeader: queue.Name, + probeWillFail: false, + probeTrace: true, + enableTrace: true, + }, { + name: "unexpected probe header", + prober: func() bool { return true }, + wantSpans: 1, + requestHeader: "test-probe", + probeWillFail: true, + probeTrace: true, + enableTrace: true, + }, { + name: "nil prober function", + prober: nil, + wantSpans: 1, + requestHeader: queue.Name, + probeWillFail: true, + probeTrace: true, + enableTrace: true, + }, { + name: "false prober function", + prober: func() bool { return false }, + wantSpans: 1, + requestHeader: queue.Name, + probeWillFail: true, + probeTrace: true, + enableTrace: true, + }, { + name: "no traces", + prober: func() bool { return true }, + wantSpans: 0, + requestHeader: queue.Name, + probeWillFail: false, + probeTrace: false, + enableTrace: false, + }} + + healthState := &health.State{} + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // Create tracer with reporter recorder + reporter, co := tracetesting.FakeZipkinExporter() + defer reporter.Close() + oct := tracing.NewOpenCensusTracer(co) + defer oct.Finish() + + cfg := tracingconfig.Config{ + Backend: tracingconfig.Zipkin, + Debug: true, + } + if !tc.enableTrace { + 
cfg.Backend = tracingconfig.None + } + if err := oct.ApplyConfig(&cfg); err != nil { + t.Errorf("Failed to apply tracer config: %v", err) + } + + writer := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + + if !tc.probeTrace { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + serverURL, _ := url.Parse(server.URL) + + proxy := httputil.NewSingleHostReverseProxy(serverURL) + params := queue.BreakerParams{QueueDepth: 10, MaxConcurrency: 10, InitialCapacity: 10} + breaker := queue.NewBreaker(params) + reqChan := make(chan queue.ReqEvent, 10) + + proxy.Transport = &ochttp.Transport{ + Base: pkgnet.AutoTransport, + } + + h := proxyHandler(reqChan, breaker, true /*tracingEnabled*/, proxy) + h(writer, req) + } else { + h := knativeProbeHandler(healthState, tc.prober, true /* isAggresive*/, true /*tracingEnabled*/, nil) + req.Header.Set(network.ProbeHeaderName, tc.requestHeader) + h(writer, req) + } + + gotSpans := reporter.Flush() + if len(gotSpans) != tc.wantSpans { + t.Errorf("Got %d spans, expected %d", len(gotSpans), tc.wantSpans) + } + spanNames := []string{"probe", "/", "proxy"} + if !tc.probeTrace { + spanNames = spanNames[1:] + } + for i, spanName := range spanNames[0:tc.wantSpans] { + if gotSpans[i].Name != spanName { + t.Errorf("Got span %d named %q, expected %q", i, gotSpans[i].Name, spanName) + } + if tc.probeWillFail { + if len(gotSpans[i].Annotations) == 0 { + t.Error("Expected error as value for failed span Annotation, got empty Annotation") + } else if gotSpans[i].Annotations[0].Value != "error" { + t.Errorf("Expected error as value for failed span Annotation, got %q", gotSpans[i].Annotations[0].Value) + } + } + } + }) + } +} + +func newProbeTestServer(f func(w http.ResponseWriter)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if 
r.Header.Get(network.UserAgentKey) == network.QueueProxyUserAgent { + f(w) + } + })) +} diff --git a/test/vendor/knative.dev/serving/cmd/webhook/OWNERS b/test/vendor/knative.dev/serving/cmd/webhook/OWNERS new file mode 100644 index 0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/cmd/webhook/kodata/HEAD b/test/vendor/knative.dev/serving/cmd/webhook/kodata/HEAD new file mode 120000 index 0000000000..8f63681d36 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/kodata/HEAD @@ -0,0 +1 @@ +../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/webhook/kodata/LICENSE b/test/vendor/knative.dev/serving/cmd/webhook/kodata/LICENSE new file mode 120000 index 0000000000..5853aaea53 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/kodata/LICENSE @@ -0,0 +1 @@ +../../../LICENSE \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/webhook/kodata/VENDOR-LICENSE b/test/vendor/knative.dev/serving/cmd/webhook/kodata/VENDOR-LICENSE new file mode 120000 index 0000000000..3cc8976451 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/kodata/VENDOR-LICENSE @@ -0,0 +1 @@ +../../../third_party/VENDOR-LICENSE \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/webhook/kodata/refs b/test/vendor/knative.dev/serving/cmd/webhook/kodata/refs new file mode 120000 index 0000000000..739d35bf96 --- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/kodata/refs @@ -0,0 +1 @@ +../../../.git/refs \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/cmd/webhook/main.go b/test/vendor/knative.dev/serving/cmd/webhook/main.go new file mode 100644 index 0000000000..c0908bc81a 
--- /dev/null +++ b/test/vendor/knative.dev/serving/cmd/webhook/main.go @@ -0,0 +1,166 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/injection/sharedmain" + "knative.dev/pkg/logging" + "knative.dev/pkg/metrics" + "knative.dev/pkg/signals" + "knative.dev/pkg/webhook" + "knative.dev/pkg/webhook/certificates" + "knative.dev/pkg/webhook/configmaps" + "knative.dev/pkg/webhook/resourcesemantics" + "knative.dev/pkg/webhook/resourcesemantics/defaulting" + "knative.dev/pkg/webhook/resourcesemantics/validation" + + // resource validation types + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + net "knative.dev/serving/pkg/apis/networking/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + + // config validation constructors + tracingconfig "knative.dev/pkg/tracing/config" + defaultconfig "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/gc" + metricsconfig "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + certconfig "knative.dev/serving/pkg/reconciler/certificate/config" + istioconfig "knative.dev/serving/pkg/reconciler/ingress/config" + 
domainconfig "knative.dev/serving/pkg/reconciler/route/config" +) + +var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ + v1alpha1.SchemeGroupVersion.WithKind("Revision"): &v1alpha1.Revision{}, + v1alpha1.SchemeGroupVersion.WithKind("Configuration"): &v1alpha1.Configuration{}, + v1alpha1.SchemeGroupVersion.WithKind("Route"): &v1alpha1.Route{}, + v1alpha1.SchemeGroupVersion.WithKind("Service"): &v1alpha1.Service{}, + v1beta1.SchemeGroupVersion.WithKind("Revision"): &v1beta1.Revision{}, + v1beta1.SchemeGroupVersion.WithKind("Configuration"): &v1beta1.Configuration{}, + v1beta1.SchemeGroupVersion.WithKind("Route"): &v1beta1.Route{}, + v1beta1.SchemeGroupVersion.WithKind("Service"): &v1beta1.Service{}, + v1.SchemeGroupVersion.WithKind("Revision"): &v1.Revision{}, + v1.SchemeGroupVersion.WithKind("Configuration"): &v1.Configuration{}, + v1.SchemeGroupVersion.WithKind("Route"): &v1.Route{}, + v1.SchemeGroupVersion.WithKind("Service"): &v1.Service{}, + + autoscalingv1alpha1.SchemeGroupVersion.WithKind("PodAutoscaler"): &autoscalingv1alpha1.PodAutoscaler{}, + autoscalingv1alpha1.SchemeGroupVersion.WithKind("Metric"): &autoscalingv1alpha1.Metric{}, + + net.SchemeGroupVersion.WithKind("Certificate"): &net.Certificate{}, + net.SchemeGroupVersion.WithKind("Ingress"): &net.Ingress{}, + net.SchemeGroupVersion.WithKind("ServerlessService"): &net.ServerlessService{}, +} + +func NewDefaultingAdmissionController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + // Decorate contexts with the current state of the config. + store := defaultconfig.NewStore(logging.FromContext(ctx).Named("config-store")) + store.WatchConfigs(cmw) + + return defaulting.NewAdmissionController(ctx, + + // Name of the resource webhook. + "webhook.serving.knative.dev", + + // The path on which to serve the webhook. + "/defaulting", + + // The resources to validate and default. 
+ types, + + // A function that infuses the context passed to Validate/SetDefaults with custom metadata. + func(ctx context.Context) context.Context { + return v1.WithUpgradeViaDefaulting(store.ToContext(ctx)) + }, + + // Whether to disallow unknown fields. + true, + ) +} + +func NewValidationAdmissionController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + return validation.NewAdmissionController(ctx, + + // Name of the resource webhook. + "validation.webhook.serving.knative.dev", + + // The path on which to serve the webhook. + "/resource-validation", + + // The resources to validate and default. + types, + + // A function that infuses the context passed to Validate/SetDefaults with custom metadata. + func(ctx context.Context) context.Context { + return ctx + }, + + // Whether to disallow unknown fields. + true, + ) +} + +func NewConfigValidationController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + return configmaps.NewAdmissionController(ctx, + + // Name of the configmap webhook. + "config.webhook.serving.knative.dev", + + // The path on which to serve the webhook. + "/config-validation", + + // The configmaps to validate. 
+ configmap.Constructors{ + tracingconfig.ConfigName: tracingconfig.NewTracingConfigFromConfigMap, + autoscaler.ConfigName: autoscaler.NewConfigFromConfigMap, + certconfig.CertManagerConfigName: certconfig.NewCertManagerConfigFromConfigMap, + gc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx), + network.ConfigName: network.NewConfigFromConfigMap, + istioconfig.IstioConfigName: istioconfig.NewIstioFromConfigMap, + deployment.ConfigName: deployment.NewConfigFromConfigMap, + metrics.ConfigMapName(): metricsconfig.NewObservabilityConfigFromConfigMap, + logging.ConfigMapName(): logging.NewConfigFromConfigMap, + domainconfig.DomainConfigName: domainconfig.NewDomainFromConfigMap, + defaultconfig.DefaultsConfigName: defaultconfig.NewDefaultsConfigFromConfigMap, + }, + ) +} + +func main() { + // Set up a signal context with our webhook options + ctx := webhook.WithOptions(signals.NewContext(), webhook.Options{ + ServiceName: "webhook", + Port: 8443, + SecretName: "webhook-certs", + }) + + sharedmain.MainWithContext(ctx, "webhook", + certificates.NewController, + NewDefaultingAdmissionController, + NewValidationAdmissionController, + NewConfigValidationController, + ) +} diff --git a/test/vendor/knative.dev/serving/code-of-conduct.md b/test/vendor/knative.dev/serving/code-of-conduct.md new file mode 100644 index 0000000000..5f04b3187c --- /dev/null +++ b/test/vendor/knative.dev/serving/code-of-conduct.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at +knative-code-of-conduct@googlegroups.com. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/test/vendor/knative.dev/serving/community/README.md b/test/vendor/knative.dev/serving/community/README.md new file mode 100644 index 0000000000..875790e516 --- /dev/null +++ b/test/vendor/knative.dev/serving/community/README.md @@ -0,0 +1,5 @@ +# Knative Community + +For community information, see +[Knative Contributing](https://www.knative.dev/contributing/) on the +[https://www.knative.dev](https://www.knative.dev/) site. 
diff --git a/test/vendor/knative.dev/serving/config/100-namespace.yaml b/test/vendor/knative.dev/serving/config/100-namespace.yaml new file mode 120000 index 0000000000..edab691b93 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/100-namespace.yaml @@ -0,0 +1 @@ +core/100-namespace.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-addressable-resolvers-clusterrole.yaml b/test/vendor/knative.dev/serving/config/200-addressable-resolvers-clusterrole.yaml new file mode 120000 index 0000000000..dcd8f99259 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-addressable-resolvers-clusterrole.yaml @@ -0,0 +1 @@ +core/rbac/200-addressable-resolvers-clusterrole.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-clusterrole-certmanager.yaml b/test/vendor/knative.dev/serving/config/200-clusterrole-certmanager.yaml new file mode 120000 index 0000000000..8eddd094dd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-clusterrole-certmanager.yaml @@ -0,0 +1 @@ +cert-manager/200-clusterrole.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-clusterrole-istio.yaml b/test/vendor/knative.dev/serving/config/200-clusterrole-istio.yaml new file mode 120000 index 0000000000..8b996e07e9 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-clusterrole-istio.yaml @@ -0,0 +1 @@ +istio-ingress/200-clusterrole.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-clusterrole-metrics.yaml b/test/vendor/knative.dev/serving/config/200-clusterrole-metrics.yaml new file mode 120000 index 0000000000..41ac79309d --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-clusterrole-metrics.yaml @@ -0,0 +1 @@ +hpa-autoscaling/200-clusterrole-metrics.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-clusterrole-namespaced.yaml 
b/test/vendor/knative.dev/serving/config/200-clusterrole-namespaced.yaml new file mode 120000 index 0000000000..f555e4e9a6 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-clusterrole-namespaced.yaml @@ -0,0 +1 @@ +core/rbac/200-clusterrole-namespaced.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-clusterrole.yaml b/test/vendor/knative.dev/serving/config/200-clusterrole.yaml new file mode 120000 index 0000000000..b8ceb9d219 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-clusterrole.yaml @@ -0,0 +1 @@ +core/rbac/200-clusterrole.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-podspecable-bindings-clusterrole.yaml b/test/vendor/knative.dev/serving/config/200-podspecable-bindings-clusterrole.yaml new file mode 120000 index 0000000000..51cbb49ec9 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-podspecable-bindings-clusterrole.yaml @@ -0,0 +1 @@ +core/rbac/200-podspecable-bindings-clusterrole.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/200-serviceaccount.yaml b/test/vendor/knative.dev/serving/config/200-serviceaccount.yaml new file mode 120000 index 0000000000..151307ac84 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/200-serviceaccount.yaml @@ -0,0 +1 @@ +core/rbac/200-serviceaccount.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics-server.yaml b/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics-server.yaml new file mode 120000 index 0000000000..cfcb33b68b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics-server.yaml @@ -0,0 +1 @@ +hpa-autoscaling/201-clusterrolebinding-metrics-server.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics.yaml 
b/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics.yaml new file mode 120000 index 0000000000..2d10d8a06d --- /dev/null +++ b/test/vendor/knative.dev/serving/config/201-clusterrolebinding-metrics.yaml @@ -0,0 +1 @@ +hpa-autoscaling/201-clusterrolebinding-metrics.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/201-clusterrolebinding.yaml b/test/vendor/knative.dev/serving/config/201-clusterrolebinding.yaml new file mode 120000 index 0000000000..badb803bb3 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/201-clusterrolebinding.yaml @@ -0,0 +1 @@ +core/rbac/201-clusterrolebinding.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/201-rolebinding-metrics-server.yaml b/test/vendor/knative.dev/serving/config/201-rolebinding-metrics-server.yaml new file mode 120000 index 0000000000..50208a006f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/201-rolebinding-metrics-server.yaml @@ -0,0 +1 @@ +hpa-autoscaling/201-rolebinding-metrics-server.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/202-gateway.yaml b/test/vendor/knative.dev/serving/config/202-gateway.yaml new file mode 120000 index 0000000000..4e530b44ee --- /dev/null +++ b/test/vendor/knative.dev/serving/config/202-gateway.yaml @@ -0,0 +1 @@ +istio-ingress/202-gateway.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/203-local-gateway.yaml b/test/vendor/knative.dev/serving/config/203-local-gateway.yaml new file mode 120000 index 0000000000..8296e3ecb1 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/203-local-gateway.yaml @@ -0,0 +1 @@ +istio-ingress/203-local-gateway.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-certificate.yaml b/test/vendor/knative.dev/serving/config/300-certificate.yaml new file mode 120000 index 0000000000..ac41200ebf --- /dev/null +++ 
b/test/vendor/knative.dev/serving/config/300-certificate.yaml @@ -0,0 +1 @@ +core/resources/certificate.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-configuration.yaml b/test/vendor/knative.dev/serving/config/300-configuration.yaml new file mode 120000 index 0000000000..f8c1477766 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-configuration.yaml @@ -0,0 +1 @@ +core/resources/configuration.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-imagecache.yaml b/test/vendor/knative.dev/serving/config/300-imagecache.yaml new file mode 120000 index 0000000000..70e20bf270 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-imagecache.yaml @@ -0,0 +1 @@ +../vendor/knative.dev/caching/config/image.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-ingress.yaml b/test/vendor/knative.dev/serving/config/300-ingress.yaml new file mode 120000 index 0000000000..dcabdf0cb3 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-ingress.yaml @@ -0,0 +1 @@ +core/resources/ingress.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-metric.yaml b/test/vendor/knative.dev/serving/config/300-metric.yaml new file mode 120000 index 0000000000..f4ac600fca --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-metric.yaml @@ -0,0 +1 @@ +core/resources/metric.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-pa.yaml b/test/vendor/knative.dev/serving/config/300-pa.yaml new file mode 120000 index 0000000000..3d214c7f81 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-pa.yaml @@ -0,0 +1 @@ +core/resources/podautoscaler.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-revision.yaml b/test/vendor/knative.dev/serving/config/300-revision.yaml new file mode 120000 index 0000000000..8256c9b228 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/config/300-revision.yaml @@ -0,0 +1 @@ +core/resources/revision.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-route.yaml b/test/vendor/knative.dev/serving/config/300-route.yaml new file mode 120000 index 0000000000..f6e9d50a01 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-route.yaml @@ -0,0 +1 @@ +core/resources/route.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-service.yaml b/test/vendor/knative.dev/serving/config/300-service.yaml new file mode 120000 index 0000000000..2148b81899 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-service.yaml @@ -0,0 +1 @@ +core/resources/service.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/300-sks.yaml b/test/vendor/knative.dev/serving/config/300-sks.yaml new file mode 120000 index 0000000000..6fcdada686 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/300-sks.yaml @@ -0,0 +1 @@ +core/resources/serverlessservice.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/500-webhook-configmap-validation.yaml b/test/vendor/knative.dev/serving/config/500-webhook-configmap-validation.yaml new file mode 120000 index 0000000000..948218552b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/500-webhook-configmap-validation.yaml @@ -0,0 +1 @@ +./core/webhooks/configmap-validation.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/500-webhook-defaulting.yaml b/test/vendor/knative.dev/serving/config/500-webhook-defaulting.yaml new file mode 120000 index 0000000000..604b1f814b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/500-webhook-defaulting.yaml @@ -0,0 +1 @@ +./core/webhooks/defaulting.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/500-webhook-resource-validation.yaml 
b/test/vendor/knative.dev/serving/config/500-webhook-resource-validation.yaml new file mode 120000 index 0000000000..68f024380e --- /dev/null +++ b/test/vendor/knative.dev/serving/config/500-webhook-resource-validation.yaml @@ -0,0 +1 @@ +./core/webhooks/resource-validation.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/500-webhook-secret.yaml b/test/vendor/knative.dev/serving/config/500-webhook-secret.yaml new file mode 120000 index 0000000000..c8b148a1f3 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/500-webhook-secret.yaml @@ -0,0 +1 @@ +./core/webhooks/secret.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/999-cache.yaml b/test/vendor/knative.dev/serving/config/999-cache.yaml new file mode 120000 index 0000000000..c004bf36f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/999-cache.yaml @@ -0,0 +1 @@ +core/999-cache.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/OWNERS b/test/vendor/knative.dev/serving/config/OWNERS new file mode 100644 index 0000000000..a966984efb --- /dev/null +++ b/test/vendor/knative.dev/serving/config/OWNERS @@ -0,0 +1,9 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-wg-leads + + +reviewers: +- serving-wg-leads + diff --git a/test/vendor/knative.dev/serving/config/README.md b/test/vendor/knative.dev/serving/config/README.md new file mode 100644 index 0000000000..a6221bb686 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/README.md @@ -0,0 +1,31 @@ +# Welcome to the knative/serving config directory! 
+ +The files in this directory are organized as follows: + +- `core/`: the elements that are required for knative/serving to function, +- `istio-ingress/`: the configuration needed to plug in the istio ingress + implementation, +- `hpa-autoscaling/`: the configuration needed to extend the core with HPA-class + autoscaling, +- `namespace-wildcards/`: the configuration needed to extend the core to + provision wildcard certificates per-namespace, +- `cert-manager/`: the configuration needed to plug in the `cert-manager` + certificate implementation, +- `monitoring/`: an installable bundle of tooling for assorted observability + functions, +- `*.yaml`: symlinks that form a particular "rendered view" of the + knative/serving configuration. + +## Core + +The Core is complex enough that it further breaks down as follows: + +- `rbac/`: The service accounts, [cluster] roles, and [cluster] role bindings + needed for the core controllers to function, or to plug knative/serving into + standard Kubernetes RBAC constructs. +- `configmaps/`: The configmaps that are used to configure the core components. +- `resources/`: The serving resource definitions. +- `webhooks/`: The serving {mutating, validating} admission webhook + configurations, and supporting resources. +- `deployments/`: The serving executable components and associated configuration + resources. 
diff --git a/test/vendor/knative.dev/serving/config/activator-hpa.yaml b/test/vendor/knative.dev/serving/config/activator-hpa.yaml new file mode 120000 index 0000000000..15a3555cad --- /dev/null +++ b/test/vendor/knative.dev/serving/config/activator-hpa.yaml @@ -0,0 +1 @@ +core/deployments/activator-hpa.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/activator.yaml b/test/vendor/knative.dev/serving/config/activator.yaml new file mode 120000 index 0000000000..aa5465250c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/activator.yaml @@ -0,0 +1 @@ +core/deployments/activator.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/autoscaler-hpa.yaml b/test/vendor/knative.dev/serving/config/autoscaler-hpa.yaml new file mode 120000 index 0000000000..9c7c8d6859 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/autoscaler-hpa.yaml @@ -0,0 +1 @@ +hpa-autoscaling/controller.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/autoscaler.yaml b/test/vendor/knative.dev/serving/config/autoscaler.yaml new file mode 120000 index 0000000000..3f67084652 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/autoscaler.yaml @@ -0,0 +1 @@ +core/deployments/autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/cert-manager/200-clusterrole.yaml b/test/vendor/knative.dev/serving/config/cert-manager/200-clusterrole.yaml new file mode 100644 index 0000000000..ca4a39da60 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/cert-manager/200-clusterrole.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # These are the permissions needed by the `cert-manager` `Certificate` implementation. + name: knative-serving-certmanager + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" + networking.knative.dev/certificate-provider: cert-manager +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "clusterissuers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] diff --git a/test/vendor/knative.dev/serving/config/cert-manager/config.yaml b/test/vendor/knative.dev/serving/config/cert-manager/config.yaml new file mode 100644 index 0000000000..2ad76cb8c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/cert-manager/config.yaml @@ -0,0 +1,46 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-certmanager + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/certificate-provider: cert-manager +data: + + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this block and unindented to actually change the configuration. + + # issuerRef is a reference to the issuer for this certificate. + # IssuerRef should be either `ClusterIssuer` or `Issuer`. + # Please refer `IssuerRef` in https://github.com/jetstack/cert-manager/blob/master/pkg/apis/certmanager/v1alpha1/types_certificate.go + # for more details about IssuerRef configuration. + issuerRef: | + kind: ClusterIssuer + name: letsencrypt-issuer diff --git a/test/vendor/knative.dev/serving/config/cert-manager/controller.yaml b/test/vendor/knative.dev/serving/config/cert-manager/controller.yaml new file mode 100644 index 0000000000..898f4f31db --- /dev/null +++ b/test/vendor/knative.dev/serving/config/cert-manager/controller.yaml @@ -0,0 +1,93 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: networking-certmanager + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/certificate-provider: cert-manager +spec: + selector: + matchLabels: + app: networking-certmanager + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: networking-certmanager + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: networking-certmanager + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/networking/certmanager + + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: networking-certmanager + serving.knative.dev/release: devel + networking.knative.dev/certificate-provider: cert-manager + name: networking-certmanager + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. 
+ - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: networking-certmanager diff --git a/test/vendor/knative.dev/serving/config/config-autoscaler.yaml b/test/vendor/knative.dev/serving/config/config-autoscaler.yaml new file mode 120000 index 0000000000..a3ad66dd0e --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-autoscaler.yaml @@ -0,0 +1 @@ +core/configmaps/autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-certmanager.yaml b/test/vendor/knative.dev/serving/config/config-certmanager.yaml new file mode 120000 index 0000000000..42575fa7c3 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-certmanager.yaml @@ -0,0 +1 @@ +cert-manager/config.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-defaults.yaml b/test/vendor/knative.dev/serving/config/config-defaults.yaml new file mode 120000 index 0000000000..cdce59640f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-defaults.yaml @@ -0,0 +1 @@ +core/configmaps/defaults.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-deployment.yaml b/test/vendor/knative.dev/serving/config/config-deployment.yaml new file mode 120000 index 0000000000..ac8682ddec --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-deployment.yaml @@ -0,0 +1 @@ +core/configmaps/deployment.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-domain.yaml b/test/vendor/knative.dev/serving/config/config-domain.yaml new file mode 120000 index 0000000000..1702a3b138 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-domain.yaml @@ -0,0 +1 @@ +core/configmaps/domain.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-gc.yaml b/test/vendor/knative.dev/serving/config/config-gc.yaml new file mode 120000 
index 0000000000..bb86ac4f73 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-gc.yaml @@ -0,0 +1 @@ +core/configmaps/gc.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-istio.yaml b/test/vendor/knative.dev/serving/config/config-istio.yaml new file mode 120000 index 0000000000..0598b6e30f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-istio.yaml @@ -0,0 +1 @@ +istio-ingress/config.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-logging.yaml b/test/vendor/knative.dev/serving/config/config-logging.yaml new file mode 120000 index 0000000000..fdd487354e --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-logging.yaml @@ -0,0 +1 @@ +core/configmaps/logging.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-network.yaml b/test/vendor/knative.dev/serving/config/config-network.yaml new file mode 120000 index 0000000000..e4e48d42d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-network.yaml @@ -0,0 +1 @@ +core/configmaps/network.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-observability.yaml b/test/vendor/knative.dev/serving/config/config-observability.yaml new file mode 120000 index 0000000000..972be50a8c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-observability.yaml @@ -0,0 +1 @@ +core/configmaps/observability.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/config-tracing.yaml b/test/vendor/knative.dev/serving/config/config-tracing.yaml new file mode 120000 index 0000000000..8a578ada95 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/config-tracing.yaml @@ -0,0 +1 @@ +core/configmaps/tracing.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/controller.yaml b/test/vendor/knative.dev/serving/config/controller.yaml new file mode 
120000 index 0000000000..a3175117ed --- /dev/null +++ b/test/vendor/knative.dev/serving/config/controller.yaml @@ -0,0 +1 @@ +core/deployments/controller.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/core/100-namespace.yaml b/test/vendor/knative.dev/serving/config/core/100-namespace.yaml new file mode 100644 index 0000000000..8a30b8ba22 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/100-namespace.yaml @@ -0,0 +1,22 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving + labels: + # TODO(mattmoor): We should not require any istio annotations. + istio-injection: enabled + serving.knative.dev/release: devel diff --git a/test/vendor/knative.dev/serving/config/core/999-cache.yaml b/test/vendor/knative.dev/serving/config/core/999-cache.yaml new file mode 100644 index 0000000000..71c5f06d40 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/999-cache.yaml @@ -0,0 +1,25 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: caching.internal.knative.dev/v1alpha1 +kind: Image +metadata: + name: queue-proxy + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/queue diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/autoscaler.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/autoscaler.yaml new file mode 100644 index 0000000000..a7e7189acc --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/autoscaler.yaml @@ -0,0 +1,137 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # The Revision ContainerConcurrency field specifies the maximum number + # of requests the Container can handle at once. Container concurrency + # target percentage is how much of that maximum to use in a stable + # state. E.g. if a Revision specifies ContainerConcurrency of 10, then + # the Autoscaler will try to maintain 7 concurrent connections per pod + # on average. + # Note: this limit will be applied to container concurrency set at every + # level (ConfigMap, Revision Spec or Annotation). + # For legacy and backwards compatibility reasons, this value also accepts + # fractional values in (0, 1] interval (i.e. 0.7 ⇒ 70%). + # Thus minimal percentage value must be greater than 1.0, or it will be + # treated as a fraction. + container-concurrency-target-percentage: "70" + + # The container concurrency target default is what the Autoscaler will + # try to maintain when concurrency is used as the scaling metric for a + # Revision and the Revision specifies unlimited concurrency. + # Even when specifying unlimited concurrency, the autoscaler will + # horizontally scale the application based on this target concurrency. + # NOTE: Only one metric can be used for autoscaling a Revision. 
+ container-concurrency-target-default: "100" + + # The requests per second (RPS) target default is what the Autoscaler will + # try to maintain when RPS is used as the scaling metric for a Revision and + # the Revision specifies unlimited RPS. Even when specifying unlimited RPS, + # the autoscaler will horizontally scale the application based on this + # target RPS. + # Must be greater than 1.0. + # NOTE: Only one metric can be used for autoscaling a Revision. + requests-per-second-target-default: "200" + + # The target burst capacity specifies the size of burst in concurrent + # requests that the system operator expects the system will receive. + # Autoscaler will try to protect the system from queueing by introducing + # Activator in the request path if the current spare capacity of the + # service is less than this setting. + # If this setting is 0, then Activator will be in the request path only + # when the revision is scaled to 0. + # If this setting is > 0 and container-concurrency-target-percentage is + # 100% or 1.0, then activator will always be in the request path. + # -1 denotes unlimited target-burst-capacity and activator will always + # be in the request path. + # Other negative values are invalid. + target-burst-capacity: "200" + + # When operating in a stable mode, the autoscaler operates on the + # average concurrency over the stable window. + # Stable window must be in whole seconds. + stable-window: "60s" + + # When observed average concurrency during the panic window reaches + # panic-threshold-percentage the target concurrency, the autoscaler + # enters panic mode. When operating in panic mode, the autoscaler + # scales on the average concurrency over the panic window which is + # panic-window-percentage of the stable-window. + # When computing the panic window it will be rounded to the closest + # whole second. 
+ panic-window-percentage: "10.0" + + # The percentage of the container concurrency target at which to + # enter panic mode when reached within the panic window. + panic-threshold-percentage: "200.0" + + # Max scale up rate limits the rate at which the autoscaler will + # increase pod count. It is the maximum ratio of desired pods versus + # observed pods. + # Cannot less or equal to 1. + # I.e with value of 2.0 the number of pods can at most go N to 2N + # over single Autoscaler period (see tick-interval), but at least N to + # N+1, if Autoscaler needs to scale up. + max-scale-up-rate: "1000.0" + + # Max scale down rate limits the rate at which the autoscaler will + # decrease pod count. It is the maximum ratio of observed pods versus + # desired pods. + # Cannot less or equal to 1. + # I.e. with value of 2.0 the number of pods can at most go N to N/2 + # over single Autoscaler evaluation period (see tick-interval), but at + # least N to N-1, if Autoscaler needs to scale down. + # Not yet used // TODO(vagababov) remove once other parts are ready. + max-scale-down-rate: "2.0" + + # Scale to zero feature flag + enable-scale-to-zero: "true" + + # Tick interval is the time between autoscaling calculations. + tick-interval: "2s" + + # Dynamic parameters (take effect when config map is updated): + + # Scale to zero grace period is the time an inactive revision is left + # running before it is scaled to zero (min: 30s). + scale-to-zero-grace-period: "30s" + + # Enable graceful scaledown feature flag. + # Once enabled, it allows the autoscaler to prioritize pods processing + # fewer (or zero) requests for removal when scaling down. 
+ enable-graceful-scaledown: "false" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/defaults.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/defaults.yaml new file mode 100644 index 0000000000..6daa9bc456 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/defaults.yaml @@ -0,0 +1,83 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-defaults + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # revision-timeout-seconds contains the default number of + # seconds to use for the revision's per-request timeout, if + # none is specified. + revision-timeout-seconds: "300" # 5 minutes + + # max-revision-timeout-seconds contains the maximum number of + # seconds that can be used for revision-timeout-seconds. 
+ # This value must be greater than or equal to revision-timeout-seconds. + # If omitted, the system default is used (600 seconds). + max-revision-timeout-seconds: "600" # 10 minutes + + # revision-cpu-request contains the cpu allocation to assign + # to revisions by default. If omitted, no value is specified + # and the system default is used. + revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU) + + # revision-memory-request contains the memory allocation to assign + # to revisions by default. If omitted, no value is specified + # and the system default is used. + revision-memory-request: "100M" # 100 megabytes of memory + + # revision-cpu-limit contains the cpu allocation to limit + # revisions to by default. If omitted, no value is specified + # and the system default is used. + revision-cpu-limit: "1000m" # 1 CPU (aka 1000 milli-CPU) + + # revision-memory-limit contains the memory allocation to limit + # revisions to by default. If omitted, no value is specified + # and the system default is used. + revision-memory-limit: "200M" # 200 megabytes of memory + + # container-name-template contains a template for the default + # container name, if none is specified. This field supports + # Go templating and is supplied with the ObjectMeta of the + # enclosing Service or Configuration, so values such as + # {{.Name}} are also valid. + container-name-template: "user-container" + + # container-concurrency specifies the maximum number + # of requests the Container can handle at once, and requests + # above this threshold are queued. Setting a value of zero + # disables this throttling and lets through as many requests as + # the pod receives. 
+ container-concurrency: "0" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/deployment.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/deployment.yaml new file mode 100644 index 0000000000..c1ffb14e61 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/deployment.yaml @@ -0,0 +1,45 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-deployment + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + # This is the Go import path for the binary that is containerized + # and substituted here. + queueSidecarImage: knative.dev/serving/cmd/queue + + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. 
+ + # List of repositories for which tag to digest resolving should be skipped + registriesSkippingTagResolving: "ko.local,dev.local" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/domain.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/domain.yaml new file mode 100644 index 0000000000..5c4ce51d70 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/domain.yaml @@ -0,0 +1,60 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-domain + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Default value for domain. + # Although it will match all routes, it is the least-specific rule so it + # will only be used if no other domain matches. + example.com: | + + # These are example settings of domain. + # example.org will be used for routes having app=nonprofit. 
+ example.org: | + selector: + app: nonprofit + + # Routes having domain suffix of 'svc.cluster.local' will not be exposed + # through Ingress. You can define your own label selector to assign that + # domain suffix to your Route here, or you can set the label + # "serving.knative.dev/visibility=cluster-local" + # to achieve the same effect. This shows how to make routes having + # the label app=secret only exposed to the local cluster. + svc.cluster.local: | + selector: + app: secret + diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/gc.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/gc.yaml new file mode 100644 index 0000000000..f132d9847c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/gc.yaml @@ -0,0 +1,55 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-gc + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. 
+ # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Delay after revision creation before considering it for GC + stale-revision-create-delay: "48h" + + # Duration since a route has pointed at the revision before it + # should be GC'd. + # This minus lastpinned-debounce must be longer than the controller + # resync period (10 hours). + stale-revision-timeout: "15h" + + # Minimum number of generations of revisions to keep before considering + # them for GC + stale-revision-minimum-generations: "20" + + # To avoid constant updates, we allow an existing annotation to be stale by this + # amount before we update the timestamp. + stale-revision-lastpinned-debounce: "5h" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/logging.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/logging.yaml new file mode 100644 index 0000000000..9e7fabcb76 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/logging.yaml @@ -0,0 +1,71 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Common configuration for all Knative codebase + zap-logger-config: | + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + + # Log level overrides + # For all components except the autoscaler and queue proxy, + # changes are be picked up immediately. + # For autoscaler and queue proxy, changes require recreation of the pods. 
+ loglevel.controller: "info" + loglevel.autoscaler: "info" + loglevel.queueproxy: "info" + loglevel.webhook: "info" + loglevel.activator: "info" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/network.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/network.yaml new file mode 100644 index 0000000000..1bf59ca013 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/network.yaml @@ -0,0 +1,137 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-network + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # istio.sidecar.includeOutboundIPRanges specifies the IP ranges that Istio sidecar + # will intercept. + # + # Replace this with the IP ranges of your cluster (see below for some examples). + # Separate multiple entries with a comma. 
+ # Example: "10.4.0.0/14,10.7.240.0/20" + # + # If set to "*" Istio will intercept all traffic within + # the cluster as well as traffic that is going outside the cluster. + # Traffic going outside the cluster will be blocked unless + # necessary egress rules are created. + # + # If omitted or set to "", value of global.proxy.includeIPRanges + # provided at Istio deployment time is used. In default Knative serving + # deployment, global.proxy.includeIPRanges value is set to "*". + # + # If an invalid value is passed, "" is used instead. + # + # If valid set of IP address ranges are put into this value, + # Istio will no longer intercept traffic going to IP addresses + # outside the provided ranges and there is no need to specify + # egress rules. + # + # To determine the IP ranges of your cluster: + # IBM Cloud Private: cat cluster/config.yaml | grep service_cluster_ip_range + # IBM Cloud Kubernetes Service: "172.30.0.0/16,172.20.0.0/16,10.10.10.0/24" + # Google Container Engine (GKE): gcloud container clusters describe $CLUSTER_NAME --zone=$CLUSTER_ZONE | grep -e clusterIpv4Cidr -e servicesIpv4Cidr + # Azure Kubernetes Service (AKS): "10.0.0.0/16" + # Azure Container Service (ACS; deprecated): "10.244.0.0/16,10.240.0.0/16" + # Azure Container Service Engine (ACS-Engine; OSS): Configurable, but defaults to "10.0.0.0/16" + # Minikube: "10.0.0.1/24" + # + # For more information, visit + # https://istio.io/docs/tasks/traffic-management/egress/ + # + istio.sidecar.includeOutboundIPRanges: "*" + + # ingress.class specifies the default ingress class + # to use when not dictated by Route annotation. + # + # If not specified, will use the Istio ingress. + # + # Note that changing the Ingress class of an existing Route + # will result in undefined behavior. Therefore it is best to only + # update this value during the setup of Knative, to avoid getting + # undefined behavior. 
+ ingress.class: "istio.ingress.networking.knative.dev" + + # certificate.class specifies the default Certificate class + # to use when not dictated by Route annotation. + # + # If not specified, will use the Cert-Manager Certificate. + # + # Note that changing the Certificate class of an existing Route + # will result in undefined behavior. Therefore it is best to only + # update this value during the setup of Knative, to avoid getting + # undefined behavior. + certificate.class: "cert-manager.certificate.networking.internal.knative.dev" + + # domainTemplate specifies the golang text template string to use + # when constructing the Knative service's DNS name. The default + # value is "{{.Name}}.{{.Namespace}}.{{.Domain}}". And those three + # values (Name, Namespace, Domain) are the only variables defined. + # + # Changing this value might be necessary when the extra levels in + # the domain name generated is problematic for wildcard certificates + # that only support a single level of domain name added to the + # certificate's domain. In those cases you might consider using a value + # of "{{.Name}}-{{.Namespace}}.{{.Domain}}", or removing the Namespace + # entirely from the template. When choosing a new value be thoughtful + # of the potential for conflicts - for example, when users choose to use + # characters such as `-` in their service, or namespace, names. + # {{.Annotations}} can be used for any customization in the go template if needed. + # We strongly recommend keeping namespace part of the template to avoid domain name clashes + # Example '{{.Name}}-{{.Namespace}}.{{ index .Annotations "sub"}}.{{.Domain}}' + # and you have an annotation {"sub":"foo"}, then the generated template would be {Name}-{Namespace}.foo.{Domain} + domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}" + + # tagTemplate specifies the golang text template string to use + # when constructing the DNS name for "tags" within the traffic blocks + # of Routes and Configuration. 
This is used in conjunction with the + # domainTemplate above to determine the full URL for the tag. + tagTemplate: "{{.Tag}}-{{.Name}}" + + # Controls whether TLS certificates are automatically provisioned and + # installed in the Knative ingress to terminate external TLS connection. + # 1. Enabled: enabling auto-TLS feature. + # 2. Disabled: disabling auto-TLS feature. + autoTLS: "Disabled" + + # Controls the behavior of the HTTP endpoint for the Knative ingress. + # It requires autoTLS to be enabled. + # 1. Enabled: The Knative ingress will be able to serve HTTP connection. + # 2. Disabled: The Knative ingress will reject HTTP traffic. + # 3. Redirected: The Knative ingress will send a 302 redirect for all + # http connections, asking the clients to use HTTPS + httpProtocol: "Enabled" + diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/observability.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/observability.yaml new file mode 100644 index 0000000000..4d736ed123 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/observability.yaml @@ -0,0 +1,112 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # logging.enable-var-log-collection defaults to false. + # The fluentd daemon set will be set up to collect /var/log if + # this flag is true. + logging.enable-var-log-collection: "false" + + # logging.revision-url-template provides a template to use for producing the + # logging URL that is injected into the status of each Revision. + # This value is what you might use the the Knative monitoring bundle, and provides + # access to Kibana after setting up kubectl proxy. + logging.revision-url-template: | + http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) + + # If non-empty, this enables queue proxy writing user request logs to stdout, excluding probe + # requests. + # The value determines the shape of the request logs and it must be a valid go text/template. + # It is important to keep this as a single line. Multiple lines are parsed as separate entities + # by most collection agents and will split the request logs into multiple records. 
+ # + # The following fields and functions are available to the template: + # + # Request: An http.Request (see https://golang.org/pkg/net/http/#Request) + # representing an HTTP request received by the server. + # + # Response: + # struct { + # Code int // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) + # Size int // An int representing the size of the response. + # Latency float64 // A float64 representing the latency of the response in seconds. + # } + # + # Revision: + # struct { + # Name string // Knative revision name + # Namespace string // Knative revision namespace + # Service string // Knative service name + # Configuration string // Knative configuration name + # PodName string // Name of the pod hosting the revision + # PodIP string // IP of the pod hosting the revision + # } + # + logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + + # If true, this enables queue proxy writing request logs for probe requests to stdout. + # It uses the same template for user requests, i.e. logging.request-log-template. + logging.enable-probe-request-log: "false" + + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using stackdriver will incur additional charges + metrics.backend-destination: prometheus + + # metrics.request-metrics-backend-destination specifies the request metrics + # destination. It enables queue proxy to send request metrics. 
+ # Currently supported values: prometheus (the default), stackdriver. + metrics.request-metrics-backend-destination: prometheus + + # metrics.stackdriver-project-id field specifies the stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used if this field is not provided. + metrics.stackdriver-project-id: "" + + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to + # Stackdriver using "global" resource type and custom metric type if the + # metrics are not supported by "knative_revision" resource type. Setting this + # flag to "true" could cause extra Stackdriver charge. + # If metrics.backend-destination is not Stackdriver, this is ignored. + metrics.allow-stackdriver-custom-metrics: "false" + + # profiling.enable indicates whether it is allowed to retrieve runtime profiling data from + # the pods via an HTTP server in the format expected by the pprof visualization tool. When + # enabled, the Knative Serving pods expose the profiling data on an alternate HTTP port 8008. + # The HTTP context root for profiling is then /debug/pprof/. + profiling.enable: "false" diff --git a/test/vendor/knative.dev/serving/config/core/configmaps/tracing.yaml b/test/vendor/knative.dev/serving/config/core/configmaps/tracing.yaml new file mode 100644 index 0000000000..6677cb59fd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/configmaps/tracing.yaml @@ -0,0 +1,57 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-tracing + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # + # This may be "zipkin" or "stackdriver", the default is "none" + backend: "none" + + # URL to zipkin collector where traces are sent. + # This must be specified when backend is "zipkin" + zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + + # The GCP project into which stackdriver metrics will be written + # when backend is "stackdriver". If unspecified, the project-id + # is read from GCP metadata when running on GCP. + stackdriver-project-id: "my-project" + + # Enable zipkin debug mode. This allows all spans to be sent to the server + # bypassing sampling. + debug: "false" + + # Percentage (0-1) of requests to trace + sample-rate: "0.1" diff --git a/test/vendor/knative.dev/serving/config/core/deployments/activator-hpa.yaml b/test/vendor/knative.dev/serving/config/core/deployments/activator-hpa.yaml new file mode 100644 index 0000000000..3930befc4a --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/deployments/activator-hpa.yaml @@ -0,0 +1,34 @@ +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + minReplicas: 1 + maxReplicas: 20 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + metrics: + - type: Resource + resource: + name: cpu + # Percentage of the requested CPU + targetAverageUtilization: 100 diff --git a/test/vendor/knative.dev/serving/config/core/deployments/activator.yaml b/test/vendor/knative.dev/serving/config/core/deployments/activator.yaml new file mode 100644 index 0000000000..8be66c1f99 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/deployments/activator.yaml @@ -0,0 +1,133 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: activator + role: activator + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: activator + role: activator + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: activator + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/activator + + # The numbers are based on performance test results from + # https://github.com/knative/serving/issues/1625#issuecomment-511930023 + resources: + requests: + cpu: 300m + memory: 60Mi + limits: + cpu: 1000m + memory: 600Mi + + env: + # Run Activator with GC collection when newly generated memory is 500%. + - name: GOGC + value: "500" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: http1 + containerPort: 8012 + - name: h2c + containerPort: 8013 + + readinessProbe: &probe + httpGet: + port: 8012 + httpHeaders: + - name: k-kubelet-probe + value: "activator" + livenessProbe: *probe + + # The activator (often) sits on the dataplane, and may proxy long (e.g. + # streaming, websockets) requests. 
We give a long grace period for the + # activator to "lame duck" and drain outstanding requests before we + # forcibly terminate the pod (and outstanding connections). This value + # should be at least as large as the upper bound on the Revision's + # timeoutSeconds property to avoid servicing events disrupting + # connections. + terminationGracePeriodSeconds: 300 + +--- +apiVersion: v1 +kind: Service +metadata: + name: activator-service + namespace: knative-serving + labels: + app: activator + serving.knative.dev/release: devel +spec: + selector: + app: activator + ports: + # Define metrics and profiling for them to be accessible within service meshes. + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 80 + targetPort: 8012 + - name: http2 + port: 81 + targetPort: 8013 + type: ClusterIP diff --git a/test/vendor/knative.dev/serving/config/core/deployments/autoscaler.yaml b/test/vendor/knative.dev/serving/config/core/deployments/autoscaler.yaml new file mode 100644 index 0000000000..6b22977309 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/deployments/autoscaler.yaml @@ -0,0 +1,113 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: autoscaler + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: autoscaler + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/autoscaler + + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: websocket + containerPort: 8080 + - name: custom-metrics + containerPort: 8443 + + readinessProbe: &probe + httpGet: + port: 8080 + httpHeaders: + - name: k-kubelet-probe + value: "autoscaler" + livenessProbe: *probe + + args: + - "--secure-port=8443" + - "--cert-dir=/tmp" + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler + serving.knative.dev/release: devel + name: autoscaler + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. 
+ - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 8080 + targetPort: 8080 + - name: https-custom-metrics + port: 443 + targetPort: 8443 + selector: + app: autoscaler diff --git a/test/vendor/knative.dev/serving/config/core/deployments/controller.yaml b/test/vendor/knative.dev/serving/config/core/deployments/controller.yaml new file mode 100644 index 0000000000..0736a3434e --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/deployments/controller.yaml @@ -0,0 +1,91 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: controller + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: controller + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: controller + # This is the Go import path for the binary that is containerized + # and substituted here. 
+ image: knative.dev/serving/cmd/controller + + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1000Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: controller + serving.knative.dev/release: devel + name: controller + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: controller diff --git a/test/vendor/knative.dev/serving/config/core/deployments/webhook.yaml b/test/vendor/knative.dev/serving/config/core/deployments/webhook.yaml new file mode 100644 index 0000000000..ec6df6231b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/deployments/webhook.yaml @@ -0,0 +1,96 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: webhook + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: webhook + role: webhook + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: webhook + role: webhook + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: webhook + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/webhook + + resources: + requests: + cpu: 20m + memory: 20Mi + limits: + cpu: 200m + memory: 200Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: webhook + serving.knative.dev/release: devel + name: webhook + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. 
+ - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + role: webhook diff --git a/test/vendor/knative.dev/serving/config/core/rbac/200-addressable-resolvers-clusterrole.yaml b/test/vendor/knative.dev/serving/config/core/rbac/200-addressable-resolvers-clusterrole.yaml new file mode 100644 index 0000000000..faa0fb9c25 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/200-addressable-resolvers-clusterrole.yaml @@ -0,0 +1,36 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-addressable-resolver + labels: + serving.knative.dev/release: devel + # Labeled to facilitate aggregated cluster roles that act on Addressables. + duck.knative.dev/addressable: "true" + +# Do not use this role directly. These rules will be added to the "addressable-resolver" role. 
+rules: +- apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch diff --git a/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole-namespaced.yaml b/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole-namespaced.yaml new file mode 100644 index 0000000000..cd7711f9ac --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole-namespaced.yaml @@ -0,0 +1,49 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-admin + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-edit + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["create", "update", "patch", "delete"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-view + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["get", "list", "watch"] diff --git a/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole.yaml b/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole.yaml new file mode 100644 index 0000000000..379038f363 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/200-clusterrole.yaml @@ -0,0 +1,58 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-admin + labels: + serving.knative.dev/release: devel +aggregationRule: + clusterRoleSelectors: + - matchLabels: + serving.knative.dev/controller: "true" +rules: [] # Rules are automatically filled in by the controller manager. +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-core + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" +rules: + - apiGroups: [""] + resources: ["pods", "namespaces", "secrets", "configmaps", "endpoints", "services", "events", "serviceaccounts"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: [""] + resources: ["endpoints/restricted"] # Permission for RestrictedEndpointsAdmission + verbs: ["create"] + - apiGroups: ["apps"] + resources: ["deployments", "deployments/finalizers"] # finalizers are needed for the owner reference of the webhook + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - 
apiGroups: ["serving.knative.dev", "autoscaling.internal.knative.dev", "networking.internal.knative.dev"] + resources: ["*", "*/status", "*/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"] + - apiGroups: ["caching.internal.knative.dev"] + resources: ["images"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] diff --git a/test/vendor/knative.dev/serving/config/core/rbac/200-podspecable-bindings-clusterrole.yaml b/test/vendor/knative.dev/serving/config/core/rbac/200-podspecable-bindings-clusterrole.yaml new file mode 100644 index 0000000000..abe2e3781f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/200-podspecable-bindings-clusterrole.yaml @@ -0,0 +1,34 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-podspecable-binding + labels: + serving.knative.dev/release: devel + # Labeled to facilitate aggregated cluster roles that act on PodSpecables. + duck.knative.dev/podspecable: "true" + +# Do not use this role directly. These rules will be added to the "podspecable-binder" role. 
+rules: +- apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch diff --git a/test/vendor/knative.dev/serving/config/core/rbac/200-serviceaccount.yaml b/test/vendor/knative.dev/serving/config/core/rbac/200-serviceaccount.yaml new file mode 100644 index 0000000000..896fad1e4b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/200-serviceaccount.yaml @@ -0,0 +1,21 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel diff --git a/test/vendor/knative.dev/serving/config/core/rbac/201-clusterrolebinding.yaml b/test/vendor/knative.dev/serving/config/core/rbac/201-clusterrolebinding.yaml new file mode 100644 index 0000000000..764b0d0110 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/rbac/201-clusterrolebinding.yaml @@ -0,0 +1,28 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: knative-serving-controller-admin + labels: + serving.knative.dev/release: devel +subjects: + - kind: ServiceAccount + name: controller + namespace: knative-serving +roleRef: + kind: ClusterRole + name: knative-serving-admin + apiGroup: rbac.authorization.k8s.io diff --git a/test/vendor/knative.dev/serving/config/core/resources/certificate.yaml b/test/vendor/knative.dev/serving/config/core/resources/certificate.yaml new file mode 100644 index 0000000000..9080b89601 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/certificate.yaml @@ -0,0 +1,43 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + version: v1alpha1 + names: + kind: Certificate + plural: certificates + singular: certificate + categories: + - knative-internal + - networking + shortNames: + - kcert + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/configuration.yaml b/test/vendor/knative.dev/serving/config/core/resources/configuration.yaml new file mode 100644 index 0000000000..e5af2b90cd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/configuration.yaml @@ -0,0 +1,61 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: configurations.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Configuration + plural: configurations + singular: configuration + categories: + - all + - knative + - serving + shortNames: + - config + - cfg + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: LatestCreated + type: string + JSONPath: .status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/ingress.yaml b/test/vendor/knative.dev/serving/config/core/resources/ingress.yaml new file mode 100644 index 0000000000..08d6b64a15 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/ingress.yaml @@ -0,0 +1,46 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ingresses.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: Ingress + plural: ingresses + singular: ingress + categories: + - knative-internal + - networking + shortNames: + - kingress + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/metric.yaml b/test/vendor/knative.dev/serving/config/core/resources/metric.yaml new file mode 100644 index 0000000000..5f57a94a53 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/metric.yaml @@ -0,0 +1,41 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: metrics.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + version: v1alpha1 + names: + kind: Metric + plural: metrics + singular: metric + categories: + - knative-internal + - autoscaling + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/podautoscaler.yaml b/test/vendor/knative.dev/serving/config/core/resources/podautoscaler.yaml new file mode 100644 index 0000000000..c8340849c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/podautoscaler.yaml @@ -0,0 +1,53 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: podautoscalers.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: PodAutoscaler + plural: podautoscalers + singular: podautoscaler + categories: + - knative-internal + - autoscaling + shortNames: + - kpa + - pa + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: DesiredScale + type: integer + JSONPath: ".status.desiredScale" + - name: ActualScale + type: integer + JSONPath: ".status.actualScale" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/revision.yaml b/test/vendor/knative.dev/serving/config/core/resources/revision.yaml new file mode 100644 index 0000000000..979d0b952f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/revision.yaml @@ -0,0 +1,62 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: revisions.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Revision + plural: revisions + singular: revision + categories: + - all + - knative + - serving + shortNames: + - rev + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Config Name + type: string + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configuration']" + - name: K8s Service Name + type: string + JSONPath: ".status.serviceName" + - name: Generation + type: string # int in string form :( + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configurationGeneration']" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/route.yaml b/test/vendor/knative.dev/serving/config/core/resources/route.yaml new file mode 100644 index 0000000000..cac36ad652 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/route.yaml @@ -0,0 +1,57 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: routes.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Route + plural: routes + singular: route + categories: + - all + - knative + - serving + shortNames: + - rt + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/serverlessservice.yaml b/test/vendor/knative.dev/serving/config/core/resources/serverlessservice.yaml new file mode 100644 index 0000000000..47d34b107c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/serverlessservice.yaml @@ -0,0 +1,55 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serverlessservices.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: ServerlessService + plural: serverlessservices + singular: serverlessservice + categories: + - knative-internal + - networking + shortNames: + - sks + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Mode + type: string + JSONPath: ".spec.mode" + - name: ServiceName + type: string + JSONPath: ".status.serviceName" + - name: PrivateServiceName + type: string + JSONPath: ".status.privateServiceName" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/resources/service.yaml b/test/vendor/knative.dev/serving/config/core/resources/service.yaml new file mode 100644 index 0000000000..e6b73e94a4 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/resources/service.yaml @@ -0,0 +1,65 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: services.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Service + plural: services + singular: service + categories: + - all + - knative + - serving + shortNames: + - kservice + - ksvc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: LatestCreated + type: string + JSONPath: .status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" diff --git a/test/vendor/knative.dev/serving/config/core/webhooks/configmap-validation.yaml b/test/vendor/knative.dev/serving/config/core/webhooks/configmap-validation.yaml new file mode 100644 index 0000000000..d3b9847d67 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/webhooks/configmap-validation.yaml @@ -0,0 +1,34 @@ +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: config.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists diff --git a/test/vendor/knative.dev/serving/config/core/webhooks/defaulting.yaml b/test/vendor/knative.dev/serving/config/core/webhooks/defaulting.yaml new file mode 100644 index 0000000000..f3886d3c74 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/webhooks/defaulting.yaml @@ -0,0 +1,30 @@ +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: webhook.serving.knative.dev diff --git a/test/vendor/knative.dev/serving/config/core/webhooks/resource-validation.yaml b/test/vendor/knative.dev/serving/config/core/webhooks/resource-validation.yaml new file mode 100644 index 0000000000..4942480d41 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/webhooks/resource-validation.yaml @@ -0,0 +1,30 @@ +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: validation.webhook.serving.knative.dev diff --git a/test/vendor/knative.dev/serving/config/core/webhooks/secret.yaml b/test/vendor/knative.dev/serving/config/core/webhooks/secret.yaml new file mode 100644 index 0000000000..107985a4eb --- /dev/null +++ b/test/vendor/knative.dev/serving/config/core/webhooks/secret.yaml @@ -0,0 +1,22 @@ +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: webhook-certs + namespace: knative-serving + labels: + serving.knative.dev/release: devel +# The data is populated at install time. 
diff --git a/test/vendor/knative.dev/serving/config/custom-metrics-apiservice.yaml b/test/vendor/knative.dev/serving/config/custom-metrics-apiservice.yaml new file mode 120000 index 0000000000..3fe4a2a46c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/custom-metrics-apiservice.yaml @@ -0,0 +1 @@ +hpa-autoscaling/custom-metrics-apiservice.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/200-clusterrole-metrics.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/200-clusterrole-metrics.yaml new file mode 100644 index 0000000000..03c2720c9b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/200-clusterrole-metrics.yaml @@ -0,0 +1,25 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-server-resources + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +rules: + - apiGroups: ["custom.metrics.k8s.io"] + resources: ["*"] + verbs: ["*"] diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics-server.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics-server.yaml new file mode 100644 index 0000000000..0fa5ea7ea6 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics-server.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics:system:auth-delegator + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics.yaml new file mode 100644 index 0000000000..0c319c0aa5 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-clusterrolebinding-metrics.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hpa-controller-custom-metrics + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-metrics-server-resources +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-rolebinding-metrics-server.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-rolebinding-metrics-server.yaml new file mode 100644 index 0000000000..d4bfe39658 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/201-rolebinding-metrics-server.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: custom-metrics-auth-reader + namespace: kube-system + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/controller.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/controller.yaml new file mode 100644 index 0000000000..2c28aa749a --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/controller.yaml @@ -0,0 +1,93 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler-hpa + namespace: knative-serving + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa +spec: + selector: + matchLabels: + app: autoscaler-hpa + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler-hpa + # This is the Go import path for the binary that is containerized + # and substituted here. 
+ image: knative.dev/serving/cmd/autoscaler-hpa + + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa + name: autoscaler-hpa + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: autoscaler-hpa diff --git a/test/vendor/knative.dev/serving/config/hpa-autoscaling/custom-metrics-apiservice.yaml b/test/vendor/knative.dev/serving/config/hpa-autoscaling/custom-metrics-apiservice.yaml new file mode 100644 index 0000000000..266b96a7cf --- /dev/null +++ b/test/vendor/knative.dev/serving/config/hpa-autoscaling/custom-metrics-apiservice.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +spec: + service: + name: autoscaler + namespace: knative-serving + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/test/vendor/knative.dev/serving/config/istio-ingress/200-clusterrole.yaml b/test/vendor/knative.dev/serving/config/istio-ingress/200-clusterrole.yaml new file mode 100644 index 0000000000..113c33b850 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/istio-ingress/200-clusterrole.yaml @@ -0,0 +1,27 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # These are the permissions needed by the Istio Ingress implementation. 
+ name: knative-serving-istio + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" + networking.knative.dev/ingress-provider: istio +rules: + - apiGroups: ["networking.istio.io"] + resources: ["virtualservices", "gateways"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] diff --git a/test/vendor/knative.dev/serving/config/istio-ingress/202-gateway.yaml b/test/vendor/knative.dev/serving/config/istio-ingress/202-gateway.yaml new file mode 100644 index 0000000000..23d4d9d5d3 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/istio-ingress/202-gateway.yaml @@ -0,0 +1,33 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is the shared Gateway for all Knative routes to use. 
+apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: knative-ingress-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" diff --git a/test/vendor/knative.dev/serving/config/istio-ingress/203-local-gateway.yaml b/test/vendor/knative.dev/serving/config/istio-ingress/203-local-gateway.yaml new file mode 100644 index 0000000000..5ed74e3a00 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/istio-ingress/203-local-gateway.yaml @@ -0,0 +1,35 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A cluster local gateway to allow pods outside of the mesh to access +# Services and Routes not exposing through an ingress. If the users +# do have a service mesh setup, this isn't required. 
+apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: cluster-local-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: cluster-local-gateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" diff --git a/test/vendor/knative.dev/serving/config/istio-ingress/config.yaml b/test/vendor/knative.dev/serving/config/istio-ingress/config.yaml new file mode 100644 index 0000000000..caa720ddad --- /dev/null +++ b/test/vendor/knative.dev/serving/config/istio-ingress/config.yaml @@ -0,0 +1,69 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +data: + # TODO(nghia): Extract the .svc.cluster.local suffix into its own config. + + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. 
+ # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Default Knative Gateway after v0.3. It points to the Istio + # standard istio-ingressgateway, instead of a custom one that we + # used pre-0.3. The configuration format should be `gateway. + # {{gateway_namespace}}.{{gateway_name}}: "{{ingress_name}}. + # {{ingress_namespace}}.svc.cluster.local"`. The {{gateway_namespace}} + # is optional; when it is omitted, the system will search for + # the gateway in the serving system namespace `knative-serving` + gateway.knative-serving.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + + # A cluster local gateway to allow pods outside of the mesh to access + # Services and Routes not exposing through an ingress. If the users + # do have a service mesh setup, this isn't required and can be removed. + # + # An example use case is when users want to use Istio without any + # sidecar injection (like Knative's istio-ci-no-mesh.yaml). Since every pod + # is outside of the service mesh in that case, a cluster-local service + # will need to be exposed to a cluster-local gateway to be accessible. + # The configuration format should be `local-gateway.{{local_gateway_namespace}}. + # {{local_gateway_name}}: "{{cluster_local_gateway_name}}. + # {{cluster_local_gateway_namespace}}.svc.cluster.local"`. The + # {{local_gateway_namespace}} is optional; when it is omitted, the system + # will search for the local gateway in the serving system namespace + # `knative-serving` + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + + # To use only Istio service mesh and no cluster-local-gateway, replace + # all local-gateway.* entries by the following entry. 
+ local-gateway.mesh: "mesh" diff --git a/test/vendor/knative.dev/serving/config/istio-ingress/controller.yaml b/test/vendor/knative.dev/serving/config/istio-ingress/controller.yaml new file mode 100644 index 0000000000..75cb545b65 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/istio-ingress/controller.yaml @@ -0,0 +1,78 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: networking-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + matchLabels: + app: networking-istio + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + # This must be outside of the mesh to probe the gateways. + # NOTE: this is allowed here and not elsewhere because + # this is the Istio controller, and so it may be Istio-aware. + sidecar.istio.io/inject: "false" + labels: + app: networking-istio + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: networking-istio + # This is the Go import path for the binary that is containerized + # and substituted here. 
+ image: knative.dev/serving/cmd/networking/istio + + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +# Unlike other controllers, this doesn't need a Service defined for metrics and +# profiling because it opts out of the mesh (see annotation above). diff --git a/test/vendor/knative.dev/serving/config/monitoring/100-namespace.yaml b/test/vendor/knative.dev/serving/config/monitoring/100-namespace.yaml new file mode 100644 index 0000000000..015070b6f4 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/100-namespace.yaml @@ -0,0 +1,20 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Namespace +metadata: + name: knative-monitoring + labels: + serving.knative.dev/release: devel diff --git a/test/vendor/knative.dev/serving/config/monitoring/OWNERS b/test/vendor/knative.dev/serving/config/monitoring/OWNERS new file mode 100644 index 0000000000..ab8e1f2983 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- monitoring-approvers + +reviewers: +- monitoring-reviewers + +labels: +- area/monitoring diff --git a/test/vendor/knative.dev/serving/config/monitoring/README.md b/test/vendor/knative.dev/serving/config/monitoring/README.md new file mode 100644 index 0000000000..79e674d80c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/README.md @@ -0,0 +1,26 @@ +# Monitoring Deployment + +This folder contains deployment files for monitoring and logging components. + +## Tracing + +Deployment files are available for a range of distributed tracing solutions. +However, only one solution can be deployed at a time. Refer to the following +links to find out more information on capabilities and benefits of each +solution. + +- [Zipkin](https://zipkin.io/) +- [Jaeger](https://www.jaegertracing.io/) + +## Notes for Contributors + +`kubectl -R -f` installs the files within a folder in alphabetical order. In +order to install the files with correct ordering within a folder, a three digit +prefix is added. + +- Files with a prefix require files with smaller prefixes to be installed before + they are installed. +- Files with the same prefix can be installed in any order within the set + sharing the same prefix. +- Files without any prefix can be installed in any order and they don't have any + dependencies. 
diff --git a/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/100-fluentd-configmap.yaml b/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/100-fluentd-configmap.yaml new file mode 100644 index 0000000000..06e5558304 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/100-fluentd-configmap.yaml @@ -0,0 +1,119 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ConfigMap +apiVersion: v1 +metadata: + name: fluentd-ds-config + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + 100.system.conf: |- + + root_dir /tmp/fluentd-buffers/ + + 200.containers.input.conf: |- + # Capture logs from container's stdout/stderr -> Docker -> .log in JSON format + + @id containers-stdout-stderr + @type tail + path /var/log/containers/*user-container-*.log,/var/log/containers/*build-step-*.log,/var/log/containers/controller-*controller-*.log,/var/log/containers/webhook-*webhook-*.log,/var/log/containers/*autoscaler-*autoscaler-*.log,/var/log/containers/*queue-proxy-*.log,/var/log/containers/activator-*activator-*.log + pos_file /var/log/containers-stdout-stderr.pos + time_format %Y-%m-%dT%H:%M:%S.%NZ + tag raw.kubernetes.* + format json + read_from_head true + + # Capture logs from Knative containers' /var/log + + @id containers-var-log + @type tail + # **/*/**/* allows path expansion to go through one symlink (the one created 
by the init container) + path /var/lib/kubelet/pods/*/volumes/kubernetes.io~empty-dir/knative-internal/**/*/**/* + path_key stream + pos_file /var/log/containers-var-log.pos + tag raw.kubernetes.* + message_key log + read_from_head true + + @type multi_format + + format json + time_key fluentd-time # fluentd-time is reserved for structured logs + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + format none + message_key log + + + + # Combine multi line logs which form an exception stack trace into a single log entry + + @id raw.kubernetes + @type detect_exceptions + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 + + # Make stream path correct from the container's point of view + + @type record_transformer + enable_ruby true + + stream /var/log/${record["stream"].scan(/\/knative-internal\/[^\/]+\/(.*)/).last.last} + + + # Add Kubernetes metadata to logs from /var/log/containers + + @type kubernetes_metadata + + # Add Kubernetes metadata to logs from /var/lib/kubelet/pods/*/volumes/kubernetes.io~empty-dir/knative-internal/**/*/**/* + + @type kubernetes_metadata + tag_to_kubernetes_name_regexp (?[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})\.volumes.kubernetes\.io~empty-dir\.knative-internal\.(?[^_]+)_(?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?user-container)\..*?$ + + 300.forward.input.conf: |- + # Takes the messages sent over TCP, e.g. 
request logs from Istio + + @type forward + port 24224 + + 900.output.conf: |- + # Send to Elastic Search + + @id elasticsearch + @type elasticsearch + @log_level info + host elasticsearch-logging + port 9200 + logstash_format true + + @type file + path /var/log/fluentd-buffers/kubernetes.system.buffer + flush_mode interval + retry_type exponential_backoff + flush_thread_count 2 + flush_interval 5s + retry_forever + retry_max_interval 30 + chunk_limit_size 2M + queue_limit_length 8 + overflow_action block + + diff --git a/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/200-fluentd.yaml b/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/200-fluentd.yaml new file mode 100644 index 0000000000..2ef8e312d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/logging/elasticsearch/200-fluentd.yaml @@ -0,0 +1,165 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + serving.knative.dev/release: devel +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-ds + labels: + app: fluentd-ds + serving.knative.dev/release: devel +rules: +- apiGroups: + - "" + resources: + - "namespaces" + - "pods" + verbs: + - "get" + - "watch" + - "list" +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-ds + labels: + app: fluentd-ds + serving.knative.dev/release: devel +subjects: +- kind: ServiceAccount + name: fluentd-ds + namespace: knative-monitoring + apiGroup: "" +roleRef: + kind: ClusterRole + name: fluentd-ds + apiGroup: "" +--- +apiVersion: v1 +kind: Service +metadata: + # Name of this service is referred at multiple places. + # Any changes to this name should ensure to fix it in + # all places where this name is referred. + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + serving.knative.dev/release: devel +spec: + selector: + app: fluentd-ds + ports: + - name: fluentd-tcp + port: 24224 + protocol: TCP + targetPort: 24224 + - name: fluentd-udp + port: 24224 + protocol: UDP + targetPort: 24224 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + version: v2.0.4 + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: fluentd-ds + version: v2.0.4 + template: + metadata: + labels: + app: fluentd-ds + version: v2.0.4 + serving.knative.dev/release: devel + # This annotation ensures that fluentd does not get evicted if the node + # supports critical pod annotation based priority scheme. + # Note that this does not guarantee admission on the nodes (#40573). 
+ annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: fluentd-ds + containers: + - name: fluentd-ds + image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + env: + - name: FLUENTD_ARGS + value: --no-supervisor -q + resources: + limits: + memory: 500Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlogcontainers + mountPath: /var/log/containers + readOnly: true + - name: varlogpods + mountPath: /var/log/pods + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: varlibkubeletpods + mountPath: /var/lib/kubelet/pods + readOnly: true + - name: libsystemddir + mountPath: /host/lib + readOnly: true + - name: config-volume + mountPath: /etc/fluent/config.d + nodeSelector: + beta.kubernetes.io/fluentd-ds-ready: "true" + terminationGracePeriodSeconds: 30 + volumes: + - name: varlogcontainers + hostPath: + path: /var/log/containers + # It is needed because files under /var/log/containers link to /var/log/pods + - name: varlogpods + hostPath: + path: /var/log/pods + # It is needed because files under /var/log/pods link to /var/lib/docker/containers + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + # It is needed because user-container's /var/log is located in /var/lib/kubelet/pods/*/volumes/ + - name: varlibkubeletpods + hostPath: + path: /var/lib/kubelet/pods + # It is needed to copy systemd library to decompress journals + - name: libsystemddir + hostPath: + path: /usr/lib64 + - name: config-volume + configMap: + name: fluentd-ds-config diff --git a/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/100-fluentd-configmap.yaml b/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/100-fluentd-configmap.yaml new file mode 100644 index 0000000000..5eda924eb5 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/100-fluentd-configmap.yaml @@ -0,0 +1,116 @@ +# 
Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kind: ConfigMap +apiVersion: v1 +metadata: + name: fluentd-ds-config + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + 100.system.conf: |- + + root_dir /tmp/fluentd-buffers/ + + 200.containers.input.conf: |- + # Capture logs from container's stdout/stderr -> Docker -> .log in JSON format + + @id containers-stdout-stderr + @type tail + path /var/log/containers/*user-container-*.log,/var/log/containers/*build-step-*.log,/var/log/containers/controller-*controller-*.log,/var/log/containers/webhook-*webhook-*.log,/var/log/containers/*autoscaler-*autoscaler-*.log,/var/log/containers/*queue-proxy-*.log,/var/log/containers/activator-*activator-*.log + pos_file /var/log/containers-stdout-stderr.pos + time_format %Y-%m-%dT%H:%M:%S.%NZ + tag raw.kubernetes.* + format json + read_from_head true + + # Capture logs from Knative containers' /var/log + + @id containers-var-log + @type tail + path /var/lib/kubelet/pods/*/volumes/kubernetes.io~empty-dir/knative-internal/**/*/**/* + path_key stream + pos_file /var/log/containers-var-log.pos + tag raw.kubernetes.* + format none + message_key log + read_from_head true + + # Combine multi line logs which form an exception stack trace into a single log entry + + @id raw.kubernetes + @type detect_exceptions + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 
1000 + + # Make stream path correct from the container's point of view + + @type record_transformer + enable_ruby true + + stream /var/log/${record["stream"].scan(/\/knative-internal\/[^\/]+\/(.*)/).last.last} + + + # Add Kubernetes metadata to logs from /var/log/containers + + @type kubernetes_metadata + + # Add Kubernetes metadata to logs from /var/lib/kubelet/pods/*/volumes/kubernetes.io~empty-dir/knative-internal/**/*/**/* + + @type kubernetes_metadata + tag_to_kubernetes_name_regexp (?[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})\.volumes.kubernetes\.io~empty-dir\.knative-internal\.(?[^_]+)_(?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?user-container)\..*?$ + + 300.forward.input.conf: |- + # Takes the messages sent over TCP, e.g. request logs from Istio + + @type forward + port 24224 + + 900.output.conf: |- + # Send to Stackdriver + # google_cloud plugin moves `kubernetes` metadata to `labels`. + + @type google_cloud + + # Try to detect JSON formatted log entries. + detect_json true + # Allow log entries from multiple containers to be sent in the same request. + split_logs_by_tag false + # Set the buffer type to file to improve the reliability and reduce the memory consumption + buffer_type file + buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer + # Set queue_full action to block because we want to pause gracefully + # in case of the off-the-limits load instead of throwing an exception + buffer_queue_full_action block + # Set the chunk limit conservatively to avoid exceeding the recommended + # chunk size of 5MB per write request. + buffer_chunk_limit 1M + # Cap the combined memory usage of this buffer and the one below to + # 1MiB/chunk * (6 + 2) chunks = 8 MiB + buffer_queue_limit 6 + # Never wait more than 5 seconds before flushing logs in the non-error case. + flush_interval 5s + # Never wait longer than 30 seconds between retries. 
+ max_retry_wait 30 + # Disable the limit on the number of retries (retry forever). + disable_retry_limit + # Use multiple threads for processing. + num_threads 2 + use_grpc true + diff --git a/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/200-fluentd.yaml b/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/200-fluentd.yaml new file mode 100644 index 0000000000..2ef8e312d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/logging/stackdriver/200-fluentd.yaml @@ -0,0 +1,165 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + serving.knative.dev/release: devel +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-ds + labels: + app: fluentd-ds + serving.knative.dev/release: devel +rules: +- apiGroups: + - "" + resources: + - "namespaces" + - "pods" + verbs: + - "get" + - "watch" + - "list" +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-ds + labels: + app: fluentd-ds + serving.knative.dev/release: devel +subjects: +- kind: ServiceAccount + name: fluentd-ds + namespace: knative-monitoring + apiGroup: "" +roleRef: + kind: ClusterRole + name: fluentd-ds + apiGroup: "" +--- +apiVersion: v1 +kind: Service +metadata: + # Name of this service is referred at multiple places. + # Any changes to this name should ensure to fix it in + # all places where this name is referred. + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + serving.knative.dev/release: devel +spec: + selector: + app: fluentd-ds + ports: + - name: fluentd-tcp + port: 24224 + protocol: TCP + targetPort: 24224 + - name: fluentd-udp + port: 24224 + protocol: UDP + targetPort: 24224 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-ds + namespace: knative-monitoring + labels: + app: fluentd-ds + version: v2.0.4 + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: fluentd-ds + version: v2.0.4 + template: + metadata: + labels: + app: fluentd-ds + version: v2.0.4 + serving.knative.dev/release: devel + # This annotation ensures that fluentd does not get evicted if the node + # supports critical pod annotation based priority scheme. + # Note that this does not guarantee admission on the nodes (#40573). 
+ annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + serviceAccountName: fluentd-ds + containers: + - name: fluentd-ds + image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4 + env: + - name: FLUENTD_ARGS + value: --no-supervisor -q + resources: + limits: + memory: 500Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlogcontainers + mountPath: /var/log/containers + readOnly: true + - name: varlogpods + mountPath: /var/log/pods + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: varlibkubeletpods + mountPath: /var/lib/kubelet/pods + readOnly: true + - name: libsystemddir + mountPath: /host/lib + readOnly: true + - name: config-volume + mountPath: /etc/fluent/config.d + nodeSelector: + beta.kubernetes.io/fluentd-ds-ready: "true" + terminationGracePeriodSeconds: 30 + volumes: + - name: varlogcontainers + hostPath: + path: /var/log/containers + # It is needed because files under /var/log/containers link to /var/log/pods + - name: varlogpods + hostPath: + path: /var/log/pods + # It is needed because files under /var/log/pods link to /var/lib/docker/containers + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + # It is needed because user-container's /var/log is located in /var/lib/kubelet/pods/*/volumes/ + - name: varlibkubeletpods + hostPath: + path: /var/lib/kubelet/pods + # It is needed to copy systemd library to decompress journals + - name: libsystemddir + hostPath: + path: /usr/lib64 + - name: config-volume + configMap: + name: fluentd-ds-config diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-custom-config.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-custom-config.yaml new file mode 100644 index 0000000000..1446c1ceab --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-custom-config.yaml @@ -0,0 
+1,27 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-custom-config + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + custom.ini: |+ + # You can customize Grafana via changing context of this field. + [auth.anonymous] + # enable anonymous access + enabled = true diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-efficiency.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-efficiency.yaml new file mode 100644 index 0000000000..d7791df983 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-efficiency.yaml @@ -0,0 +1,518 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-knative-efficiency + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + knative-control-plane-efficiency-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "description": "Knative Serving - Control Plane Efficiency", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": 2, + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"knative-serving\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "knative-serving", + "refId": "A" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"knative-build\"}[1m]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "knative-build", + "refId": "C" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"istio-system\"}[1m]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "istio-system", + "refId": "D" + }, + { + "expr": 
"sum(rate(container_cpu_usage_seconds_total{namespace=\"kube-system\"}[1m]))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "kube-system", + "refId": "F" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"kube-public\"}[1m]))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "kube-public", + "refId": "E" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"knative-monitoring\"}[1m]))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "knative-monitoring", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Namespace CPU Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{namespace=\"knative-serving\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": 
"knative-serving", + "refId": "A" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"knative-build\"})", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "knative-build", + "refId": "C" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"istio-system\"})", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "istio-system", + "refId": "D" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"kube-system\"})", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "kube-system", + "refId": "F" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"kube-public\"})", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "kube-public", + "refId": "E" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"knative-monitoring\"})", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "legendFormat": "knative-monitoring", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Namespace Memory Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": 2, + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + 
"total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace!~\"knative-serving|knative-monitoring|knative-build|istio-system|kube-system|kube-public|^$\"}[1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Data plane", + "refId": "A" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=~\"knative-serving|knative-monitoring|knative-build|istio-system|kube-system|kube-public\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Control plane", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Control Plane vs Data Plane CPU Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + 
"seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{namespace!~\"knative-serving|knative-monitoring|knative-build|istio-system|kube-system|kube-public|^$\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Data plane", + "refId": "A" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=~\"knative-serving|knative-monitoring|knative-build|istio-system|kube-system|kube-public\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Control plane", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Control Plane vs Data Plane Memory Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Knative Serving - Control Plane Efficiency", + "uid": "1oI1URnik", + "version": 2 + } diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-reconciler.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-reconciler.yaml new file mode 100644 index 0000000000..a8a7547fa7 
--- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-reconciler.yaml @@ -0,0 +1,624 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-knative-reconciler + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + knative-reconciler-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "description": "Knative - Reconciler", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 7, + "panels": [], + "title": "Aggregate", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + 
"percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum by (reconciler)(60 * rate(controller_reconcile_count[1m]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{reconciler}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Reconcile Count (per min)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "panels": [], + "title": "Per Reconciler", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(60 * rate(controller_reconcile_count{reconciler=\"$reconciler\"}[1m]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + 
"legendFormat": "{{reconciler}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$reconciler Reconcile Count (per min)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\"}[1m])) by (le))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "99th", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "90th", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "50th", + 
"refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$reconciler Reconcile Latency Percentiles", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 9, + "panels": [], + "title": "Per Reconciler & Key", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(60 * rate(controller_reconcile_count{reconciler=\"$reconciler\", key=\"$key\"}[1m]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{reconciler}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$reconciler/$key Reconcile Count (per min)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + 
"values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\", key=\"$key\"}[1m])) by (le))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "99th", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\", key=\"$key\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "90th", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(controller_reconcile_latency_bucket{reconciler=\"$reconciler\", key=\"$key\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "50th", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "$reconciler/$key Reconcile Latency Percentiles", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + 
"show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Reconciler", + "multi": false, + "name": "reconciler", + "options": [], + "query": "label_values(controller_reconcile_count, reconciler)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Key", + "multi": false, + "name": "key", + "options": [], + "query": "label_values(controller_reconcile_count{reconciler=\"$reconciler\"}, key)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Knative - Reconciler", + "uid": "j0oFdEYiz", + "version": 10 + } diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-scaling.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-scaling.yaml new file mode 100644 index 0000000000..5bff01345b --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative-scaling.yaml @@ -0,0 
+1,1183 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: scaling-config + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + scaling-dashboard.json: |+ + { + "__inputs": [ + { + "name": "prometheus", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.0.3" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "5.0.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Knative Serving - Scaling Debugging", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1564079739384, + "links": [], + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 14, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 2, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(autoscaler_actual_pods{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Actual Pods", + "refId": "A" + }, + { + "expr": "sum(autoscaler_requested_pods{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Requested Pods", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Revision Pod Counts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": "Concurrency", + "logBase": 1, + "max": "1", + "min": null, + "show": false + } + ] + } + ], + "title": "Revision Pod Counts", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 18, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$namespace\", pod=~\"$revision-deployment-.*\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Cores requested", + "refId": "A" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod_name=~\"$revision-deployment-.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Cores used", + "refId": "B" + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$namespace\", pod=~\"$revision-deployment-.*\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Core limit", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Revision CPU Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\", pod=~\"$revision-deployment-.*\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Memory requested", + "refId": "A" + }, + { + "expr": "sum(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=~\"$revision-deployment-.*\"})", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "Memory used", + "refId": "B" + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", pod=~\"$revision-deployment-.*\"})", + "format": "time_series", + "intervalFactor": 1, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pod Memory Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "title": "Resource Usages", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 2 + }, + "id": 16, + "panels": [], + "title": "Autoscaler Metrics", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": 
[], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(autoscaler_desired_pods{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"}) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Desired Pods", + "refId": "A" + }, + { + "expr": "sum(autoscaler_observed_pods{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Observed Pods", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Pod Counts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Panic Mode", + "color": "#ea6460", + "dashes": true, + "fill": 2, + "linewidth": 2, + "steppedLine": true, + "yaxis": 2 + }, + { + "alias": "Target Concurrency Per Pod", + "color": "#0a50a1", + "dashes": true, + 
"steppedLine": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(autoscaler_stable_request_concurrency{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Average Concurrency", + "refId": "A" + }, + { + "expr": "sum(autoscaler_panic_request_concurrency{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Average Panic Concurrency", + "refId": "B" + }, + { + "expr": "sum(autoscaler_target_concurrency_per_pod{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Target Concurrency", + "refId": "C" + }, + { + "expr": "sum(autoscaler_excess_burst_capacity{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Excess Burst Capacity", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Observed Concurrency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 9, + "legend": { + "avg": false, + 
"current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(autoscaler_stable_requests_per_second{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Average RPS", + "refId": "A" + }, + { + "expr": "sum(autoscaler_panic_requests_per_second{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Average Panic RPS", + "refId": "B" + }, + { + "expr": "sum(autoscaler_target_requests_per_second{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Target RPS", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Observed RPS", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "decimals": null, + "fill": 1, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 12, + "legend": { + "avg": false, + 
"current": false, + "hideZero": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Panic Mode", + "color": "#e24d42", + "linewidth": 2, + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(autoscaler_panic_mode{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"} )", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Panic Mode", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Panic Mode", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": "1.0", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 20, + "panels": [], + "title": "Activator Metrics", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "round(sum(increase(activator_request_count{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (response_code))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ response_code }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Request Count in last minute by Response Code", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 42 + }, + "id": 28, + "legend": { + "avg": true, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(activator_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p50)", + "refId": "A" + }, + { + "expr": "label_replace(histogram_quantile(0.90, 
sum(rate(activator_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p90)", + "refId": "B" + }, + { + "expr": "label_replace(histogram_quantile(0.95, sum(rate(activator_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p95)", + "refId": "C" + }, + { + "expr": "label_replace(histogram_quantile(0.99, sum(rate(activator_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p99)", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Response Time in last minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 29, + "legend": { + "avg": false, + "current": 
false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "Panic Mode", + "color": "#ea6460", + "dashes": true, + "fill": 2, + "linewidth": 2, + "steppedLine": true, + "yaxis": 2 + }, + { + "alias": "Target Concurrency Per Pod", + "color": "#0a50a1", + "dashes": true, + "steppedLine": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "sum(activator_request_concurrency{namespace_name=\"$namespace\", configuration_name=\"$configuration\", revision_name=\"$revision\"})", + "format": "time_series", + "interval": "1s", + "intervalFactor": 1, + "legendFormat": "Request Concurrency", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Request Concurrency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(autoscaler_desired_pods, namespace_name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": 
"prometheus", + "hide": 0, + "includeAll": false, + "label": "Configuration", + "multi": false, + "name": "configuration", + "options": [], + "query": "label_values(autoscaler_desired_pods{namespace_name=\"$namespace\"}, configuration_name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Revision", + "multi": false, + "name": "revision", + "options": [], + "query": "label_values(autoscaler_desired_pods{namespace_name=\"$namespace\", configuration_name=\"$configuration\"}, revision_name)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Knative Serving - Scaling Debugging", + "uid": "u_-9SIMiz", + "version": 2 + } diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative.yaml new file mode 100644 index 0000000000..148a54b6fa --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana-dash-knative.yaml @@ -0,0 +1,1196 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-knative + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + revision-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "description": "Knative Serving - Revision HTTP Requests", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 8, + "panels": [], + "title": "Overview (average over the selected time range)", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 2, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": 
"null" + } + ], + "repeat": null, + "repeatDirection": "v", + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(rate(revision_request_count{namespace_name=\"$namespace\", revision_name=~\"$revision\", configuration_name=~\"$configuration\"}[1m])), 0.001)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "Request Volume", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#d44a3a", + "rgba(237, 129, 40, 0.89)", + "#299c46" + ], + "datasource": "prometheus", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(revision_request_count{response_code_class!=\"5xx\", namespace_name=\"$namespace\", revision_name=~\"$revision\", configuration_name=~\"$configuration\"}[1m])) / sum(rate(revision_request_count{namespace_name=\"$namespace\", revision_name=~\"$revision\", 
configuration_name=~\"$configuration\"}[1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "95, 99, 99.5", + "title": "Success Rate (non-5xx responses)", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#d44a3a", + "rgba(237, 129, 40, 0.89)", + "#299c46" + ], + "datasource": "prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 5, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(revision_request_count{response_code_class=\"4xx\", namespace_name=\"$namespace\", revision_name=~\"$revision\", configuration_name=~\"$configuration\"}[1m])) ", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "4xx", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#d44a3a", + "rgba(237, 129, 40, 0.89)", + "#299c46" + ], + "datasource": 
"prometheus", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(revision_request_count{response_code_class=\"5xx\", namespace_name=\"$namespace\", revision_name=~\"$revision\", configuration_name=~\"$configuration\"}[1m])) ", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "", + "title": "5xx", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 11, + "panels": [], + "title": "Request Volume", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 17, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(round(sum(rate(revision_request_count{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name), 0.001), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{revision_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Request Volume by Revision", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(rate(revision_request_count{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (response_code_class), 0.001)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ response_code_class }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + 
"title": "Request Volume by Response Code Class", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 15, + "panels": [], + "title": "Response Time", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 20, + "legend": { + "alignAsTable": false, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p50)", + "refId": "A" + }, + { + "expr": "label_replace(histogram_quantile(0.90, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), 
\"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p90)", + "refId": "B" + }, + { + "expr": "label_replace(histogram_quantile(0.95, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p95)", + "refId": "C" + }, + { + "expr": "label_replace(histogram_quantile(0.99, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\"}[1m])) by (revision_name, le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ revision_name }} (p99)", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Response Time by Revision", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + 
}, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"2xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "2xx (p50)", + "refId": "C" + }, + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"3xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "3xx (p50)", + "refId": "D" + }, + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"4xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "4xx (p50)", + "refId": "A" + }, + { + "expr": "label_replace(histogram_quantile(0.50, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"5xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "5xx (p50)", + "refId": "B" + }, + { + "expr": "label_replace(histogram_quantile(0.95, 
sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"2xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "2xx (p95)", + "refId": "E" + }, + { + "expr": "label_replace(histogram_quantile(0.95, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"3xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "3xx (p95)", + "refId": "F" + }, + { + "expr": "label_replace(histogram_quantile(0.95, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"4xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "4xx (p95)", + "refId": "G" + }, + { + "expr": "label_replace(histogram_quantile(0.95, sum(rate(revision_request_latencies_bucket{namespace_name=\"$namespace\", configuration_name=~\"$configuration\",revision_name=~\"$revision\",response_code_class=\"5xx\"}[1m])) by (le)), \"revision_name\", \"$2\", \"revision_name\", \"$configuration(-+)(.*)\")", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "5xx (p95)", + "refId": "H" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Response Time by Response Code Class", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 
1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(revision_request_count{namespace_name!=\"unknown\"}, namespace_name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Configuration", + "multi": false, + "name": "configuration", + "options": [], + "query": "label_values(revision_request_count{namespace_name=\"$namespace\", configuration_name!=\"unknown\"}, configuration_name)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "$tag", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": true, + "label": "Revision", + "multi": true, + "name": "revision", + "options": [], + "query": "label_values(revision_request_count{namespace_name=\"$namespace\", configuration_name=~\"$configuration\", revision_name!=\"unknown\"}, revision_name)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + 
"title": "Knative Serving - Revision HTTP Requests", + "uid": "im_gFbWik", + "version": 2 + } + resource-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "description": "Knative Serving - Revision CPU and Memory Usage", + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod_name=~\"$revision.*\", container_name != \"POD\"}[1m])) by (container_name)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{container_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total CPU Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "prometheus", + "fill": 1, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{namespace=\"$namespace\", pod_name=~\"$revision.*\", container_name != \"POD\"}) by (container_name)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{container_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Memory Usage", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(kube_pod_labels{label_serving_knative_dev_configuration=~\".+\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Configuration", + 
"multi": false, + "name": "configuration", + "options": [], + "query": "label_values(kube_pod_labels{label_serving_knative_dev_configuration=~\".+\", namespace=\"$namespace\"}, label_serving_knative_dev_configuration)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Revision", + "multi": false, + "name": "revision", + "options": [], + "query": "label_values(kube_pod_labels{label_serving_knative_dev_configuration=~\".+\", namespace=\"$namespace\", label_serving_knative_dev_configuration=\"$configuration\"}, label_serving_knative_dev_revision)", + "refresh": 2, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Knative Serving - Revision CPU and Memory Usage", + "uid": "bKOoE9Wmk", + "version": 4 + } diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana.yaml new file mode 100644 index 0000000000..70aad81ca5 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-grafana.yaml @@ -0,0 +1,260 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-datasources + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + prometheus.yaml: |+ + datasources: + - name: prometheus + type: prometheus + access: proxy + org_id: 1 + url: http://prometheus-system-np:8080 + version: 1 + editable: false +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboards + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +data: + dashboards.yaml: |+ + - name: 'knative' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/knative + - name: 'knative-efficiency' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/knative-efficiency + - name: 'knative-reconciler' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/knative-reconciler + - name: 'kubernetes-deployment' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-deployment + - name: 'kubernetes-capacity-planning' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-capacity-planning + - name: 'kubernetes-cluster-health' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-cluster-health + - name: 'kubernetes-cluster-status' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-cluster-status + - name: 'kubernetes-control-plane-status' + org_id: 1 + folder: '' + 
type: file + options: + path: /grafana-dashboard-definition/kubernetes-control-plane-status + - name: 'kubernetes-resource-requests' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-resource-requests + - name: 'kubernetes-nodes' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-nodes + - name: 'kubernetes-pods' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-pods + - name: 'kubernetes-statefulset' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/kubernetes-statefulset + - name: 'knative-serving-scaling' + org_id: 1 + folder: '' + type: file + options: + path: /grafana-dashboard-definition/scaling +--- +apiVersion: v1 +kind: Service +metadata: + name: grafana + namespace: knative-monitoring + labels: + app: grafana + serving.knative.dev/release: devel +spec: + type: NodePort + ports: + - port: 30802 + protocol: TCP + targetPort: 3000 + selector: + app: grafana +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: grafana + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + labels: + app: grafana + serving.knative.dev/release: devel + spec: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + containers: + - name: grafana + image: grafana/grafana:6.3.3 + env: + - name: GF_PATHS_CONFIG + value: "/etc/grafana/config/custom.ini" + volumeMounts: + - name: grafana-custom-config + mountPath: /etc/grafana/config + - name: grafana-storage + mountPath: /var/lib/grafana + - name: grafana-datasources + mountPath: /etc/grafana/provisioning/datasources + - name: grafana-dashboards + mountPath: /etc/grafana/provisioning/dashboards + - name: grafana-dashboard-definition-knative + mountPath: /grafana-dashboard-definition/knative + - name: 
grafana-dashboard-definition-knative-efficiency + mountPath: /grafana-dashboard-definition/knative-efficiency + - name: grafana-dashboard-definition-knative-reconciler + mountPath: /grafana-dashboard-definition/knative-reconciler + - name: grafana-dashboard-definition-kubernetes-deployment + mountPath: /grafana-dashboard-definition/kubernetes-deployment + - name: grafana-dashboard-definition-kubernetes-capacity-planning + mountPath: /grafana-dashboard-definition/kubernetes-capacity-planning + - name: grafana-dashboard-definition-kubernetes-cluster-health + mountPath: /grafana-dashboard-definition/kubernetes-cluster-health + - name: grafana-dashboard-definition-kubernetes-cluster-status + mountPath: /grafana-dashboard-definition/kubernetes-cluster-status + - name: grafana-dashboard-definition-kubernetes-control-plane-status + mountPath: /grafana-dashboard-definition/kubernetes-control-plane-status + - name: grafana-dashboard-definition-kubernetes-resource-requests + mountPath: /grafana-dashboard-definition/kubernetes-resource-requests + - name: grafana-dashboard-definition-kubernetes-nodes + mountPath: /grafana-dashboard-definition/kubernetes-nodes + - name: grafana-dashboard-definition-kubernetes-pods + mountPath: /grafana-dashboard-definition/kubernetes-pods + - name: grafana-dashboard-definition-kubernetes-statefulset + mountPath: /grafana-dashboard-definition/kubernetes-statefulset + - name: scaling-config + mountPath: /grafana-dashboard-definition/scaling + ports: + - name: web + containerPort: 3000 + resources: + requests: + memory: 100Mi + cpu: 100m + limits: + memory: 200Mi + cpu: 200m + volumes: + - name: grafana-custom-config + configMap: + name: grafana-custom-config + - name: grafana-storage + emptyDir: {} + - name: grafana-datasources + configMap: + name: grafana-datasources + - name: grafana-dashboards + configMap: + name: grafana-dashboards + - name: grafana-dashboard-definition-knative + configMap: + name: grafana-dashboard-definition-knative + - 
name: grafana-dashboard-definition-knative-efficiency + configMap: + name: grafana-dashboard-definition-knative-efficiency + - name: grafana-dashboard-definition-knative-reconciler + configMap: + name: grafana-dashboard-definition-knative-reconciler + - name: grafana-dashboard-definition-kubernetes-deployment + configMap: + name: grafana-dashboard-definition-kubernetes-deployment + - name: grafana-dashboard-definition-kubernetes-capacity-planning + configMap: + name: grafana-dashboard-definition-kubernetes-capacity-planning + - name: grafana-dashboard-definition-kubernetes-cluster-health + configMap: + name: grafana-dashboard-definition-kubernetes-cluster-health + - name: grafana-dashboard-definition-kubernetes-cluster-status + configMap: + name: grafana-dashboard-definition-kubernetes-cluster-status + - name: grafana-dashboard-definition-kubernetes-control-plane-status + configMap: + name: grafana-dashboard-definition-kubernetes-control-plane-status + - name: grafana-dashboard-definition-kubernetes-resource-requests + configMap: + name: grafana-dashboard-definition-kubernetes-resource-requests + - name: grafana-dashboard-definition-kubernetes-nodes + configMap: + name: grafana-dashboard-definition-kubernetes-nodes + - name: grafana-dashboard-definition-kubernetes-pods + configMap: + name: grafana-dashboard-definition-kubernetes-pods + - name: grafana-dashboard-definition-kubernetes-statefulset + configMap: + name: grafana-dashboard-definition-kubernetes-statefulset + - name: scaling-config + configMap: + name: scaling-config diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-prometheus-scrape-config.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-prometheus-scrape-config.yaml new file mode 100644 index 0000000000..47ba3067cd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/100-prometheus-scrape-config.yaml @@ -0,0 +1,417 @@ +# Copyright 2018 The Knative 
Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-scrape-config + labels: + name: prometheus-scrape-config + serving.knative.dev/release: devel + namespace: knative-monitoring +data: + prometheus.yml: |- + global: + scrape_interval: 30s + scrape_timeout: 10s + evaluation_interval: 30s + scrape_configs: + # Controller endpoint + - job_name: controller + scrape_interval: 3s + scrape_timeout: 3s + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app, __meta_kubernetes_pod_container_port_name] + action: keep + regex: knative-serving;controller;metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Autoscaler endpoint + - job_name: autoscaler + scrape_interval: 3s + scrape_timeout: 3s + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app, __meta_kubernetes_pod_container_port_name] + action: keep + regex: knative-serving;autoscaler;metrics + # Rename metadata labels to be reader friendly + - 
source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Activator pods + - job_name: activator + scrape_interval: 3s + scrape_timeout: 3s + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app, __meta_kubernetes_pod_container_port_name] + action: keep + regex: knative-serving;activator;metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Webhook pods + - job_name: webhook + scrape_interval: 3s + scrape_timeout: 3s + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_role, __meta_kubernetes_pod_container_port_name] + action: keep + regex: knative-serving;webhook;metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Queue proxy metrics + - job_name: queue-proxy + scrape_interval: 3s + scrape_timeout: 3s + kubernetes_sd_configs: + - role: pod + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_pod_label_serving_knative_dev_revision, __meta_kubernetes_pod_container_port_name] + action: keep + regex: .+;http-usermetric + # Rename metadata labels to be reader friendly + - source_labels: 
[__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Fluentd daemonset + - job_name: fluentd-ds + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;fluentd-ds;prometheus-metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Istio mesh + - job_name: istio-mesh + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: istio-system;istio-telemetry;prometheus + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Istio Envoy + # These are very noisy and not enabled by default. 
+ # - job_name: istio-envoy + # scrape_interval: 5s + # kubernetes_sd_configs: + # - role: endpoints + # relabel_configs: + # # Scrape only the the targets matching the following metadata + # - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + # action: keep + # regex: istio-system;istio-statsd-prom-bridge;statsd-prom + # # Rename metadata labels to be reader friendly + # - source_labels: [__meta_kubernetes_namespace] + # target_label: namespace + # - source_labels: [__meta_kubernetes_pod_name] + # target_label: pod + # - source_labels: [__meta_kubernetes_service_name] + # target_label: service + # Istio policy + - job_name: istio-policy + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: istio-system;istio-policy;http-monitoring + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Istio telemetry + - job_name: istio-telemetry + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: istio-system;istio-telemetry;http-monitoring + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Istio pilot + - 
job_name: istio-pilot + scrape_interval: 5s + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: istio-system;istio-pilot;http-monitoring + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kube API server + - job_name: kube-apiserver + scheme: https + kubernetes_sd_configs: + - role: endpoints + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server_name: kubernetes + insecure_skip_verify: false + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_component, __meta_kubernetes_service_label_provider, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;apiserver;kubernetes;https + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kube controller manager + - job_name: kube-controller-manager + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;kube-controller-manager;http-metrics + # Rename metadata labels to be reader friendly + - source_labels: 
[__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kube scheduler + - job_name: kube-scheduler + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_k8s_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: kube-system;kube-scheduler;http-metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kube state metrics on https-main port + - job_name: kube-state-metrics-http-metrics + honor_labels: true + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;kube-state-metrics;http-metrics + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kube state metrics on https-self port + - job_name: kube-state-metrics-telemetry + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;kube-state-metrics;telemetry + # Rename metadata labels to be reader friendly + 
- source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Kubelet - nodes + # Rather than connecting directly to the node, the scrape is proxied though the + # Kubernetes apiserver. This means it will work if Prometheus is running out of + # cluster, or can't connect to nodes for some other reason (e.g. because of + # firewalling). + - job_name: kubernetes-nodes + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + # Kubelet - cAdvisor + # + # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics + # (those whose names begin with 'container_') have been removed from the + # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to + # retrieve those metrics. + # + # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor + # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics" + # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with + # the --cadvisor-port=0 Kubelet flag). 
+ - job_name: kubernetes-cadvisor + scrape_interval: 15s + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + # Node exporter + - job_name: node-exporter + scheme: https + kubernetes_sd_configs: + - role: endpoints + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + tls_config: + insecure_skip_verify: true + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;node-exporter;https + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + # Prometheus + - job_name: prometheus + kubernetes_sd_configs: + - role: endpoints + relabel_configs: + # Scrape only the the targets matching the following metadata + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_label_app, __meta_kubernetes_endpoint_port_name] + action: keep + regex: knative-monitoring;prometheus;web + # Rename metadata labels to be reader friendly + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + target_label: pod + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + - job_name: kubernetes-service-endpoints + 
kubernetes_sd_configs: + - role: endpoints + relabel_configs: + - action: keep + regex: true + source_labels: + - __meta_kubernetes_service_annotation_prometheus_io_scrape + - action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + source_labels: + - __address__ + - __meta_kubernetes_service_annotation_prometheus_io_port + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - action: replace + source_labels: + - __meta_kubernetes_namespace + target_label: kubernetes_namespace + - action: replace + source_labels: + - __meta_kubernetes_service_name + target_label: kubernetes_name diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/200-kube-controller-metrics.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/200-kube-controller-metrics.yaml new file mode 100644 index 0000000000..48547d8e74 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/200-kube-controller-metrics.yaml @@ -0,0 +1,32 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + namespace: knative-monitoring + name: kube-controller-manager + labels: + app: kube-controller-manager + serving.knative.dev/release: devel +spec: + selector: + k8s-app: kube-controller-manager + type: ClusterIP + clusterIP: None + ports: + - name: http-metrics + port: 10252 + targetPort: 10252 + protocol: TCP diff --git a/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/300-prometheus.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/300-prometheus.yaml new file mode 100644 index 0000000000..de31fceafd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/prometheus/300-prometheus.yaml @@ -0,0 +1,308 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Service +metadata: + name: prometheus-system-discovery + namespace: knative-monitoring + labels: + app: prometheus + serving.knative.dev/release: devel +spec: + clusterIP: None + ports: + - name: web + port: 9090 + protocol: TCP + targetPort: web + selector: + app: prometheus + sessionAffinity: None + type: ClusterIP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus-system + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-system + namespace: default + labels: + serving.knative.dev/release: devel +rules: +- apiGroups: [""] + resources: + - nodes + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-system + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +rules: +- apiGroups: [""] + resources: + - nodes + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-system + namespace: kube-system + labels: + serving.knative.dev/release: devel +rules: +- apiGroups: [""] + resources: + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-system + namespace: istio-system + labels: + serving.knative.dev/release: devel +rules: +- apiGroups: [""] + resources: + - nodes/metrics + - nodes + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus-system + namespace: knative-monitoring + 
labels: + serving.knative.dev/release: devel +rules: +- apiGroups: [""] + resources: + - nodes/metrics + - nodes + - services + - endpoints + - pods + - nodes/proxy + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-system + namespace: default + labels: + serving.knative.dev/release: devel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-system +subjects: +- kind: ServiceAccount + name: prometheus-system + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-system + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-system +subjects: +- kind: ServiceAccount + name: prometheus-system + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-system + namespace: kube-system + labels: + serving.knative.dev/release: devel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-system +subjects: +- kind: ServiceAccount + name: prometheus-system + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-system + namespace: istio-system + labels: + serving.knative.dev/release: devel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-system +subjects: +- kind: ServiceAccount + name: prometheus-system + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-system + labels: + serving.knative.dev/release: devel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-system +subjects: 
+- kind: ServiceAccount + name: prometheus-system + namespace: knative-monitoring +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus-system-np + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +spec: + type: NodePort + selector: + app: prometheus + ports: + - port: 8080 + targetPort: 9090 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: prometheus-system + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +spec: + replicas: 2 + podManagementPolicy: Parallel + serviceName: prometheus-system + selector: + matchLabels: + app: prometheus + template: + metadata: + labels: + app: prometheus + serving.knative.dev/release: devel + spec: + containers: + - args: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.path=/prometheus + - --storage.tsdb.retention=2d + - --storage.tsdb.no-lockfile + - --web.enable-lifecycle + - --web.route-prefix=/ + image: prom/prometheus:v2.2.1 + imagePullPolicy: IfNotPresent + name: prometheus + ports: + - containerPort: 9090 + name: web + protocol: TCP + livenessProbe: + httpGet: + path: /-/healthy + port: web + scheme: HTTP + initialDelaySeconds: 30 + failureThreshold: 10 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /-/ready + port: web + scheme: HTTP + failureThreshold: 10 + timeoutSeconds: 3 + resources: + limits: + memory: 1000Mi + requests: + memory: 400Mi + terminationMessagePath: /dev/termination-log + volumeMounts: + - name: prometheus-config-volume + mountPath: /etc/prometheus + - name: prometheus-storage-volume + mountPath: /prometheus + serviceAccountName: prometheus-system + terminationGracePeriodSeconds: 600 + volumes: + - name: prometheus-config-volume + configMap: + defaultMode: 420 + name: prometheus-scrape-config + - name: prometheus-storage-volume + emptyDir: {} + updateStrategy: + type: RollingUpdate diff --git 
a/test/vendor/knative.dev/serving/config/monitoring/metrics/stackdriver/100-stackdriver-serviceentry.yaml b/test/vendor/knative.dev/serving/config/monitoring/metrics/stackdriver/100-stackdriver-serviceentry.yaml new file mode 100644 index 0000000000..5d8321e581 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/metrics/stackdriver/100-stackdriver-serviceentry.yaml @@ -0,0 +1,52 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: networking.istio.io/v1alpha3 +kind: ServiceEntry +metadata: + name: googleapis + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +spec: + hosts: + - "*.googleapis.com" + - "accounts.google.com" + ports: + - number: 443 + name: https + protocol: HTTPS + location: MESH_EXTERNAL +--- +apiVersion: networking.istio.io/v1alpha3 +kind: ServiceEntry +metadata: + name: metadata-server + namespace: knative-monitoring + labels: + serving.knative.dev/release: devel +spec: + hosts: + - metadata.google.internal + addresses: + - 169.254.169.254 + ports: + - number: 80 + name: http + protocol: HTTP + - number: 443 + name: https + protocol: HTTPS + resolution: DNS + endpoints: + - address: 169.254.169.254 diff --git a/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/105-zipkin-service.yaml b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/105-zipkin-service.yaml new file mode 100644 index 0000000000..33bc91d94e --- /dev/null +++ 
b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/105-zipkin-service.yaml @@ -0,0 +1,29 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: zipkin + # istio assumes that tracing is installed in istio-system namespace - + # we have to install to istio-system until istio allows overriding this behavior. + namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + ports: + - name: http + port: 9411 + selector: + app: jaeger diff --git a/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/elasticsearch/100-jaeger.yaml b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/elasticsearch/100-jaeger.yaml new file mode 100644 index 0000000000..364ecd7cd1 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/elasticsearch/100-jaeger.yaml @@ -0,0 +1,30 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: jaeger + # istio assumes that tracing is installed in istio-system namespace - + # we have to install to istio-system until istio allows overriding this behavior. + namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + strategy: production + storage: + type: elasticsearch + options: + es: + server-urls: http://elasticsearch-logging.knative-monitoring.svc.cluster.local:9200 diff --git a/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/memory/100-jaeger.yaml b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/memory/100-jaeger.yaml new file mode 100644 index 0000000000..00e4d300a8 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/tracing/jaeger/memory/100-jaeger.yaml @@ -0,0 +1,23 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: jaegertracing.io/v1 +kind: Jaeger +metadata: + name: jaeger + # istio assumes that tracing is installed in istio-system namespace - + # we have to install to istio-system until istio allows overriding this behavior. 
+ namespace: istio-system + labels: + serving.knative.dev/release: devel diff --git a/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin-in-mem/100-zipkin.yaml b/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin-in-mem/100-zipkin.yaml new file mode 100644 index 0000000000..bd2fd6efdd --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin-in-mem/100-zipkin.yaml @@ -0,0 +1,67 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: zipkin + # istio assumes that zipkin is installed in istio-system namespace - + # we have to install to istio-system until istio allows overriding this behavior. 
+ namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + ports: + - name: http + port: 9411 + selector: + app: zipkin +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zipkin + namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: zipkin + template: + metadata: + labels: + app: zipkin + serving.knative.dev/release: devel + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: zipkin + image: docker.io/openzipkin/zipkin:2.13.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9411 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + resources: + limits: + memory: 1000Mi + requests: + memory: 256Mi diff --git a/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin/100-zipkin.yaml b/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin/100-zipkin.yaml new file mode 100644 index 0000000000..d3c29cc2a9 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/monitoring/tracing/zipkin/100-zipkin.yaml @@ -0,0 +1,75 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: zipkin + # istio assumes that zipkin is installed in istio-system namespace - + # we have to install to istio-system until istio allows overriding this behavior. 
+ namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + ports: + - name: http + port: 9411 + selector: + app: zipkin +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zipkin + namespace: istio-system + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: zipkin + template: + metadata: + labels: + app: zipkin + serving.knative.dev/release: devel + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: zipkin + image: docker.io/openzipkin/zipkin:2.13.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9411 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: STORAGE_TYPE + value: elasticsearch + - name: ES_HOSTS + value: elasticsearch-logging.knative-monitoring.svc.cluster.local:9200 + - name: ES_INDEX + value: zipkin + - name: ZIPKIN_UI_LOGS_URL + value: http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/ + resources: + limits: + memory: 1000Mi + requests: + memory: 256Mi diff --git a/test/vendor/knative.dev/serving/config/namespace-wildcard-certs/controller.yaml b/test/vendor/knative.dev/serving/config/namespace-wildcard-certs/controller.yaml new file mode 100644 index 0000000000..e574fcdd7f --- /dev/null +++ b/test/vendor/knative.dev/serving/config/namespace-wildcard-certs/controller.yaml @@ -0,0 +1,93 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: networking-ns-cert + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/wildcard-certificate-provider: nscert +spec: + selector: + matchLabels: + app: networking-ns-cert + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: networking-ns-cert + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: networking-nscert + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/networking/nscert + + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + + # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config + - name: METRICS_DOMAIN + value: knative.dev/serving + + securityContext: + allowPrivilegeEscalation: false + + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: networking-ns-cert + serving.knative.dev/release: devel + networking.knative.dev/wildcard-certificate-provider: nscert + name: networking-ns-cert + namespace: knative-serving +spec: + ports: + # Define metrics and profiling for them to be accessible within service meshes. 
+ - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: networking-ns-cert diff --git a/test/vendor/knative.dev/serving/config/networking-certmanager.yaml b/test/vendor/knative.dev/serving/config/networking-certmanager.yaml new file mode 120000 index 0000000000..a4f43e8b42 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/networking-certmanager.yaml @@ -0,0 +1 @@ +cert-manager/controller.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/networking-istio.yaml b/test/vendor/knative.dev/serving/config/networking-istio.yaml new file mode 120000 index 0000000000..2bf1688b9c --- /dev/null +++ b/test/vendor/knative.dev/serving/config/networking-istio.yaml @@ -0,0 +1 @@ +istio-ingress/controller.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/networking-ns-cert.yaml b/test/vendor/knative.dev/serving/config/networking-ns-cert.yaml new file mode 120000 index 0000000000..37ae032bc5 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/networking-ns-cert.yaml @@ -0,0 +1 @@ +namespace-wildcard-certs/controller.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/config/post-install/default-domain.yaml b/test/vendor/knative.dev/serving/config/post-install/default-domain.yaml new file mode 100644 index 0000000000..46fd11dcc1 --- /dev/null +++ b/test/vendor/knative.dev/serving/config/post-install/default-domain.yaml @@ -0,0 +1,72 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: batch/v1 +kind: Job +metadata: + name: default-domain + namespace: knative-serving + labels: + app: "default-domain" + serving.knative.dev/release: devel +spec: + template: + metadata: + labels: + app: "default-domain" + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: default-domain + # This is the Go import path for the binary that is containerized + # and substituted here. + image: knative.dev/serving/cmd/default-domain + args: ["-magic-dns=xip.io"] + ports: + - name: http + containerPort: 8080 + readinessProbe: &probe + httpGet: + port: 8080 + livenessProbe: *probe + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: Never + backoffLimit: 10 + +--- +apiVersion: v1 +kind: Service +metadata: + name: default-domain-service + namespace: knative-serving + labels: + app: default-domain + serving.knative.dev/release: devel +spec: + selector: + app: default-domain + ports: + - name: http + port: 80 + targetPort: 8080 + type: ClusterIP diff --git a/test/vendor/knative.dev/serving/config/webhook.yaml b/test/vendor/knative.dev/serving/config/webhook.yaml new file mode 120000 index 0000000000..4aedde6aca --- /dev/null +++ b/test/vendor/knative.dev/serving/config/webhook.yaml @@ -0,0 +1 @@ +core/deployments/webhook.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/container.yaml b/test/vendor/knative.dev/serving/container.yaml new file mode 100644 
index 0000000000..517c0e328f --- /dev/null +++ b/test/vendor/knative.dev/serving/container.yaml @@ -0,0 +1,7 @@ +go: + modules: + - module: github.com/knative/serving +image_build_method: imagebuilder +platforms: + only: + - x86_64 diff --git a/test/vendor/knative.dev/serving/content_sets.yml b/test/vendor/knative.dev/serving/content_sets.yml new file mode 100644 index 0000000000..511a5b922e --- /dev/null +++ b/test/vendor/knative.dev/serving/content_sets.yml @@ -0,0 +1,3 @@ +x86_64: +- rhel-8-for-x86_64-baseos-rpms +- rhel-8-for-x86_64-appstream-rpms diff --git a/test/vendor/knative.dev/serving/docs/client-conventions.md b/test/vendor/knative.dev/serving/docs/client-conventions.md new file mode 100644 index 0000000000..d15e79ac2b --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/client-conventions.md @@ -0,0 +1,4 @@ +# Client Conventions + +This document has been moved to the +[Knative Client Repository](https://github.com/knative/client/tree/master/conventions). diff --git a/test/vendor/knative.dev/serving/docs/product/personas.md b/test/vendor/knative.dev/serving/docs/product/personas.md new file mode 100644 index 0000000000..c612bee7cc --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/product/personas.md @@ -0,0 +1,65 @@ +# Knative Serving Personas + +When discussing user actions, it is often helpful to +[define specific user roles]() +who might want to do the action. + +## Knative Serving Compute + +### Developer Personas + +The developer personas are software engineers looking to build and run a +stateless application without concern about the underlying infrastructure. +Developers expect to have tools which integrate with their native language +tooling or business processes. 
+
+- Hobbyist
+- Backend SWE
+- Full stack SWE
+- SRE
+
+User stories:
+
+- Deploy some code
+- Update environment
+- Roll back the last change
+- Debug an error in code
+- Monitor my application
+
+### Operator Personas
+
+The operator personas are focused on deploying and managing both Knative Serving
+and the underlying Kubernetes cluster, as well as applying organization policy
+and security patches.
+
+- Hobbyist / Contributor
+- Cluster administrator
+- Security Engineer / Auditor
+- Capacity Planner
+
+User stories:
+
+- Create a Knative Serving cluster
+- Apply policy / RBAC
+- Control or charge back for resource usage
+- Choose logging or monitoring plugins
+- Audit or patch running Revisions
+
+## Contributors
+
+Contributors are an important part of the Knative Serving project. As such, we
+will also consider how various infrastructure encourages and enables
+contributors to the project, as well as the impact on end-users.
+
+- Hobbyist or newcomer
+- Motivated user
+- Corporate (employed) maintainer
+- Consultant
+
+User stories:
+
+- Check out the code
+- Build and run the code
+- Run tests
+- View test status
+- Run performance tests diff --git a/test/vendor/knative.dev/serving/docs/resources-overview.md b/test/vendor/knative.dev/serving/docs/resources-overview.md new file mode 100644 index 0000000000..614fe44ca2 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/resources-overview.md @@ -0,0 +1,116 @@ +# Resources Overview
+
+This document provides a high-level description of the resources deployed to a
+Kubernetes cluster in order to run Knative Serving. The exact list of resources
+is going to change frequently during the current phase of active development. In
+order to keep this document from becoming out-of-date frequently it doesn't
+describe the exact individual resources but instead the higher level objects
+which they form.
+
+## Dependencies
+
+Knative Serving depends on [Istio](https://istio.io/) in order to function. 
+Istio is responsible for setting up the network routing both inside the cluster
+and ingress into the cluster.
+
+## Components
+
+There are four primary components to the Knative Serving system. The first is
+the _Controller_ which is responsible for updating the state of the cluster
+based on user input. The second is the _Webhook_ component which handles
+validation of the objects and actions performed. The third is an _Activator_
+component which brings back scaled-to-zero pods and forwards requests. The
+fourth is the _Autoscaler_ which scales pods as requests come in.
+
+The controller processes a series of state changes in order to move the system
+from its current, actual state to the state desired by the user.
+
+All of the Knative Serving components are deployed into the `knative-serving`
+namespace. You can see the various objects in this namespace by running
+`kubectl -n knative-serving get all`
+([minus some admin-level resources like service accounts](https://github.com/kubernetes/kubectl/issues/151)).
+To see only objects of a specific type, for example to see the webhook and
+controller deployments inside Knative Serving, you can run
+`kubectl -n knative-serving get deployments`.
+
+The Knative Serving controller creates Kubernetes and Istio resources when
+Knative Serving resources are created and updated. It will also create Build
+resources when provided in the Configuration spec. These sub-resources will be
+created in the same namespace as their parent Knative Serving resource, _not_
+the `knative-serving` namespace. For example, if you create a Knative Service in
+namespace 'foo' the corresponding Istio resources will also be in namespace
+'foo'.
+
+All of these components are run as a non-root user (uid: 1337) and disallow
+privilege escalation. 
+ +## Kubernetes Resource Configs + +The various Kubernetes resource configurations are organized as follows: + +```plain +# Knative Serving resources +config/*.yaml + +# Istio release configuration +third_party/istio-*/install/kubernetes/... + +# Knative Serving Monitoring configs (Optional) +config/monitoring/... + +# Knative Build resources (Optional) +third_party/config/build/release.yaml + +``` + +## Viewing resources after deploying Knative Serving + +### Custom Resource Definitions + +To view all of the custom resource definitions created, run +`kubectl get customresourcedefinitions`. These resources are named according to +their group, i.e. custom resources created by Knative Serving end with +`serving.knative.dev` or `internal.knative.dev`. + +### Deployments + +View the Knative Serving specific deployments by running +`kubectl -n knative-serving get deployments`. These deployments will ensure that +the correct number of pods are running for that specific deployment. + +For example, given: + +```console +$ kubectl -n knative-serving get deployments +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +activator 1 1 1 1 5d +autoscaler 1 1 1 1 5d +controller 1 1 1 1 5d +webhook 1 1 1 1 5d +``` + +Based on the desired state shown above, we expect there to be a single pod +running for each of the deployments shown above. We can verify this by running +and seeing similar output as shown below: + +```console +$ kubectl -n knative-serving get pods +NAME READY STATUS RESTARTS AGE +activator-c8495dc9-z7xpz 2/2 Running 0 5d +autoscaler-66897845df-t5cwg 2/2 Running 0 5d +controller-699fb46bb5-xhlkg 1/1 Running 0 5d +webhook-76b87b8459-tzj6r 1/1 Running 0 5d +``` + +Similarly, you can run the same commands in the istio (`istio-system`) +namespaces to view the running deployments. To view all namespaces, run +`kubectl get namespaces`. 
+
+### Service Accounts and RBAC policies
+
+To view the service accounts configured for Knative Serving, run
+`kubectl -n knative-serving get serviceaccounts`.
+
+To view all cluster role bindings, run `kubectl get clusterrolebindings`.
+Unfortunately there is currently no mechanism to fetch the cluster role bindings
+that are tied to a service account. diff --git a/test/vendor/knative.dev/serving/docs/roadmap/scaling-2019.md b/test/vendor/knative.dev/serving/docs/roadmap/scaling-2019.md new file mode 100644 index 0000000000..bb65de3bf4 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/roadmap/scaling-2019.md @@ -0,0 +1,193 @@ +# 2019 Autoscaling Roadmap
+
+This is what we hope to accomplish in 2019.
+
+## Performance
+
+### Tests and reliable reporting
+
+As an overarching goal, we want all aspects of our performance continuously
+measured and reliably reported. All of the following aspects will include
+in-depth testing and reporting to make sure that advancements are reproducible
+on the CI systems and to avoid unwanted regressions.
+
+**Goal**: All relevant performance numbers are tracked and reported.
+
+**Project**: No separate project for now.
+
+### Sub-Second Cold Start
+
+As a serverless framework, Knative should only run code when it needs to.
+Including scaling to zero when the Revision is not being used. However the
+Revision must also come back quickly, otherwise the illusion of "serverless" is
+broken--it must seem as if it was always there. Generally less than one second
+is a good start.
+
+Today cold-starts are between 10 and 15 seconds which is an order of magnitude
+too slow. The time is spent starting the pod, waiting for Envoy to initialize,
+and setting up routing. Without the Istio mesh (just routing requests to
+individual pods as they come up) still takes about 4 seconds. We've poked at
+this problem in 2018 ([#1297](https://github.com/knative/serving/issues/1297))
+but haven't made significant progress. 
This area requires some dedicated effort.
+
+**Goal**: Achieve sub-second average cold-starts of disk-warm Revisions.
+
+**Project**: [Project 8](https://github.com/knative/serving/projects/8)
+
+### Overload Handling
+
+Knative Serving provides concurrency controls to limit the number of requests a
+container can handle simultaneously. Additionally, each pod has a queue for
+holding requests when the container concurrency limit has been reached. When the
+pod-level queue overflows, subsequent requests are rejected with 503 "overload".
+
+This is desirable to protect the pod from being overloaded. But the aggregate
+behavior is not ideal for situations when autoscaling needs some time to react
+to sudden increases in request load. This could happen when the Revision is
+scaled to zero or when the Revision is already running some pods, but not nearly
+enough.
+
+The goal of Overload Handling is to enqueue requests at a Revision-level.
+Scale-from-zero should not overload if autoscaling can react in a reasonable
+amount of time to provide additional pods. When new pods come online, they
+should be able to take load from the existing pods. Even when scaled above zero,
+brief spikes of overload should be handled by enqueuing requests at a
+Revision-level. The depth of the Revision-level queue should also be
+configurable because even the Revision as a whole needs to guard against
+overload.
+
+The overall problem touches on both networking and autoscaling, two different
+working groups. Much of the overload handling will be implemented in the
+Activator, which is a part of ingress. So this project is shared jointly between
+the two working groups.
+
+**Goal**: Requests can be enqueued at the Revision-level in response to high
+load.
+
+**Project**: [Project 7](https://github.com/knative/serving/projects/7)
+
+## Reliability
+
+### Autoscaling Availability
+
+Because Knative scales to zero, the autoscaling system is in the critical-path
+for serving requests. 
If the Autoscaler or Activator isn't available when an +idle Revision receives a request, that request will not be served. The Activator +is stateless and can be easily scaled horizontally. Any Activator pod can proxy +any request for any Revision. But the Autoscaler pod is stateful. It maintains +request statistics over a window of time. Moreover, the relationship between +Activator and Autoscaler is N:1 currently because of how the Activator pushes +metrics into the Autoscaler via a Websocket connection. + +We need a way for autoscaling to have higher availability than that of a single +pod. When an Autoscaler pod fails, another one should take over, quickly. And +the new Autoscaler pod should make equivalent scaling decisions. + +**Goal**: The autoscaling is more highly available than a single pod. + +**Project**: TBD + +### Autoscaling Scalability + +The Autoscaler process maintains pod metric data points over a window of time +and calculates average concurrency every 2 seconds. As the number and size of +Revisions deployed to a cluster increases, so does the load on the Autoscaler. + +We need some way to have sub-linear load on a given Autoscaler pod as the +Revision count increases. This could be a sharding scheme or simply deploying +separate Autoscalers per namespace. + +**Goal**: The autoscaling system can scale sub-linearly with the number of +Revisions and number of Revision pods. + +**Project**: TBD + +## Extendability + +### Pluggability + +It is possible to replace the entire autoscaling system by implementing an +alternative PodAutoscaler reconciler (see the +[Yolo controller](https://github.com/josephburnett/kubecon18)). However that +requires collecting metrics, running an autoscaling process, and actuating the +recommendations. + +We should be able to swap out smaller pieces of the autoscaling system. For +example, the HPA should be able to make use of the metrics that Knative +collects. 
+
+**Goal**: The autoscaling decider and metrics collection components can be
+replaced independently.
+
+**Project**: TBD
+
+### HPA Integration
+
+The current Knative integration with K8s HPA only supports CPU autoscaling.
+However it should be able to scale on concurrency as well. Ultimately, the HPA
+may be able to replace the Knative Autoscaler (KPA) entirely (see
+["make everything better"](https://github.com/knative/serving/blob/master/docs/roadmap/scaling-2018.md#references)).
+Additionally, HPA should be able to scale on user-provided custom metrics as
+well.
+
+**Goal**: Knative HPA-class PodAutoscalers support concurrency autoscaling.
+
+**Project**: TBD
+
+## User Experience
+
+### Migrating Kubernetes Deployments to Knative
+
+We need documentation and examples to help Kubernetes users with existing
+Kubernetes Deployments migrate some of those to Knative to take advantage of
+request-based autoscaling and scale-to-zero.
+
+**Goal**: Increase Knative adoption by making migration from Kubernetes
+Deployments simple.
+
+## What We Are Not Doing Yet
+
+### Removing the Queue Proxy Sidecar
+
+There are two sidecars injected into Knative pods, Envoy and the Queue Proxy.
+The queue-proxy sidecar is where we put everything we wish Envoy/Istio could do,
+but doesn't yet. For example, enforcing single-threaded requests or reporting
+concurrency metrics in the way we want. Ultimately we should push these features
+upstream and get rid of the queue-proxy sidecar.
+
+However we're not doing that yet because the requirements haven't stabilized
+enough yet. And it's still useful to have a component to innovate within.
+
+See
+[2018 What We Are Not Doing Yet](https://github.com/knative/serving/blob/master/docs/roadmap/scaling-2018.md#what-we-are-not-doing-yet)
+
+### Vertical Pod Autoscaling Beta
+
+A serverless system should be able to run code efficiently. Knative has default
+resource requests and it supports resource requests and limits from the user. 
+But if the user doesn't want to spend their time "tuning" resources (which is +very "serverful") then Knative should be able to just "figure it out". That is +Vertical Pod Autoscaling (VPA). + +Knative +[previously integrated with VPA Alpha](https://github.com/knative/serving/issues/839#issuecomment-389387311). +Now it needs to reintegrate with VPA Beta. In addition to creating VPA resources +for each Revision, we need to do a little bookkeeping for the unique +requirements of serverless workloads. For example, the window for VPA +recommendations is 2 weeks. But a serverless function might be invoked once per +year (e.g. when the fire alarm gets pulled). The pods should come back with the +correct resource requests and limits. The way VPA is architected, it "injects" +the correct recommendations via mutating webhook. It will decline to update +resources requests after 2 weeks of inactivity and the Revision would fall back +to defaults. Knative needs to remember what that recommendation was and make +sure new pods start at the right levels. + +Additionally, the next Revision should learn from the previous. But it must not +taint the previous Revision's state. For example, when a Service is in runLatest +mode, the next Revision should start from the resource recommendations of the +previous. Then VPA will apply learning on top of that to adjust for changes in +the application behavior. However if the next Revision goes crazy because of bad +recommendations, a quick rollback to the previous should pick up the good ones. +Again, this requires a little bit of bookkeeping in Knative. 
+ +**Project**: [Project 18](https://github.com/knative/serving/projects/18) diff --git a/test/vendor/knative.dev/serving/docs/runtime-contract.md b/test/vendor/knative.dev/serving/docs/runtime-contract.md new file mode 100644 index 0000000000..9020c7103b --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/runtime-contract.md @@ -0,0 +1,538 @@ +# Knative Runtime Contract + +## Abstract + +The Knative serverless compute infrastructure extends the +[Open Container Initiative Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/master/spec.md) +to describe the functionality and requirements for serverless execution +workloads. In contrast to general-purpose containers, stateless +request-triggered (i.e. on-demand) autoscaled containers have the following +properties: + +- Little or no long-term runtime state (especially in cases where code might be + scaled to zero in the absence of request traffic). +- Logging and monitoring aggregation (telemetry) is important for understanding + and debugging the system, as containers might be created or deleted at any + time in response to autoscaling. +- Multitenancy is highly desirable to allow cost sharing for bursty applications + on relatively stable underlying hardware resources. + +This contract does not define the control surfaces over the runtime environment +except by [reference to the Knative Kubernetes resources](spec/spec.md). +Similarly, this contract does not define the implementation of metrics or +logging aggregation, except to provide a contract for the collection of logging +data. It is expected that access to the aggregated telemetry will be provided by +the platform operator. + +## Background + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", +"SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" are to be +interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). 
+ +The +[OCI specification](https://github.com/opencontainers/runtime-spec/blob/master/spec.md) +([v1.0.1](https://github.com/opencontainers/runtime-spec/blob/v1.0.1/spec.md)) +is the basis for this document. When this document and the OCI specification +conflict, this document is assumed to override the general OCI recommendations. +Where this document does not specify behavior, runtime implementations SHOULD be +OCI compliant with respect to those features. Additionally, the core Knative +definition assumes the +[Linux Container Configuration](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md). + +In particular, the default Knative implementation relies on Kubernetes behavior +to implement container operation. In some cases, current Kubernetes behavior in +2018 is not as performant as envisioned in this documentation. The goal of the +Knative authors is to push as much of the needed functionality into Kubernetes +and/or HTTP routers as possible, rather than implementing reach-around layers. + +This document considers two users of a given Knative environment, and is +particularly concerned with the expectations of _developers_ (and _language and +tooling developers_, by extension) running code in the environment. + +- **Developers** write code which is packaged into a container which is run on + the Knative cluster. + - **Language and tooling developers** typically write tools used by + _developers_ to package code into containers. As such, they are concerned + that tooling which wraps developer code complies with this runtime contract. +- **Operators** (also known as **platform providers**) provision the compute + resources and manage the software configuration of Knative and the underlying + abstractions (for example, Linux, Kubernetes, Istio, etc). + +## Runtime and Lifecycle + +Knative aims to minimize the amount of tuning and production configuration +needed to run a service. Some of these production-friendly features include: + +1. 
Stateless computation at request-scale or event-scale granularity. +1. Automatic scaling between 0 and many instances (the process scale-out + model). +1. Automatic adjustment of resource requirements based on observed behavior, + where possible. + +In order to achieve these properties, containers which are operated as part of a +serverless platform are expected to observe the following properties: + +- Fast startup time (<1s until a request or event can be processed, given + container image layer caching), +- Minimize local state (in support of autoscaling and scale to zero), +- CPU usage only while requests are active (see + [this issue](https://github.com/knative/serving/issues/848) for reasons an + operator might want to de-allocate CPU between requests). + +### State + +In a highly-shared environment, containers might experience the following: + +- Containers with `status` of `stopped` MAY be immediately reclaimed by the + system. +- The container process MAY be started as pid 0, through the use of PID + namespaces or other processes. + +### Lifecycle + +- The container MAY be killed when the container is inactive. Containers MUST be + considered "active" while they are handling at least one request, but other + conditions MAY also be used to determine that a container is active. The + container is sent a `SIGTERM` signal when it is killed via the + [OCI specification's `kill`](https://github.com/opencontainers/runtime-spec/blob/master/runtime.md#kill) + command to allow for a graceful shutdown of existing resources and + connections. If the container has not shut down after a defined grace period, + the container is forcibly killed via a `SIGKILL` signal. +- The environment MAY restrict the use of `prestart`, `poststart`, and + `poststop` hooks to platform operators rather than developers. 
All of these + hooks are defined in the context of the runtime namespace, rather than the + container namespace, and might expose system-level information (and are + non-portable). +- Failures of the developer-specified process MUST be logged to a + developer-visible logging system. + +In addition, some serverless environments MAY use an execution model other than +docker in linux (for example, [runv](https://github.com/hyperhq/runv) or +[Kata Containers](https://katacontainers.io/)). Implementations using an +execution model beyond docker in linux MAY alter the lifecycle contract beyond +the OCI specification as long as: + +1. An OCI-compliant lifecycle contract is the default, regardless of how many + extensions are provided. +1. The implementation of an extended execution model or lifecycle MUST provide + documentation about the extended model or lifecycle **and** documentation + about how to opt in to the extended lifecycle contract. + +### Errors + +- Platforms MAY provide mechanisms for post-mortem viewing of filesystem + contents from a particular execution. Because containers (particularly failing + containers) can experience frequent starts, operators or platform providers + SHOULD limit the total space consumed by these failures. + +### Warnings + +As specified by OCI. + +### Operations + +It is expected that containers do not have direct access to the +[OCI interface](https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc3/runtime.md#operations) +as providing access allows containers to circumvent runtime restrictions that +are enforced by the Knative control plane. The operator or platform provider MAY +have the ability to directly interact with the OCI interface, but that is beyond +the scope of this specification. + +An OPTIONAL method of invoking the `kill` operation MAY be exposed to developers +to provide signalling to the container. 
+ +### Hooks + +Operation hooks +[SHOULD NOT](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go) +be configurable by the Knative developer. Operators or platform providers MAY +use hooks to implement their own lifecycle controls. + +### Linux Runtime + +#### File descriptors + +A read from the `stdin` file descriptor on the container +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/file_descriptor_test.go) +always result in `EOF`. The `stdout` and `stderr` file descriptors on the +container SHOULD be collected and retained in a developer-accessible logging +repository. (TODO:[docs#902](https://github.com/knative/docs/issues/902)). + +Within the container, pipes and file descriptors can be used to communicate +between processes running in the same container. + +#### Dev symbolic links + +As specified by OCI. + +## Network Environment + +For request-response functions, 0->many scaling is enabled by control of the +inbound request path to enable capturing and stalling inbound requests until an +autoscaled container is available to serve that request. + +### Inbound network connectivity + +Inbound network connectivity is assumed to use HTTP/1.1 compatible transport. + +#### Protocols and Ports + +The container MUST accept HTTP/1.1 requests from the environment. The +environment SHOULD +[offer an HTTP/2.0 upgrade option](https://http2.github.io/http2-spec/#discover-http) +(`Upgrade: h2c` on either the initial request or an `OPTIONS` request) on the +same port as HTTP/1.1. The developer MAY specify this port at deployment; if the +developer does not specify a port, the platform provider MUST provide a default. +Only one inbound `containerPort` +[SHALL](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go) +be specified in the +[`core.v1.Container`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#containerport-v1-core) +specification. 
The `hostPort` parameter
+[SHOULD NOT](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go)
+be set by the developer or the platform provider, as it can interfere with
+ingress autoscaling. Regardless of its source, the selected port will be made
+available in the `PORT` environment variable.
+
+The platform provider SHOULD configure the platform to perform HTTPS termination
+and protocol transformation e.g. between QUIC or HTTP/2 and HTTP/1.1. Developers
+ought not need to implement multiple transports between the platform and their
+code. Unless overridden by setting the
+[`name`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#containerport-v1-core)
+field on the inbound port, the platform will perform automatic detection as
+described above. If the
+[`core.v1.Container.ports[0].name`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#containerport-v1-core)
+is set to one of the following values, HTTP negotiation will be disabled and the
+following protocol will be used:
+
+- `http1`: HTTP/1.1 transport and will not attempt to upgrade to h2c.
+- `h2c`: HTTP/2 transport, as described in
+  [section 3.4 of the HTTP2 spec (Starting HTTP/2 with Prior Knowledge)](https://http2.github.io/http2-spec/#known-http)
+
+Developers ought to use automatic content negotiation where available, and MUST
+NOT set the `name` field to arbitrary values, as additional transports might be
+defined in the future. Developers can assume all traffic is intermediated by an
+L7 proxy. Developers can not assume a direct network connection between their
+server process and client processes.
+
+#### Headers
+
+As requests to the container will be proxied by the platform, all inbound
+request headers
+[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/header_test.go)
+be set to the same values as the incoming request. 
Some implementations MAY +strip certain HTTP headers for security or other reasons; such implementations +SHOULD document the set of stripped headers. Because the full set of HTTP +headers is constantly evolving, it is RECOMMENDED that platforms which strip +headers define a common prefix which covers all headers removed by the platform. + +In addition, the following base set of HTTP/1.1 headers +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/header_test.go) +be set on the request: + +- `Host` - As specified by + [RFC 7230 Section 5.4](https://tools.ietf.org/html/rfc7230#section-5.4) + +Also, the following proxy-specific request headers +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/header_test.go) +be set: + +- `Forwarded` - As specified by [RFC 7239](https://tools.ietf.org/html/rfc7239). + +Additionally, the following legacy headers +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/header_test.go) +be set for compatibility with client software: + +- `X-Forwarded-For` +- `X-Forwarded-Proto` + +In addition, the following headers +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/header_test.go) +be set to enable tracing and observability features: + +- Trace headers - Platform providers SHOULD provide and document headers needed + to propagate trace contexts, + [in the absence of w3c standardization](https://www.w3.org/2018/04/distributed-tracing-wg-charter.html). + +Operators and platform providers MAY provide additional headers to provide +environment specific information. 
+ +#### Meta Requests + +The +[`core.v1.Container`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#container-v1-core) +object allows specifying both a +[`readinessProbe`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-readiness-probes) +and a +[`livenessProbe`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#define-a-liveness-http-request). +If not provided, container startup and listening on the declared HTTP socket is +considered sufficient to declare the container "ready" and "live" (see the probe +definition below). If specified, liveness and readiness probes are REQUIRED to +be of the `httpGet` or `tcpSocket` types, and +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go) +target the inbound container port; platform providers SHOULD disallow other +probe methods. + +Because serverless platforms automatically scale instances based on inbound +requests, and because noncompliant (or even failing) containers might be +provided by developers, the following defaults SHOULD be applied by the platform +provider if not set by the developer. The probes are intended to be trivially +supportable by naive conforming containers while preventing interference with +developer code. These settings apply to both `livenessProbe` and +`readinessProbe`: + +- `tcpSocket` set to the container's port +- `initialDelaySeconds` set to 0 +- `periodSeconds` set to platform-specific value + +Setting `initialDelaySeconds` to a value greater than 0 impacts container +startup time (aka cold start time) as a container will not serve traffic until +the probe succeeds. 
+ +##### Deployment probe + +On the initial deployment, platform providers +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/errorcondition_test.go) +start an instance of the container to validate that the container is valid and +will become ready. This startup +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/errorcondition_test.go) +occur even if the container would not serve any user requests. If a container +cannot satisfy the `readinessProbe` during deployment startup, the Revision +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/errorcondition_test.go) +be marked as failed. + +Initial readiness probes allow the platform to avoid attempting to later +provision or scale deployments (Revisions) which cannot become healthy, and act +as a backstop to developer testing (via CI/CD or otherwise) which has been +performed on the supplied container. Common causes of these failures can +include: malformed dynamic code not tested in the container, environment +differences between testing and deployment environment, and missing or +misconfigured backends. This also provides an opportunity for the container to +be run at least once despite scale-to-zero guarantees. + +### Outbound network connectivity + +OCI does not specify any properties of the network environment in which a +container runs. The following items are OPTIONAL additions to the runtime +contract which describe services which might be of particular value to platform +providers. + +#### DNS + +Platform providers SHOULD override the DNS related configuration files under +`/etc` to enable local DNS lookups in the target environment (see +[Default Filesystems](#default-filesystems)). + +#### Metadata Services + +Platform providers MAY provide a network service to provide introspection and +environment information to the running process. 
Such a network service SHOULD be +an HTTP server with an operator- or provider-defined URL schema. If a metadata +service is provided, the schema MUST be documented. Sample use cases for such +metadata include: + +- Container information or control interfaces. +- Host information, including maintenance or capability information. +- Access to external configuration stores (such as the Kubernetes ConfigMap + APIs). +- Access to secrets or identity tokens, to enable key rotation. + +## Configuration + +### Root + +Platform providers MAY set the `readonly` bit on the container to `true` in +order to reduce the possible disk space provisioning and management of +serverless workloads. Containers MUST use the provided temporary storage areas +(see [Default Filesystems](#default-filesystems)) for working files and caches. + +### Mounts + +In general, stateless applications package their dependencies within the +container and do not rely on mutable external state for templates, logging +configuration, etc. In some cases, it might be necessary for certain application +settings to be overridden at deploy time (for example, database backends or +authentication credentials). When these settings need to be loaded via a file, +read-only mounts of application configuration and secrets are supported by +`ConfigMap` and `Secrets` volumes. Platform providers MAY apply updates to +`Secrets` and `ConfigMaps` while the application is running; these updates could +complicate rollout and rollback. It is up to the developer to choose appropriate +policies for mounting and updating `ConfigMap` and `Secrets` which are mounted +as volumes. + +As serverless applications are expected to scale horizontally and statelessly, +per-container volumes are likely to introduce state and scaling bottlenecks and +are NOT RECOMMENDED. 
+ +### Process + +Serverless applications which scale horizontally are expected to be managed in a +declarative fashion, and individual instances SHOULD NOT be interacted with or +connected directly. + +- The `terminal` property + [SHOULD NOT](https://github.com/knative/serving/blob/master/test/conformance/runtime/filesystem_test.go) + be set to `true`. +- The linux process specific properties MUST NOT be configurable by the + developer, and MAY set by the operator or platform provider. + +The following environment variables +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/envvars_test.go) +be set: + +| Name | Meaning | +| ------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| `PORT` | Ingress `containerPort` for ingress requests and health checks. See [Inbound network connectivity](#inbound-network-connectivity) for more details. | + +The following environment variables +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/envvars_test.go) +be set: + +| Name | Meaning | +| ----------------- | ---------------------------------------------------------------------------------------------------------------- | +| `K_REVISION` | Name of the current Revision. | +| `K_CONFIGURATION` | Name of the Configuration that created the current Revision. | +| `K_SERVICE` | If the current Revision has been created by manipulating a Knative Service object, name of this Knative Service. | + +Platform providers MAY set additional environment variables. Standardization of +such variables will follow demonstrated usage and utility. + +### User + +Developers MAY specify that containers be run as a specific user or group ID +using the `runAsUser` container property. 
If specified, the runtime +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/user_test.go) +run the container as the specified user ID if allowed by the platform (see +below). If no `runAsUser` is specified, a platform-specific default SHALL be +used. Platform Providers SHOULD document this default behavior. + +Operators and Platform Providers MAY prohibit certain user IDs, such as `root`, +from executing code. In this case, if the identity selected by the developer is +invalid, the container execution +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go) +be failed. + +### Default Filesystems + +The OCI specification describes a default container environment which can be +used for many different purposes, including containerization of existing legacy +or stateful processes which might store substantial amounts of on-disk state. In +a scaled-out, stateless environment, container startup and teardown is +accelerated when on-disk resources are kept to a minimum. Additionally, +developers might not have access to the container's filesystems (or the +containers might be rapidly recycled), so log aggregation SHOULD be provided. + +In addition to the filesystems recommended in the OCI, the following filesystems +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/filesystem_perm_test.go) +be provided: + +| Mount | Description | +| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `/tmp` | MUST be Read-write.

SHOULD be backed by tmpfs if disk load is a concern. | +| `/var/log` | MUST be a directory with write permissions for logs storage. Implementations MAY permit the creation of additional subdirectories and log rotation and renaming. | + +To enable DNS resolution, the following files might be overwritten at runtime: + +| File | Description | +| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `/etc/hosts` | MAY be overridden to provide host mappings for well-known or provider-specific resources. | +| `/etc/hostname` | some environments MAY set this to a different value for each container, but other environments might use the same value for all containers. | +| `/etc/resolv.conf` | SHOULD be set to a valid cluster-specific recursive resolver. Providers MAY provide additional default search domains to improve customer experience in the cluster. | + +Platform providers MAY provide additional platform-specific mount points +(example: shared read-only object stores or DB connection brokers). If provided, +the location and contents of the mount points SHOULD be documented by the +platform provider. + +### Namespaces + +The namespace configuration MUST be provided by the operator or platform +provider; developers or container providers MUST NOT set or assume a particular +namespace configuration. + +### Devices + +Developers MUST NOT use OCI `devices` to request additional devices beyond the +[OCI specification "Default Devices"](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices). + +### Control Groups + +Control group (cgroups) controllers +[MUST](https://github.com/knative/serving/blob/master/test/conformance/runtime/cgroup_test.go) +be selected and configured by the operator or platform provider. 
The cgroup +devices +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/cgroup_test.go) +be mounted as read-only. + +#### Memory and CPU limits + +The serverless platform MAY automatically adjust the resource limits (e.g. CPU) +based on observed resource usage. The limits enforced to a container +[SHOULD](https://github.com/knative/serving/blob/master/test/conformance/runtime/cgroup_test.go) +be exposed in + +- `/sys/fs/cgroup/memory/memory.limit_in_bytes` +- `/sys/fs/cgroup/cpu/cpu.cfs_period_us` +- `/sys/fs/cgroup/cpu/cpu.cfs_quota_us` + +Additionally, operators or the platform MAY restrict or prevent CPU scheduling +for instances when no requests are active, +[where this capability is available](https://github.com/knative/serving/issues/848). +The Knative authors are currently discussing the best implementations options +for this feature with the Kubernetes SIG-Node team. + +### Sysctl + +The sysctl parameter applies system-wide kernel parameter tuning, which could +interfere with other workloads on the host system. This is not appropriate for a +shared environment, and +[SHOULD NOT](https://github.com/knative/serving/blob/master/test/conformance/runtime/sysctl_test.go) +be exposed for developer tuning. + +### Seccomp + +Seccomp provides a mechanism for further restricting the set of linux syscalls +permitted to the processes running inside the container environment. A seccomp +sandbox MAY be enforced by the platform operator; any such application profiles +SHOULD be configured and applied in a consistent mechanism outside of the +container specification. A seccomp policy MAY be part of the platform security +configuration that operators can tune over time as the threat environment +changes. + +### Rootfs Mount Propagation + +From the OCI spec: + +> `rootfsPropagation` (string, OPTIONAL) sets the rootfs's mount propagation. +> Its value is either slave, private, shared or unbindable. 
The
+> [Shared Subtrees](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt)
+> article in the kernel documentation has more information about mount
+> propagation.
+
+This option MAY be set by the operator or platform provider, and MUST NOT be
+configurable by the developer. Mount propagation MAY be part of the platform
+security configuration that operators can tune over time as the threat
+environment changes.
+
+### Masked Paths
+
+This option MAY be set by the operator or platform provider, and MUST NOT be
+configurable by the developer. Masked paths MAY be part of the platform security
+configuration that operators can tune over time as the threat environment
+changes.
+
+### Readonly Paths
+
+This option MAY only be set by the operator or platform provider, and MUST NOT
+be configurable by the developer.
+
+### Posix-platform Hooks
+
+Operation hooks
+[SHOULD NOT](https://github.com/knative/serving/blob/master/test/conformance/runtime/container_test.go)
+be configurable by the developer. Operators or platform providers MAY use hooks
+to implement their own lifecycle controls.
+
+### Annotations
+
+As specified by OCI.
diff --git a/test/vendor/knative.dev/serving/docs/scaling/DEVELOPMENT.md b/test/vendor/knative.dev/serving/docs/scaling/DEVELOPMENT.md
new file mode 100644
index 0000000000..db769af26b
--- /dev/null
+++ b/test/vendor/knative.dev/serving/docs/scaling/DEVELOPMENT.md
@@ -0,0 +1,207 @@
+# Autoscaling
+
+Knative Serving Revisions are automatically scaled up and down according to
+incoming traffic.
+
+## Definitions
+
+- Knative Serving **Revision** -- a custom resource which is a running snapshot
+  of the user's code (in a Container) and configuration.
+- Knative Serving **Route** -- a custom resource which exposes Revisions to
+  clients via an Istio ingress rule.
+- Kubernetes **Deployment** -- a k8s resource which manages the lifecycle of
+  individual Pods running Containers. One of these is running user code in each
+  Revision.
+- Knative Serving **Autoscaler** -- another k8s Deployment running a single Pod
+  which watches request load on the Pods running user code. It increases and
+  decreases the size of the Deployment running the user code in order to
+  compensate for higher or lower traffic load.
+- Knative Serving **Activator** -- a k8s Deployment running a single,
+  multi-tenant Pod (one per Cluster for all Revisions) which catches requests
+  for Revisions with no Pods. It brings up Pods running user code (via the
+  Revision controller) and forwards caught requests.
+- **Concurrency** -- the number of requests currently being served at a given
+  moment. More QPS or higher latency means more concurrent requests.
+
+## Behavior
+
+Revisions have three autoscaling states which are:
+
+1. **Active** when they are actively serving requests,
+1. **Reserve** when they are scaled down to 0 Pods but are still in service, and
+1. **Retired** when they will no longer receive traffic.
+
+When a Revision is actively serving requests it will increase and decrease the
+number of Pods to maintain the desired average concurrent requests per Pod. When
+requests are no longer being served, the Revision will be put in a Reserve
+state. When the first request arrives, the Revision is put in an Active state,
+and the request is queued until it becomes ready.
+
+In the Active state, each Revision has a Deployment which maintains the desired
+number of Pods. It also has an Autoscaler (one per Revision for single-tenancy;
+one for all Revisions for multi-tenancy) which watches traffic metrics and
+adjusts the Deployment's desired number of pods up and down. Each Pod reports
+its number of concurrent requests each second to the Autoscaler.
+
+In the Reserve state, the Revision has no scheduled Pods and consumes no CPU.
+The Istio route rule for the Revision points to the single multi-tenant
+Activator which will catch traffic for all Reserve Revisions. 
When the Activator +catches a request for a Reserve Revision, it will flip the Revision to an Active +state and then forward requests to the Revision when it ready. + +In the Retired state, the Revision has provisioned resources. No requests will +be served for the Revision. + +Note: Retired state is currently not set anywhere. See +[issue 1203](https://github.com/knative/serving/issues/1203). + +## Context + +The following diagram illustrates the mechanics of the autoscaler: + +```diagram + +---------------------+ + | ROUTE | + | | + | +-------------+ | + | | Istio Route |---------------+ + | +-------------+ | | + | | | | + +---------|-----------+ | + | | + | | + | inactive | active + | route | route + | | + | | + | +------|---------------------------------+ + V watch | V | + +-----------+ first | +- ----+ create +------------+ | + | Activator |------------->| Pods |<----------| Deployment |<--------------+ + +-----------+ | +------+ +------------+ | | + | | | | | resize + | activate | | | | + +--------------->| | | | + | | metrics | +------------+ + | +----------------------------------->| Autoscaler | + | | +------------+ + | | + | REVISION | + +----------------------------------------+ + +``` + +## Design Goals + +1. **Make it fast**. Revisions should be able to scale from 0 to 1000 concurrent + requests in 30 seconds or less. +1. **Make it light**. Wherever possible the system should be able to figure out + the right thing to do without the user's intervention or configuration. +1. **Make everything better**. Creating custom components is a short-term + strategy to get something working now. The long-term strategy is to make the + underlying components better so that custom code can be replaced with + configuration. E.g. 
Autoscaler should be replaced with the K8s + [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) + and + [Custom Metrics](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics). + +### Slow Brain / Fast Brain + +The Knative Serving Autoscaler is split into two parts: + +1. **Fast Brain** that maintains the desired level of concurrent requests per + Pod (satisfying [Design Goal #1](#design-goals)), and the +1. **Slow Brain** that comes up with the desired level based on CPU, memory and + latency statistics (satisfying [Design Goal #2](#design-goals)). + +## Fast Brain Implementation + +This is subject to change as the Knative Serving implementation changes. + +### Code + +- [Autoscaler Library](../../pkg/autoscaler/autoscaler.go) +- [Autoscaler Binary](../../cmd/autoscaler/main.go) +- [Queue Proxy Binary](../../cmd/queue/main.go) +- [Statistics Server](../../pkg/autoscaler/statserver/server.go) + +### Autoscaler + +There is a proxy in the Knative Serving Pods (`queue-proxy`) which is +responsible for enforcing request queue parameters (single or multi threaded), +and reporting concurrent client metrics to the Autoscaler. If we can get rid of +this and just use [Envoy](https://www.envoyproxy.io/docs/envoy/latest/), that +would be great (see [Design Goal #3](#design-goals)). The Knative Serving +controller injects the identity of the Revision into the queue proxy environment +variables. When the queue proxy wakes up, it will find the Autoscaler for the +Revision and establish a websocket connection. Every 1 second, the queue proxy +pushes a gob serialized struct with the observed number of concurrent requests +at that moment. + +The Autoscaler runs a controller which monitors +["KPA"](../../pkg/apis/autoscaling/v1alpha1/pa_types.go) resources and monitors +and scales the embedded object reference via the `/scale` sub-resource. 
+ +The Autoscaler provides a websocket-enabled Statistics Server. Queue proxies +send their metrics to the Autoscaler's Statistics Server and the Autoscaler +maintains a 60-second sliding window of data points. + +The Autoscaler implements a scaling algorithm with two modes of operation: +Stable Mode and Panic Mode. + +#### Stable Mode + +In Stable Mode the Autoscaler adjusts the size of the Deployment to achieve the +desired average concurrency per Pod (currently +[hardcoded](https://github.com/knative/serving/blob/c4a543ecce61f5cac96b0e334e57db305ff4bcb3/cmd/autoscaler/main.go#L36), +later provided by the Slow Brain). It calculates the observed concurrency per +pod by averaging all data points over the 60 second window. When it adjusts the +size of the Deployment it bases the desired Pod count on the number of observed +Pods in the metrics stream, not the number of Pods in the Deployment spec. This +is important to keep the Autoscaler from running away (there is delay between +when the Pod count is increased and when new Pods come online to serve requests +and provide a metrics stream). + +#### Panic Mode + +The Autoscaler evaluates its metrics every 2 seconds. In addition to the +60-second window, it also keeps a 6-second window (the panic window). If the +6-second average concurrency reaches 2 times the desired average, then the +Autoscaler transitions into Panic Mode. In Panic Mode the Autoscaler bases all +its decisions on the 6-second window, which makes it much more responsive to +sudden increases in traffic. Every 2 seconds it adjusts the size of the +Deployment to achieve the stable, desired average (or a maximum of 10 times the +current observed Pod count, whichever is smaller). To prevent rapid fluctuations +in the Pod count, the Autoscaler will only increase Deployment size during Panic +Mode, never decrease. 
60 seconds after the last Panic Mode increase to the
+Deployment size, the Autoscaler transitions back to Stable Mode and begins
+evaluating the 60-second windows again.
+
+#### Deactivation
+
+When the Autoscaler has observed an average concurrency per pod of 0.0 for some
+time ([#305](https://github.com/knative/serving/issues/305)), it will transition
+the Revision into the Reserve state. This scales the Deployment to 0, stops any
+single tenant Autoscaler associated with the Revision, and routes all traffic
+for the Revision to the Activator.
+
+### Activator
+
+The Activator is a single multi-tenant component that catches traffic for all
+Reserve Revisions. It is responsible for activating the Revisions and then
+proxying the caught requests to the appropriate Pods. It would be preferable to
+have a hook in Istio to do this so we can get rid of the Activator (see
+[Design Goal #3](#design-goals)). When the Activator gets a request for a
+Reserve Revision, it calls the Knative Serving control plane to transition the
+Revision to an Active state. It will take a few seconds for all the resources to
+be provisioned, so more requests might arrive at the Activator in the meantime.
+The Activator establishes a watch for Pods belonging to the target Revision.
+Once the first Pod comes up, all enqueued requests are proxied to that Pod.
+Concurrently, the Knative Serving control plane will update the Istio route
+rules to take the Activator back out of the serving path.
+ +## Slow Brain Implementation + +_Currently the Slow Brain is not implemented and the desired concurrency level +is hardcoded at 1.0 +([code](https://github.com/knative/serving/blob/01787a8cd70eab441d28872621bc26ad061bf14b/config/config-autoscaler.yaml#L47))._ diff --git a/test/vendor/knative.dev/serving/docs/scaling/OWNERS b/test/vendor/knative.dev/serving/docs/scaling/OWNERS new file mode 100644 index 0000000000..690ff0e48e --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/scaling/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/knative.dev/serving/docs/scaling/README.md b/test/vendor/knative.dev/serving/docs/scaling/README.md new file mode 100644 index 0000000000..12e74113bc --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/scaling/README.md @@ -0,0 +1,30 @@ +# Knative Serving Scaling + +TODO: write developer/operator facing documentation. + +## Scale Bounds + +There are cases when Operators need to set lower and upper bounds on the number +of pods serving their apps (e.g. avoiding cold-start, control compute costs, +etc). + +The following annotations can be used on `spec.template.metadata.annotations` +(propagated to `PodAutoscaler` objects) to do exactly that: + +```yaml +# +optional +# When not specified, the revision can scale down to 0 pods +autoscaling.knative.dev/minScale: "2" +# +optional +# When not specified, there's no upper scale bound +autoscaling.knative.dev/maxScale: "10" +``` + +You can also use these annotations directly on `PodAutoscaler` objects. + +**NOTE**: These annotations apply for the full lifetime of a `revision`. Even +when a `revision` is not referenced by any `route`, the minimal pod count +specified by `autoscaling.knative.dev/minScale` will still be provided. 
Keep in +mind that non-routeable `revisions` may be garbage collected, which enables +Knative to reclaim the resources. **These annotations are specific to Autoscaler +implementations but NOT subject to Conformance.** diff --git a/test/vendor/knative.dev/serving/docs/spec/README.md b/test/vendor/knative.dev/serving/docs/spec/README.md new file mode 100644 index 0000000000..fe441dab58 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/README.md @@ -0,0 +1,13 @@ +# Knative Serving Specification + +The Knative Serving specifications have been moved. See the links below for +document locations: + +- [Knative Serving API Specification](https://github.com/knative/docs/blob/master/docs/serving/spec/). +- [Knative Serving Runtime Specification](https://github.com/knative/serving/blob/master/docs/runtime-contract.md) +- [Knative Serving Conformance Tests](/test/conformance) + +Docs in this directory: + +- [Motivation and goals](motivation.md) +- [Resource type overview](overview.md) diff --git a/test/vendor/knative.dev/serving/docs/spec/errors.md b/test/vendor/knative.dev/serving/docs/spec/errors.md new file mode 100644 index 0000000000..5042d20cff --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/errors.md @@ -0,0 +1,9 @@ +# Error Conditions and Reporting + +This document has been replaced by the +[Error Signalling](https://github.com/knative/docs/blob/master/docs/serving/spec/knative-api-specification-1.0.md#error-signalling) +section of the +[Knative API Specification](https://github.com/knative/docs/blob/master/docs/serving/spec/knative-api-specification-1.0.md). + +Specification documents can be found in the +[Knative Docs Repository](https://github.com/knative/docs/tree/master/docs/serving/spec). 
diff --git a/test/vendor/knative.dev/serving/docs/spec/images/object_model.png b/test/vendor/knative.dev/serving/docs/spec/images/object_model.png new file mode 100644 index 0000000000000000000000000000000000000000..0c58f1504f18c402ef333dad06fdfdcf7660e680 GIT binary patch literal 16672 zcmbVzcQl+|*Y>1qgAjxaB1%N>nGhvLjUHW;D1#_5i4xH!qD32mXwimX^xlIQ5z%`m zena#Y(aZ4NL!Re--gmv<`quj94`;dWbDy&JIoIC%I{O4`sw;v?7)U@M5Lj7B?gPYU5|BGcch4 z(_UJY+ zy}!j<1m$Jy1<=2XUY6@b*Fd1#kjuV^e-|Ju@Co5D_7UZO6#u;vp$6fb|3{^>cK=5m ztS{oM-P@442-j)jWS-S84<9&nKWJN85XmyzIi%KTL}EAK2l)GJHjQ8IEiKo8wPc_m z5GBeeIKq2rFIrpONl884O}5`{j63T7m!10qMR(PUR3@YkTWF{~q@7OjSC%2s6VhAi zau;xbHlcRxrVsY+KN+iD`SA2wg^GxEXHrV}m-IK-jis{bBciCcXOpkE5d{XLtsLtX z+ebc+1x6s>%%$xfRsQ@ixs8@@i_#?xdLLr3v*5$eNOiDXB^~o+Yu!Hj>F^gx$@%W^ zQ{FcSn~_NT&z!>KFy=2$PKk6d+#B8<)F0;&*&RDCL@CtJ9yoy(55;T}a#ziAr|!gf0fm6Nh`t{r6xWj{FgU`gZp zAN?%)V0Y;b<8NyPMGx+GS(8`R5+q3dR&#Dqy#nH zJdCp$cB_77Aa4k+*W*=U!S#yO>){o=q^3SirB3-R-^Y4tGX{+pRd0gN*ZIdvzMy5+ zL9M5ca>SkL`Dmk}EU@U_-f(c9vF8L0Oo*ujF={XI8r{1SGW99&H6=M=`EanrFlx4% z+WPW;%1mblRl_!#VhL#4{n;}N61VZrhXtwr2i5IsM1PVr`q$fQLDy<1Up zFfp)AK}4-=JjIz78Z=?nyvLd8ssUX}eZ_LIY@DQMawHqpyi7f2PSe-!AvXstIb6*p zLfj@kSW8WjQ>7idn3O9)Z1Y(O5rgk_^S?ycJ<7)8lUXHp*1>X*k}HXQOK;<9A&I&( zUXDSRzYW(DkxB`)f4*c`Qc^`bWl(6W3@kN@bk*>adzc#jVp!i z?TbuzXKMt`MXHqrqq^-I$%)C(o4=hGUFki9wl`CA9g0*zqfz;h)k-|2DFb)=nUfJh zJcnT%+yfRGlLensub%;M7}qvJSn@qj_X~`(u4K@O$B5;G=w!N0VE)nk@`=qy|23Sj z7Mq~B)Jca~Yp$dZUr2xu!}tr0$f3QCjYgKKtgRK-q@7|n$!WHR*EA-}o>Lhq^-mUY zcH(+4x^WGSoQj2XTZ~+**>2@XHe*A>az(NteFP72-%;ht@)yH^)LLmzCkEO629HYB zeJP&pvIle9fg)Q3RFWAY4iXm{x@IM*rOTKfXqdmHA09M5xVgmdXg{EDf1f_`Ek=#v z8EMbGM=$9kQX+(j%?+}eg#=$-b6PXU2b<S$hgDVen z=_SVRH!#yZgzGi)crb?g2O6$E+^ylQKIngRlyg&W&#muck@AY}>Ue+@^I8#}>kaL> zux=c8{FIOIrjT%SMc~M;FL zfT52nI>Xz-NF~ro!Jis$m69v>asx}9?0Q}ME83PK546mp|r7-X!-40 z1=9rIQZdU-##xOO`W1TA2I5vJJCV)WEo;JeWGz+utqGnR$enZAPdaB|xx=HPt* zE0fz37P}xV4mb^IZT;o0tr}(Yv4Qs+Hc~PZDGLy zcOb7g0}y1dBbFB{rj;(+e#W950!*KRm2T=!?gzR|j29I$ut5xAcMaDJH>iQ3zrDu~ 
zS#}ZivS{X_A_LzyuyK(8NNcY5?2do)vM{Zisi_dIcSRuddf>Xokx>&5VH;^HKzUXo zE20+M0T~)HGAQ(!?L<-3Gp&@5zJfzbf6lrKK|C4_7mCS(XbMoeP@N9K#+1<|4mufk z(-O427tk!O48OmvJ0RW44SDIiVi3@5YBQ9f^mY4Z!3iJeT9a%WG}*7L5(<{V>hL z_+0H+FWakOCo?0G0RcrB^@{Zx1s~B{c6Ro|Wm?+@w#+6Y%L4hoMeSR3DptJOnc!f* z#YDqa>k4}{3Vqzpt@AapYL~$vb+c`|x5R(>e(7-SZG}A@Bd{`Dtx6v7h|;+HS$V!d zo@*^Av3*a0r88U~61YunNeb7Sp)orX!ed-9?&=Vum87jh`E@BjIng^GPvYKW21+Ro z<%L-lP@Z9yEHpo*Sq}@*7}Yk?Z4~TJxc9ABv&3#V27dP12ugJEY#W>vAb&p@=)U7M zz%Kre$F~U(>+kosAP**Z1t8pQB0?j73rg~fxHD&g{M~;P|9bncxZz#_zd89(aFpE;H0=few=2V| zzT{BWn#H!3mYT)RmRMtqu<=*L%fN_cGat}J%vrd7uM}tg{?pi@qb0<+{7)aS^krDs zD7l^cgaR!;y($@;6K2VkuqCP-+UmOq>?AoDjQyI4l!{=T|3d7S?KulYKD`ACa_0LW z(APn^gkY1JMb3ePk81`=p=x~6a+V^sO$n4hcNjr&u78+ftG#w%r2}(6e1J!TWQm5Q zJ&Mj(CL@+5Xy+~bI&6%{+7zj!`G9_wEgkuG>yCdbWM(W?xBIMaPFMY&1*Cy7X7Jni zD2T~R_Z+Er`(w!lf8PXV z#i?}_rmeg~DcU9V{1@xJ$8~hVymdaKx~-~=K(`?>p>WYH!LY=!zJz1C*0yD&IXrq%W;ijnbH|f03$gB-c+2!j54rsZjNNjq3o!$f!Mw z2w^TH5_x%W=STgCXQ*q#>>79+RfC2JPBlnA+^ya)hP^3vW_rInMdP|m<-67=MM6`- z7gkt5ITy!EQB>EDoMu#R8#Z6>gMTrrTmM6K>VVy=`%3!5^T9K{j;2M2=e9K5`^0Wo|cG-C8SS#X;BiDpnkJ*G!roR2|~Q zy%+51V;dI7x9}x(_?q%*5~KqUM8fNcq|wYTE_{DH=(xUzT=*@?*aV2~U{J@i_jRpKeya@2(X5yb}ZxFn8JS_DH>T^v*;qNr)DQ z*-A#Yy#;@QaNUMX(T_;qAXY5v5rU9+k1bra;|`gsvJEL5nhhNG`__lsOJ0M|x)-a_ z2Y#g4FPmPHX7$!nfi&}jK$;xGN8N}qXpb-xu9&Pz?7qOt=O*x=TbLeHq#T{zZ+U{} zeR)aP+T#wOgQ8o)61BsDGF(Qaa>kf@Z=NnoY_Xo~)6ncYJ*$jEul0Gxw-y1*CoCwC zlUg&R(XN#X;@U7U{pn?Qp&&^s_Ri5-h`(i3oUZ|`c!slqq;hQh^L?A}o{;i=%bA>7 zUis2{AK(A%@HriG*zE@>27x={$7Wle9f1s~-0?XU$z%>x2p` zMyE%#kP%Oh_q-b}i4E8I)`qA2gNGmj=t%N7#E*M&dm5{*9D4wK0qMyy|ANbp{Ds`| z!`)qscZpNg#Ub%i{cSP+%eVY2MRZC;eh9hLt>)C`-6`p7NW|ve%bUQz>}ry1*&%kW zAI;zV^Q`p7h;b)*cU?15y6)>jeOqYg&k;L)PmSJ>4|zj$Iz8N@L~gjDirJK!ZS{|?Rm%|z(~$!3sK^BA57q^ z05T_n9`3BeJaD6pkFufx4o;M|jq*(#6v|#SFZP}!XQXatV;5=M; zDte94>&C2tC?xsCKTCx!fpai=RPhhJclk5pnT5N{#(-CEDx$+SwsM-0(xrG&H;fI| zQp2^byKBkHObn8Nr_n;(a=TCzs4hV6-3#K`_s&EjS~F!Wd@oKtSB{qHKoK- zNS5~oNYzjTtm(>-jM5R1dC+&r>}@Jr{%1+4lv&No0i@aGZx}`^cj=ida*x)dZzIHq 
zVm6?)yQ@VS8VCoak@TrXcfCL)(t$Xbhdeot(6Ww_chW@ZQoCA^HODc=pD%%OuZG`c z-bCtZWna*I!#OaD`$Mra@VjHWwy9^VCMJRilsh4Y;J$=)s z88|y~a%9#MFE~q-+V!nHtg7|%W?cC|$e308U`rh`OSCYxO0*eSq?NGdM9y!iYf$>y}TAL3Iz~>&s z<8Cj0hBdDn=d73J<5+RM%U90)l($0Y9pT-YrcNqRPDmK~`|B?wOR2pQ_*G|H@ulrw8$+Ti&z5oaepe5!kN;;%D zOq=ql&wM%1WA!S!@D{Hd=P`CnRO~-TSEb1Rz6&OisX7X~8WDUh`nORTEw$Sbc2`O- zoDY1i^Tptn;5maRlha!|>!EY3Cq;5DKF%v@eJfx-X_xe|yAQdT&WHBB>kdDix!QLw zxl88{9=GV$?CbKKu+r&oy#!i{zOLbLI`C`0`&P7k<5ui1XS2f%FAkz>gEyW2q<`4o zmtrRVI!KLjdRWE#<_J8((Zw$*k)U^<_rWPm;|HEb{P3^4r&~3|H7BfQ#~ItT*-0L+ zUbQ)yB{iJiY-&m}P}?enK^8-LuF>P3rtd#}eUbqtykYIX=+j$bAb z$n=LXrn~O83t;@r_AB!SDznNG`%#UPdx*YUKHLee{rSHXrKg6bvsRW<*rY!G?0Xy~ zuK&gU$AY-!cQXHsDCp+5CH_wFrC+RCCDmYt(q1U^5(VFfdMtT(n!SEQ+cy#QU?7Cu^ZyN-Gs zx;I(#aE*s_NEJgpv|E1-`4HpX8j+%2yN-XB2(EF$w+8(Pneh$Z%XXy~-=)5_uCg~I zh-)xA`Z??(4NkVbsW|;q;n_5L;i%_&%3jgwNsCqU!btYQ(La(K8Vy^=_Xm0##1)Q4 z?Y+aVaKEUs$E+1bOnzuUwWy3dv7Inu@-go%4j$j8AH%Om($od*CwVk_XyDu=^!D;L zcBCqbe!l*~Phfcgu9=1(Qe^ij+8RpYYO$<2)QA74#pY;Y+o+jWTs?vK$kdf?(+uBk zgyEBeu`;A+J%y^4&7TyWINR>Cbh_9wuOabjp+`MA-9OoWbmv&|Be#yrySnYEC_9?x z9g#wxO^PPQ&7vEE1hmnIG*h8%-?-;H*H3?bR=!eLYf~{n7+>u*;y$=j(#Yw9#j?IV z@ziFL=-Jjz%k_sP$BWnX4SHe*w8Mk&nf&xxck`VCkX;gHVTFy| zYPK;;niFfx27*>f>`YNVsL0z;<2wB23p#GHe*~Funw)j$*@9VQE3ZobuIA?$bIl3f z4ksozCH562HqMuyKC74Z`}C~@Lb&f8w7+hcoQ&84h{rD(u+n+bqLrO|V-j@{>;rBv}tH^!P5l?~5v1|e+Sm73LZ(UD3fFLyrm<3(IVXmQ0O zwL!9}1@-s+FP#CJ-DBvWaxN@9NQp-Wl;b^kU=+w|uk`hoygX*E8-a;@b;lpplF)8# zt5uF$cNL*W<(o5fATX%QxSO$!X9)Y2i<*3h2y>|{f#xM(q|`_3IM{Abg;od34p_M| z(HO<5KvnH@(u^=yvHs0=j5?naD=L2|`8QiP#^eUV`e~60v{Ycn0Xz$&`H$I|?a+i{ z(fI*g0$LxeMhh?Y{w`WCW*y`KN0Y-9MZ9$wUE;yOhacM+YLjRbQ{aS)QSzT{Qe;w^j+5QX?vZO<@d`nhsZqQ6NP?e+NUAW1@K~+9S@gCYjwmQYr@{mc zAN^~c2Q;5}=wHh_Byw$FNaw5^_RDg5{II-e`EIG+fR5*voADK`j9Gy$Gh%SI(r&cp z&~jF7<@%#NdWxJ;GU{Y)*i8>J4`gCuuJgEETFd%CDaEdosJ8sx=dH!~;&u7y6{hmk zA=|~#eIvKiTK=i%b+rw4)3%D<5)Y)|+92ZW3`s_pNKsUNxcOHMqt?|7%}wqm9vT9v z09XSB-CwyZ5b*SGr6+?jDYN1!v!63(Fj%lUT%XLhjj{9^*i01z2()63^9 
zUh*0FRUR6<{IXyHDwE{^!v(Yz7U#;>IFJ2c>~bI$+=?@X?5$aRqyQWUP6m0X0>eVm z(I1w+VISpEhl8W&qP0VMfZ`RTjlIU`_F(DH2-lSYQ?c5N-JKd}VrXodqYYnW_^go{ zfti31oDkGK>O+m)FyS8c!uk^vyx?g_6~{o<5WBFAD4W^-azwiaUiyT&Co^j6>v!Jc zhjxn4!G!h1w=1iJrWsA4JOHMmXM2RI{m57gKRTeZvd-G&mTHkE7fitY~@A=g%?o9Vk?0$+1 z?P{vAu^H5e&v)Q`A}rXtF|twwAA6Ve&xx3PdM{Jb)3IQAEd}eKxVHtq-$q3}GWMoE zerPhh;r+rn#0pt?XutMbI;dFhbB7AbLXD3B0NS*`jI00{%UJJjHNGr$I`+$K?6gK1 z3%9g8@=pOXel2aSr{On6o8xOQo{z!#yE_pMYGx~*$*>XkfsalaX06RoT!qviUh@y0 z?({?I#C`y*`KosVhq;OXPf7o4|78LF{qC}!(03n1SDz&WDl|Hp@sS^FO!$!>RMU|s zdsZ>$+JdcMBjf8E&Ej+LD?*p2P8g1k_X-}(r}d;xe>3-Oto&)89d~`LN`$hQX}Xfd z9q9)4iBXq)#oIJXg4HGlt?tsgP=US8}#mcRczQ|4eV}-gyT=YnhInKGbI` zy`z7vZtv9j@UZpTiD->oU->LOqNxF`=@$;0?}YkUXsA))eLnW}WZzoXf82cXyld1+|3l@~)0%?tpA#o{ znk5ZkT-^2L{?gH7CzB>;3r3V1qTzGI|0Ka(cS0WTxZ! zRvMHAjO+DJyv%rEzQV&!Y9@^^6{u|NLFGKg`}v?0EiV3+)eR#*OahRX1Arbu6j5BB5^$?|d{&ez&d0VGDQX2+|K`5=al=86xI5Ulyaj}Ta2 z@3*Ej?=FF6Vqlbd{9@j|JlnBKP;pYo{qgiZzbis_B>Qaq3Fpo78AIFvS&S^yp^?;V=hM;ytfrX4M11MfS2zRQ1cQ(rc~sh0=ZDxn zu@Le%0d)Rd?35^A+c790A~I-D`&;yRh#QG^x2%27xlKD8CHX(M$20?B@+mlkLadwt zhPJs|*c4YRHU%3NXCVR4{odlvL3G~hHOy<4hI~P-JpATs?w`@eJd#|(6rlVyD{PB= zZv$kiQEf77>LP-Mr;Zv3g8e8eydw$dg8}1UG7;w$ffJx&jn~ig8eNsq&2GL z+^aU?9V=T)pX~;1m5HK`7|17lJ}YHABJs?-12&&RiCSrq(%g7S$GxL9USEX&#{J_- z)X|urp~!)~Bl+iyHj1s)&D0gyQ*S2)6EFdL=jK+3S})Uv+g%MI5g9rNa;3@L)1+ln z(bQR_50U4&%R$vl9Mo-wF_oK`S)tCLMRi{cc<*WoB0nV8X8aRnn`)iP2%VQW+G%KB z3$fN;-8Art5XEAZ{I}K7+o(Q}f z(qXd3>BkS$H%HeKuict8zB{*GDp)$j9E>h+Y++7)zQIHZr-=e8nOp(Qa{qLV7*9`U z(X(<;4KJzO5s}w>M@inaSaG=}P%7ok$-?uiY_#MwnbvS&2ZgZVe~O>ih@Z~9K7N5L zAZjYp0c;jQ16PM}0*^HDffXjmEl0m)?wBhdjQx)|*H8R{2Jq!3M@ybhM_6x=2ZuVw zr5R+w)|?Ezw$kjMyM?@i_g*Il43K|D*!CR=oXGVzF5Tp1S3qTuwk0n&nnpo4qhy{s zg1p_~TSvykDwXm(Hra*HIk}ONtpFHwvw}^K=MXsHCy4zZ4 z+~6JN7bf+=iauZC+hMO-v$OcuI)_ruaAxqk@X65JNOSLiNd+oE595%{tw1n|z_8Md zVO$=Q}-`owb;>_O%--F=$6{tdu(bR{*pa<^aDH}J@{`; zeDP9}rLAk~5xW4254jDQ4G%hXQLNReZWzWMX}MV*h8=F#a36cM0_WMy+#dZ)eD{C* z50jttTzGqrWWb{J7Q^E=0PczIq--10O4`fA2IMvL;8~?WC15@ 
zMC6Y&c#P*Uk!Kfac+K_z!B!;_mnYr+#U>d>ff(p*+n74}(CGp07+*}_BT{UmJ4u}V zBW$ugx^sz7dH^nVnnB3PxdH=N@iB4yZ=9o@lYJvw*A1nMMm7DOT zefGhM$Cr&HOHDQ0boJzqmU2owct7+BydN)Do3?*?{1%$5y`ZHq53KdnpPyt)sn)X< zM^+%?*KnLGk24aNN}Ir)E{1dQOBX>E#DG|+m%k2`*humY1LsR@y0NS`V%B=_Y93#N zZka1IGqEs-yxoe3LH?OQ_DyH429KNQ!0z-R`dC+_0pmRGwlGDpUA1H^o?Xb?cuhH& zjR+SXnc1bQQ~MR@Wo+rJmn-ravjx*XmSfbQGqO*>S{Q;uFr2E~8>#THt_3wRq6uZY zvst0)Fyp4?q-)(`%H5n!ru-!}(9DG#eCof}{sO~@i~nttW1H1M1*i%h+R@{ZvvA4{ zuq|1(N@5|ztyx{u^m=SOtY{AsI1m^l@ZgAqY0W)3X>s+2b^Z!RCJ3B;zfq)s$hQo~ z)agI5SvNaD>;OmIc`a}b=N>3E$D z^F4^7@rEx#XL7(obU!dpAv=Owemu8XtR$@3~S-7MK)T(ez>ZvmK= zcPt-6d>+>ZF2l!XxxncDiFnG(f8x3`)Jfe1xmSUh zJnfx#F$0f`UK)=%=mo+RPH?93wEaF@g#Nm}Wq(NmpX7^YSt{97-P`1te z!^$oXF;Gzz2yq?Y7M@aLuUiP4?AH1w0B1$WVu1yb)PwsrV#&jbss&JdxpqTWqmO^a z9oT?jkDb(OrTO%6*@|06aj=!QzugZLZvaELTXMT3Y*Nc8Y!aB(efJ=aj>^KWr87a; z<6y+>RXpv}T$Is((vX(^){#NwG5-05=M|yUSV6n_Lx)!LRJ_4l84G}~zKE~C{19Xx zQSKi0qU03tA(z__?W>=b(#0cK8`fbv%;9H=D+C-?d{CUg_jKy3Y8?BEkS-TOM8-+4 z8p=-OZ)qShkU0SCjir~`R#~J#qcUx2wlP3#pw+i(|8RG|(Lm=&kb+=p&SDi#y2PRM zh3Zk&xQfE>*0qFGytBYchjtB6X@`_PtFQ@Rl`cKLI$%M35y$y}k?@!=E_JE@%oRGT z9CHcKTMJ>^FX!5xbxH`3h!kDX349%sz$m*4NzU7j2}8mN_8{Ii(JQ?5h4gCrc*Fr= zT7x2IHb*cA7|A6<7eGKm2-paJ(~NZz$St5M+*&H z0ZdGql@}F(56Fnp(#o-}#r)8bChc<3+I%iauxZ+t32i-l2dq{J!k7XI!_KF6#@b|w zF_;=IUA3-taDm4!y#u04O`&*N6<`uW1IqxA>ezbxn6s%Z2gsEOn6$TecpuY<=kJ7H zUr9AsAev+$3V?7h_r~iHz)lQSQmG(TTzENmID58W;GO3Uv@y zdDqN+`{#-KwC4-e%!~#fkFn3Q>lBlT-)$p2x6u(C8|z>A9-L_OutGFFfHQUsm5xsB zkGW7)U5DDKxlj#QQ{?;OP-wNjVwN0O$`VB3 zm4~dW^j>Z52ufO+m=Mbv0nSI(vWfQk1lmlh8igf`0@Ys_9uBk6$6i~nW8Ly`J-Jo* zG_td%SahlSINhbuvT=Vi($ZCJ$oI?ZgQ#tWEj}N|SErIy8b;qgzefyiaRWz_89-(p zxXiYCZh_B?`{hGAY3GBWPEq}Igh;Blg3!@%9GGh~<}GGdma$TH+Fm-EoY=3M{_%U+ z1S;@A{N=@m=uRau7p7zl7>9U-X`boqST^(0w1yCRD`>%IQ9ExyjM(3Qrq*7fe^x|i zaO7w*&GJ*Xt!fWILpHE;io5YNOTej6H8uMdd?$%!w}4~j2>0Xhj=&jxQz?&I8jkyW zfR=)oj}f(M^knwhiiNc(MC^|SW~mQu>p;?ro`P(v@T3;EH+WjD;$53Fiazp+d0HfcOwtAXRP&xs^nfg4Bd;oumP;t}NT{`mH&$@KAL zF?KV&fC8Muu62Wo94@l$ajKC5d1c1H_o4Vk2Z~d~C3?toU98Y;{ 
zXtUbm!4;U4uyy(6wajrb{_)Dp_9`tA5|Cyv7Og&ZS!O0p3@(giAc8Xu^3fB|$+4Id zOE?V45z;6ZNCMbYpyfroT9q^H+_xQEwT>75h9unWogtb`Nz8g5%9V&_ z7=jI;`L}h`(az6_z8~DH!(B8t#9mKro%tE@CELiKHB`Ze8^|Mhn@EEyK4FRRpXzxi z^;s;#0OK6Ts@=b7|fCCdT@}Pgh*$)gRJ`(;*e15 z06-)KQ|5*=#9>0T8jdF}9^`FG0PW|)a4{;z7CAN0L|2vBT8$y1!C^Sd#W$#Ywl09O zsu2dmk0zR1s@SeF%p;o0^kd|Ed17T@44a7%LoAC{FWF5x$q{y`*Rdi>0uI*U0mECG zv6%)<_ppLF+ttTmQLlbi9YyBw1$;IXR>qo#^NbZ^ z`|};`w6kOvx;;>#zcPBYtt%P>^EeW%nBL*+@S!tv$okRS4r%!} zDUGL6$f-VTxd9>LC?13xWv<}exkG8qI=^2jZz>-1!GMmj>K-(%aw(@3_s%N3!O@Wo z0wi=qg|077K4rxzz1xcaXFH6w14um~x~64~8~w;auK0!0AFbZNP3L%W6d>PSH=^>c zgEDC91ET!ugoM|`-RLvEutkJWurWtNR0x+3`9#r2UcwSxFVPFA_a&G{MSe4?0XlF?)(nXLk;6)LLIM@;b}kH>wDDV12kn7+(HK$a81R|YTd{u1 zX?lHcgQ%1|cc^%#-}U5Zir@*xA^@-2KYyq6C0jd!(=XjTX6NMa=olXkk0m)vS*3*2 zqWJWycg+)ej8?a?y;P1)Te77cNXO91kt5YsH)ifjW0W3JA%Tw*g$fH++kw0blh-A2 zp#m!<{TpQiPWJS(Heb&*_AFEDe31d8B=9D0%@5dt;R0 z{06KKMsGdp)6A7@Q( zY(t`dAFz4Qw&(t{^DqvHQl5czB=r#QO3qAB)HT5bb7>y@Q^mM90xl~1$vi!~x3s%g zDp+eHX%9bz2KW=E@s^mD+;x5Bi^$>|4sX8Gq!>i%`t7Z>YZ%9^qm_Gwp7q8`plyff zp_3$!7a@|uaNmtz_p6Va|C#qZAZlrngY_u)37d@LPij*IqF?obrOEt(WJ{1vJi;M# zymFsRAr?_yhmI(5+L~~La@Z@ut0?Fgv+=Q4v49edM=h7=6i3Vmw;MpfLj5-Oj_Lk> z$!|1bu*SC)Yj10bzGc+CAL!Nkj-RE&nd1!BCGcZSyYdZ=FRtU_O*?8KZ^kHG&xF>O^Fk zg#r2iTyU%w`SwjuUaG8sMp~A$^mkuGOyYwVzcUu^-Z}>hxsN14v64=%FnvZEfO8bz zSG#%+&H3r*pfbk2CoenaMtU$?6LsF6Kqe4y=t0zq>Gd0Ri6H&r#Y6lVwn)#VGW9j` zvHg`vVS+j-$eU=OYs(s$_(-CLNovgO&boe= z-E!xzR0vlo&cmp)t4jo}-G;neJ)B<%{6_HAAg9?29UfCfjt8m?mNnD-4?WH<1Zb*Z zae-1x!r@uM4AHI34cQJO1x2YTfa`7VxpQR+zr*Bpm7sJC2d)}dd!-GXDwX^lFxKn@ zBQCUn9d?6^Y(u9cJ;$)NBSk&Hu)*soD&iPEdp^G;mAA%hKDR!{%oF@t2Q{z!!kVLm)&U@K-`IH$9xW#4ch;k} z5fp`=X;q%hxtlFYjdl=865(FU*PTZ9?zQ{JD@`JM)|MXVx-+C`FnR-F08LB+UiyuY zWJGs1OSH77mt+6S1No&I*`?x21Gd54_3o8^T-r71m-erAz})Kx9m5A0kVlC>{baQokfgZf99zigTK{q*Z*^({Ffi2w@= z?KW&PaU7P79CZNK8MONs%6L!XLy>#aK?xC4HP9P!D#dG>H&Bj%oB08Y?jF7@gJvP_ zxH-B#dWrwx$FUOb!5kGbti$)!Ewka%L$pSLdW0@*MkM0l@jpQW-XbDT93H>mfkd&_ z#}0lwGWG15Tk_OuHRY6`O0lF8h7@(SXxBhvL(OamD@d 
z_H9(n&t%u^OZadDNnBa}tg)o7o$X})@^QK0y$w_s;4GzgAW?mr%fCf`rK7<^x@7v1 zxuZagSKc7ok^!VhP}97jrA8Htv5_TbxBIhxPM8i6Z2nCGk`W~e68EN~d6LUHc)$|z zAxS@h3a%cr776_Z11>SydOp;ag9$VbAtp<=3zqDI)rKQO3LQo7>T&>Qqd*FBCBncY zuu@sdVL69HVK!ZhI>wzOmv-lsk6m7%PT;3k+&PiP2K4~}LX76#N<3hn$-ngLI%B zNNxfV-4GWX6Klz{prw+|X^#U4N6iH1h!T}UCi*`h=6{76!j(w?fRY6W zB>`j=X=K2_62Srt#aWvEf0XA}3IBfa_sY^;0^*-T{uRyUzZeFzCGb4SXFTV096$|f zfO~o#e)p8-^pKFZ55h>fZCwY-fB(o7NeK=TDCSm(0-?ka-VksU^voch;J5tyC%~f` z3M6&EErk&rNT6Hb&zZ{k?xq-2RTaJ%H@9eS(LZOofQ-ZU!k^SvR8Hms{Ns%To#(?$ zPj-N2vA~?pCwq+ypfb+^IEInX`T0TDxp_Fx;wLz8=0-q1<$rv2KH&?^e|>ZrXgm42 znfZ&C0Dtr^VSg{=E z+w*(fU_aO@dDyEM{WPwg_ncHA%v&cE@ry5+fR4{4H!`0Sh==hkPMg-B?#pe@!*;88 zM@-@ehtyP15Dy>=Ox6L%3W#3@7ir+{Sbr^5xv;L1bt+3$?m?mFVzx%M;P|YI(_DAM zxpY+DA~*>k{CqxRZHSI+Yu_y}+JH?@uMyQSUuf9|e7?y*_WBW!kjRQe*o&y-8j26Gox zTlKzVU^pC)?I9c2XkAXs)gms+@ZJLq6p`YZKm@-q1BxHm>zBnN2h{ zetTpN;~%b3QQAHP2p4*nhOvc4o`ObBHsR@?m4cG%S+@$+d-5Dw2sDu+X{d@NPSZ`+ z@Fv`{;rC+w^IaG{`BasiWK0WDgziKV>C=Ig=Vom|<7|(kc_5B82C<=cUu|v~A3OPQ zep7wXy1duW;zP@*lcOhkH8bDsz^gn>5 zI@}xEYrfm>EV-RCbR_^*amW1XH(6O~5-lD`gtV8&p;0C8Pyx7L^s^WshO3AZG+86tQCW;g;;vfxvfWdoj)Vz#L z;w-HBQ>r5*t4tK6%SOH2+ng9?0BxAuYTG^@F?D{^hYL4=@-$PC*BRJC6(;h#+3cYT zEa*17)URZU_IxKlR07+s!FU0C^%RTdc^k-jqg_^seDFii(Y*mJC3PMLGjP$__0{V` z24woo=|+^`@I?doT{bdYq*AQNVucV-!0;iFFar$_M8SkF_SZ~}@v+&zY)K)mm>jyg z+E}uKE?)yI^|*8>8w!Dup(h9DVQ68k0b^0JSo79pqgqsDoozM1?6?%>qZi0)E$3{MHqV z*u^#_Ih`}Yg28J#2?SsRYze1B;Cnl6tgh6MZ)-qu;*xnwCN0-PS05T}a5&lhx#3^8 zjhkWVY4q$ZKqi*TjJ+*X5g;qL&Tv&RfH;VRS630elwTHlvF@?UsGuqXew z{|JByeD)v6_Fs+sefPJ?|Nh^3lYhHQyYpb~SMXYc+GW)qq| literal 0 HcmV?d00001 diff --git a/test/vendor/knative.dev/serving/docs/spec/motivation.md b/test/vendor/knative.dev/serving/docs/spec/motivation.md new file mode 100644 index 0000000000..d1ba87050e --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/motivation.md @@ -0,0 +1,19 @@ +# Motivation + +The goal of the Knative Serving project is to provide a common toolkit and API +framework for serverless workloads. 
+ +We define serverless workloads as computing workloads that are: + +- Stateless +- Amenable to the process scale-out model +- Primarily driven by application level (L7 -- HTTP, for example) request + traffic + +While Kubernetes provides basic primitives like Deployment, and Service in +support of this model, our experience suggests that a more compact and richer +opinionated model has substantial benefit for developers. In particular, by +standardizing on higher-level primitives which perform substantial amounts of +automation of common infrastructure, it should be possible to build consistent +toolkits that provide a richer experience than updating yaml files with +`kubectl`. diff --git a/test/vendor/knative.dev/serving/docs/spec/normative_examples.md b/test/vendor/knative.dev/serving/docs/spec/normative_examples.md new file mode 100644 index 0000000000..b4f11c05d7 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/normative_examples.md @@ -0,0 +1,4 @@ +# Sample API Usage + +Serving client documentation can now be found in the +[Knative Client Repository](https://github.com/knative/client). diff --git a/test/vendor/knative.dev/serving/docs/spec/overview.md b/test/vendor/knative.dev/serving/docs/spec/overview.md new file mode 100644 index 0000000000..ebdf6e17c8 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/overview.md @@ -0,0 +1,89 @@ +# Resource Types + +The primary resources in the Knative Serving API are Routes, Revisions, +Configurations, and Services: + +- A **Route** provides a named endpoint and a mechanism for routing traffic to + +- **Revisions**, which are immutable snapshots of code + config, created by a + +- **Configuration**, which acts as a stream of environments for Revisions. + +- **Service** acts as a top-level container for managing a Route and + Configuration which implement a network service. 
+ +![Object model](images/object_model.png) + +## Route + +**Route** provides a network endpoint for a user's service (which consists of a +series of software and configuration Revisions over time). A kubernetes +namespace can have multiple routes. The route provides a long-lived, stable, +named, HTTP-addressable endpoint that is backed by one or more **Revisions**. +The default configuration is for the route to automatically route traffic to the +latest revision created by a **Configuration**. For more complex scenarios, the +API supports splitting traffic on a percentage basis, and CI tools could +maintain multiple configurations for a single route (e.g. "golden path" and +“experiments”) or reference multiple revisions directly to pin revisions during +an incremental rollout and n-way traffic split. The route can optionally assign +addressable subdomains to any or all backing revisions. + +## Revision + +**Revision** is an immutable snapshot of code and configuration. A revision +references a container image. Revisions are created by updates to a +**Configuration**. + +Revisions that are not addressable via a Route may be garbage collected and all +underlying K8s resources will be deleted. Revisions that are addressable via a +Route will have resource utilization proportional to the load they are under. + +## Configuration + +A **Configuration** describes the desired latest Revision state, and creates and +tracks the status of Revisions as the desired state is updated. A configuration +will reference a container image and associated execution metadata needed by the +Revision. On updates to a Configuration's spec, a new Revision will be created; +the Configuration's controller will track the status of created Revisions and +makes the most recently created and most recently _ready_ Revisions available in +the status section. + +## Service + +A **Service** encapsulates a **Route** and **Configuration** which together +provide a software component. 
Service exists to provide a singular abstraction +which can be access controlled, reasoned about, and which encapsulates software +lifecycle decisions such as rollout policy and team resource ownership. Service +acts only as an orchestrator of the underlying Route and Configuration (much as +a kubernetes Deployment orchestrates ReplicaSets). Its usage is optional but +recommended. + +The Service's controller will track the statuses of its owned Configuration and +Route, reflecting their statuses and conditions as its own. + +The owned Configuration's Ready conditions are surfaced as the Service's +ConfigurationsReady condition. The owned Routes' Ready conditions are surfaced +as the Service's RoutesReady condition. + +## Orchestration + +Revisions are created indirectly when a Configuration is created or updated. +This provides: + +- a single referenceable resource for the route to perform automated rollouts +- a single resource that can be watched to see a history of all the revisions + created +- PATCH semantics for revisions implemented server-side, minimizing + read-modify-write implemented across multiple clients, which could result in + optimistic concurrency errors +- the ability to rollback to a known good configuration + +Update operations on the service enable scenarios such as: + +- _"Push image, keep config":_ Specifying a new revision with updated image, + inheriting configuration such as env vars from the configuration. +- _"Update config, keep image"_: Specifying a new revision as just a change to + configuration, such as updating an env variable, inheriting all other + configuration and image. +- _"Execute a controlled rollout"_: Updating the service's traffic spec allows + testing of revisions before making them live, and controlled rollouts. 
diff --git a/test/vendor/knative.dev/serving/docs/spec/spec.md b/test/vendor/knative.dev/serving/docs/spec/spec.md new file mode 100644 index 0000000000..e8aa3f0465 --- /dev/null +++ b/test/vendor/knative.dev/serving/docs/spec/spec.md @@ -0,0 +1,4 @@ +# Knative Serving API spec + +The API Specification has been moved to the +[Knative Docs Repository](https://github.com/knative/docs/tree/master/docs/serving/spec). diff --git a/test/vendor/knative.dev/serving/hack/OWNERS b/test/vendor/knative.dev/serving/hack/OWNERS new file mode 100644 index 0000000000..c50adc8493 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/test/vendor/knative.dev/serving/hack/README.md b/test/vendor/knative.dev/serving/hack/README.md new file mode 100644 index 0000000000..6138085975 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/README.md @@ -0,0 +1,13 @@ +# Assorted scripts for development + +This directory contains several scripts useful in the development process of +Knative Serving. + +- `boilerplate/add-boilerplate.sh` Adds license boilerplate to _txt_ or _go_ + files in a directory, recursively. +- `generate-yamls.sh` Builds all the YAMLs that Knative Serving publishes. +- `release.sh` Creates a new release of Knative Serving. +- `update-codegen.sh` Updates auto-generated client libraries. +- `update-deps.sh` Updates Go dependencies. +- `verify-codegen.sh` Verifies that auto-generated client libraries are + up-to-date. 
diff --git a/test/vendor/knative.dev/serving/hack/boilerplate/add-boilerplate.sh b/test/vendor/knative.dev/serving/hack/boilerplate/add-boilerplate.sh new file mode 100755 index 0000000000..6ba526ac04 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/boilerplate/add-boilerplate.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +USAGE=$(cat <.txt to all . files missing it in a directory. + +Usage: (from repository root) + ./hack/boilerplate/add-boilerplate.sh

+ +Example: (from repository root) + ./hack/boilerplate/add-boilerplate.sh go cmd +EOF +) + +set -e + +if [[ -z $1 || -z $2 ]]; then + echo "${USAGE}" + exit 1 +fi + +grep -r -L -P "Copyright \d+ The Knative Authors" $2 \ + | grep -P "\.$1\$" \ + | xargs -I {} sh -c \ + "cat hack/boilerplate/boilerplate.$1.txt {} > /tmp/boilerplate && mv /tmp/boilerplate {}" diff --git a/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.go.txt b/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.go.txt new file mode 100644 index 0000000000..6f818683bd --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.sh.txt b/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.sh.txt new file mode 100755 index 0000000000..c7c207f223 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/boilerplate/boilerplate.sh.txt @@ -0,0 +1,15 @@ +#!/bin/bash + +# Copyright 2020 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/test/vendor/knative.dev/serving/hack/generate-yamls.sh b/test/vendor/knative.dev/serving/hack/generate-yamls.sh new file mode 100755 index 0000000000..ba734cabee --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/generate-yamls.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script builds all the YAMLs that Knative serving publishes. It may be +# varied between different branches, of what it does, but the following usage +# must be observed: +# +# generate-yamls.sh +# repo-root-dir the root directory of the repository. +# generated-yaml-list an output file that will contain the list of all +# YAML files. The first file listed must be our +# manifest that contains all images to be tagged. + +# Different versions of our scripts should be able to call this script with +# such assumption so that the test/publishing/tagging steps can evolve +# differently than how the YAMLs are built. 
+ +# The following environment variables affect the behavior of this script: +# * `$KO_FLAGS` Any extra flags that will be passed to ko. +# * `$YAML_OUTPUT_DIR` Where to put the generated YAML files, otherwise a +# random temporary directory will be created. **All existing YAML files in +# this directory will be deleted.** +# * `$KO_DOCKER_REPO` If not set, use ko.local as the registry. + +set -o errexit +set -o pipefail + +readonly YAML_REPO_ROOT=${1:?"First argument must be the repo root dir"} +readonly YAML_LIST_FILE=${2:?"Second argument must be the output file"} + +# Set output directory +if [[ -z "${YAML_OUTPUT_DIR:-}" ]]; then + readonly YAML_OUTPUT_DIR="$(mktemp -d)" +fi +rm -fr ${YAML_OUTPUT_DIR}/*.yaml + +# Generated Knative component YAML files +readonly SERVING_YAML=${YAML_OUTPUT_DIR}/serving.yaml +readonly SERVING_CORE_YAML=${YAML_OUTPUT_DIR}/serving-core.yaml +readonly SERVING_HPA_YAML=${YAML_OUTPUT_DIR}/serving-hpa.yaml +readonly SERVING_CRD_YAML=${YAML_OUTPUT_DIR}/serving-crds.yaml +readonly SERVING_CERT_MANAGER_YAML=${YAML_OUTPUT_DIR}/serving-cert-manager.yaml +readonly SERVING_ISTIO_YAML=${YAML_OUTPUT_DIR}/serving-istio.yaml +readonly SERVING_NSCERT_YAML=${YAML_OUTPUT_DIR}/serving-nscert.yaml + +readonly MONITORING_YAML=${YAML_OUTPUT_DIR}/monitoring.yaml +readonly MONITORING_METRIC_PROMETHEUS_YAML=${YAML_OUTPUT_DIR}/monitoring-metrics-prometheus.yaml +readonly MONITORING_TRACE_ZIPKIN_YAML=${YAML_OUTPUT_DIR}/monitoring-tracing-zipkin.yaml +readonly MONITORING_TRACE_ZIPKIN_IN_MEM_YAML=${YAML_OUTPUT_DIR}/monitoring-tracing-zipkin-in-mem.yaml +readonly MONITORING_TRACE_JAEGER_YAML=${YAML_OUTPUT_DIR}/monitoring-tracing-jaeger.yaml +readonly MONITORING_TRACE_JAEGER_IN_MEM_YAML=${YAML_OUTPUT_DIR}/monitoring-tracing-jaeger-in-mem.yaml +readonly MONITORING_LOG_ELASTICSEARCH_YAML=${YAML_OUTPUT_DIR}/monitoring-logs-elasticsearch.yaml + +# Flags for all ko commands +KO_YAML_FLAGS="-P" +[[ "${KO_DOCKER_REPO}" != gcr.io/* ]] && KO_YAML_FLAGS="" +readonly 
KO_YAML_FLAGS="${KO_YAML_FLAGS} ${KO_FLAGS}" + +if [[ -n "${TAG}" ]]; then + LABEL_YAML_CMD=(sed -e "s|serving.knative.dev/release: devel|serving.knative.dev/release: \"${TAG}\"|") +else + LABEL_YAML_CMD=(cat) +fi + +: ${KO_DOCKER_REPO:="ko.local"} +export KO_DOCKER_REPO + +cd "${YAML_REPO_ROOT}" + +echo "Building Knative Serving" +ko resolve ${KO_YAML_FLAGS} -R -f config/300-imagecache.yaml -f config/core/ | "${LABEL_YAML_CMD[@]}" > "${SERVING_CORE_YAML}" + +# These don't have images, but ko will concatenate them for us. +ko resolve ${KO_YAML_FLAGS} -f config/core/resources/ -f config/300-imagecache.yaml | "${LABEL_YAML_CMD[@]}" > "${SERVING_CRD_YAML}" + +# Create hpa-class autoscaling related yaml +ko resolve ${KO_YAML_FLAGS} -f config/hpa-autoscaling/ | "${LABEL_YAML_CMD[@]}" > "${SERVING_HPA_YAML}" + +# Create cert-manager related yaml +ko resolve ${KO_YAML_FLAGS} -f config/cert-manager/ | "${LABEL_YAML_CMD[@]}" > "${SERVING_CERT_MANAGER_YAML}" + +# Create Istio related yaml +ko resolve ${KO_YAML_FLAGS} -f config/istio-ingress/ | "${LABEL_YAML_CMD[@]}" > "${SERVING_ISTIO_YAML}" + +# Create nscert related yaml +ko resolve ${KO_YAML_FLAGS} -f config/namespace-wildcard-certs | "${LABEL_YAML_CMD[@]}" > "${SERVING_NSCERT_YAML}" + +# Create serving.yaml with all of the default components +cat "${SERVING_CORE_YAML}" > "${SERVING_YAML}" +cat "${SERVING_HPA_YAML}" >> "${SERVING_YAML}" +cat "${SERVING_ISTIO_YAML}" >> "${SERVING_YAML}" + +echo "Building Monitoring & Logging" +# Use ko to concatenate them all together. 
+ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/100-namespace.yaml \ + -f third_party/config/monitoring/logging/elasticsearch \ + -f config/monitoring/logging/elasticsearch \ + -f third_party/config/monitoring/metrics/prometheus \ + -f config/monitoring/metrics/prometheus \ + -f config/monitoring/tracing/zipkin | "${LABEL_YAML_CMD[@]}" > "${MONITORING_YAML}" + +# Metrics via Prometheus & Grafana +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/100-namespace.yaml \ + -f third_party/config/monitoring/metrics/prometheus \ + -f config/monitoring/metrics/prometheus | "${LABEL_YAML_CMD[@]}" > "${MONITORING_METRIC_PROMETHEUS_YAML}" + +# Logs via ElasticSearch, Fluentd & Kibana +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/100-namespace.yaml \ + -f third_party/config/monitoring/logging/elasticsearch \ + -f config/monitoring/logging/elasticsearch | "${LABEL_YAML_CMD[@]}" > "${MONITORING_LOG_ELASTICSEARCH_YAML}" + +# Traces via Zipkin when ElasticSearch is installed +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/tracing/zipkin | "${LABEL_YAML_CMD[@]}" > "${MONITORING_TRACE_ZIPKIN_YAML}" + +# Traces via Zipkin in Memory when ElasticSearch is not installed +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/tracing/zipkin-in-mem | "${LABEL_YAML_CMD[@]}" > "${MONITORING_TRACE_ZIPKIN_IN_MEM_YAML}" + +# Traces via Jaeger when ElasticSearch is installed +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/tracing/jaeger/elasticsearch -f config/monitoring/tracing/jaeger/105-zipkin-service.yaml | "${LABEL_YAML_CMD[@]}" > "${MONITORING_TRACE_JAEGER_YAML}" + +# Traces via Jaeger in Memory when ElasticSearch is not installed +ko resolve ${KO_YAML_FLAGS} -R -f config/monitoring/tracing/jaeger/memory -f config/monitoring/tracing/jaeger/105-zipkin-service.yaml | "${LABEL_YAML_CMD[@]}" > "${MONITORING_TRACE_JAEGER_IN_MEM_YAML}" + +echo "All manifests generated" + +# List generated YAML files, with serving.yaml first. 
+ +cat << EOF > ${YAML_LIST_FILE} +${SERVING_YAML} +${SERVING_CORE_YAML} +${SERVING_HPA_YAML} +${SERVING_CRD_YAML} +${SERVING_CERT_MANAGER_YAML} +${SERVING_ISTIO_YAML} +${SERVING_NSCERT_YAML} +${MONITORING_YAML} +${MONITORING_METRIC_PROMETHEUS_YAML} +${MONITORING_TRACE_ZIPKIN_YAML} +${MONITORING_TRACE_ZIPKIN_IN_MEM_YAML} +${MONITORING_TRACE_JAEGER_YAML} +${MONITORING_TRACE_JAEGER_IN_MEM_YAML} +${MONITORING_LOG_ELASTICSEARCH_YAML} +EOF diff --git a/test/vendor/knative.dev/serving/hack/release.sh b/test/vendor/knative.dev/serving/hack/release.sh new file mode 100755 index 0000000000..38413c1026 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/release.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Documentation about this script and how to use it can be found +# at https://github.com/knative/test-infra/tree/master/ci + +source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/release.sh + +function build_release() { + # Run `generate-yamls.sh`, which should be versioned with the + # branch since the detail of building may change over time. + local YAML_LIST="$(mktemp)" + export TAG + $(dirname $0)/generate-yamls.sh "${REPO_ROOT_DIR}" "${YAML_LIST}" + ARTIFACTS_TO_PUBLISH=$(cat "${YAML_LIST}" | tr '\n' ' ') + if (( ! PUBLISH_RELEASE )); then + # Copy the generated YAML files to the repo root dir if not publishing. 
+ cp ${ARTIFACTS_TO_PUBLISH} ${REPO_ROOT_DIR} + fi +} + +main $@ diff --git a/test/vendor/knative.dev/serving/hack/update-codegen.sh b/test/vendor/knative.dev/serving/hack/update-codegen.sh new file mode 100755 index 0000000000..50842db347 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/update-codegen.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +if [ -z "${GOPATH:-}" ]; then + export GOPATH=$(go env GOPATH) +fi + +source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh + +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} + +KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/knative.dev/pkg 2>/dev/null || echo ../pkg)} + +# generate the code with: +# --output-base because this script should also be able to run inside the vendor dir of +# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir +# instead of the $GOPATH directly. For normal projects this can be dropped. 
+${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + knative.dev/serving/pkg/client knative.dev/serving/pkg/apis \ + "serving:v1alpha1,v1beta1,v1 autoscaling:v1alpha1 networking:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + knative.dev/serving/pkg/client knative.dev/serving/pkg/apis \ + "serving:v1alpha1,v1beta1,v1 autoscaling:v1alpha1 networking:v1alpha1" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Generate our own client for istio (otherwise injection won't work) +${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ + knative.dev/serving/pkg/client/istio istio.io/client-go/pkg/apis \ + "networking:v1alpha3" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection (for istio) +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + knative.dev/serving/pkg/client/istio istio.io/client-go/pkg/apis \ + "networking:v1alpha3" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Generate our own client for cert-manager (otherwise injection won't work) +${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ + knative.dev/serving/pkg/client/certmanager github.com/jetstack/cert-manager/pkg/apis \ + "certmanager:v1alpha2 acme:v1alpha2" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Knative Injection (for cert-manager) +${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \ + knative.dev/serving/pkg/client/certmanager github.com/jetstack/cert-manager/pkg/apis \ + "certmanager:v1alpha2 acme:v1alpha2" \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt + +# Depends on generate-groups.sh to install bin/deepcopy-gen +${GOPATH}/bin/deepcopy-gen \ + -O zz_generated.deepcopy \ + --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt \ 
+ -i knative.dev/serving/pkg/apis/config \ + -i knative.dev/serving/pkg/reconciler/ingress/config \ + -i knative.dev/serving/pkg/reconciler/certificate/config \ + -i knative.dev/serving/pkg/reconciler/gc/config \ + -i knative.dev/serving/pkg/reconciler/revision/config \ + -i knative.dev/serving/pkg/reconciler/route/config \ + -i knative.dev/serving/pkg/activator/config \ + -i knative.dev/serving/pkg/autoscaler \ + -i knative.dev/serving/pkg/deployment \ + -i knative.dev/serving/pkg/gc \ + -i knative.dev/serving/pkg/logging \ + -i knative.dev/serving/pkg/metrics \ + -i knative.dev/serving/pkg/network + +# Make sure our dependencies are up-to-date +${REPO_ROOT_DIR}/hack/update-deps.sh diff --git a/test/vendor/knative.dev/serving/hack/update-deps.sh b/test/vendor/knative.dev/serving/hack/update-deps.sh new file mode 100755 index 0000000000..a8c8a45db2 --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/update-deps.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +readonly ROOT_DIR=$(dirname $0)/.. 
+source ${ROOT_DIR}/vendor/knative.dev/test-infra/scripts/library.sh + +set -o errexit +set -o nounset +set -o pipefail + +cd ${ROOT_DIR} + +# Ensure we have everything we need under vendor/ +dep ensure $@ + +rm -rf $(find vendor/ -name 'OWNERS') +rm -rf $(find vendor/ -name '*_test.go') + +update_licenses third_party/VENDOR-LICENSE "./cmd/*" + +remove_broken_symlinks ./vendor diff --git a/test/vendor/knative.dev/serving/hack/verify-codegen.sh b/test/vendor/knative.dev/serving/hack/verify-codegen.sh new file mode 100755 index 0000000000..d1e4e69e1d --- /dev/null +++ b/test/vendor/knative.dev/serving/hack/verify-codegen.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh + +readonly TMP_DIFFROOT="$(mktemp -d ${REPO_ROOT_DIR}/tmpdiffroot.XXXXXX)" + +cleanup() { + rm -rf "${TMP_DIFFROOT}" +} + +trap "cleanup" EXIT SIGINT + +cleanup + +# Save working tree state +mkdir -p "${TMP_DIFFROOT}/pkg" +cp -aR "${REPO_ROOT_DIR}/Gopkg.lock" "${REPO_ROOT_DIR}/pkg" "${REPO_ROOT_DIR}/vendor" "${TMP_DIFFROOT}" + +# We symlink a few testdata files from config, so copy it as well. 
+mkdir -p "${TMP_DIFFROOT}/config" +cp -a "${REPO_ROOT_DIR}/config"/* "${TMP_DIFFROOT}/config" + +# TODO(mattmoor): We should be able to rm -rf pkg/client/ and vendor/ + +"${REPO_ROOT_DIR}/hack/update-codegen.sh" +echo "Diffing ${REPO_ROOT_DIR} against freshly generated codegen" +ret=0 +diff -Nupr --no-dereference "${REPO_ROOT_DIR}/pkg" "${TMP_DIFFROOT}/pkg" || ret=1 +diff -Nupr --no-dereference "${REPO_ROOT_DIR}/vendor" "${TMP_DIFFROOT}/vendor" || ret=1 + +# Restore working tree state +rm -fr "${TMP_DIFFROOT}/config" +rm -fr "${REPO_ROOT_DIR}/Gopkg.lock" "${REPO_ROOT_DIR}/pkg" "${REPO_ROOT_DIR}/vendor" +cp -aR "${TMP_DIFFROOT}"/* "${REPO_ROOT_DIR}" + +if [[ $ret -eq 0 ]] +then + echo "${REPO_ROOT_DIR} up to date." +else + echo "ERROR: ${REPO_ROOT_DIR} is out of date. Please run ./hack/update-codegen.sh" + exit 1 +fi diff --git a/test/vendor/knative.dev/serving/install/CONFIG.md b/test/vendor/knative.dev/serving/install/CONFIG.md new file mode 100644 index 0000000000..16b577650a --- /dev/null +++ b/test/vendor/knative.dev/serving/install/CONFIG.md @@ -0,0 +1,56 @@ +# Configuring Knative Serving + +## Cluster local routes + +Routes assigned the domain `.svc.cluster.local` will not be exposed to an +Ingress with an external IP address. This can be done by specifying a custom +label selector rule in the following section. + +In addition to that, the label `serving.knative.dev/visibility` can be set to +`cluster-local` in order to achieve the same effect. + +## Serving multiple domains + +Different domain suffixes can be configured based on the route labels. In order +to do this, update the config map named `config-domain` in the namespace +`knative-serving`. + +In that config map, each entry maps a domain name to an equality-based label +selector. If your route has labels that meet all requirement of the selector it +will use the corresponding domain as a suffix to its domain name. 
If there are +multiple selectors matching your route labels, the one that is most specific +(has the most number of requirements) will be chosen. + +For example, if your config map looks like + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-domain + namespace: knative-serving +data: + prod.domain.com: | + selector: + app: prod + v2.staging.domain.com: | + selector: + app: staging + version: v2 + # Default domain, provided without selector. + default.domain.com: | +``` + +then + +- when your route has label `app=prod`, then route domain will have the suffix + `prod.domain.com` +- when your route has labels `app=staging, version=v2`, then route domain will + have the suffix `v2.staging.domain.com` +- otherwise, it falls back to `default.domain.com`. + +We require that at least one domain is provided without any selector as the +default domain suffix option. + +The label `serving.knative.dev/visibility`, if set, will take precedence over +all the custom label selectors specified in the config map. diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/Dockerfile.in b/test/vendor/knative.dev/serving/openshift/ci-operator/Dockerfile.in new file mode 100644 index 0000000000..d82a18bf42 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/Dockerfile.in @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD ${bin} /ko-app/${bin} +ENTRYPOINT ["/ko-app/${bin}"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/Dockerfile new file mode 100644 index 0000000000..5b4a5da33f --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/Dockerfile @@ -0,0 +1,11 @@ +# Dockerfile to bootstrap build and test in openshift-ci + +FROM openshift/origin-release:golang-1.13 + +# Add kubernetes repository +ADD openshift/ci-operator/build-image/kubernetes.repo /etc/yum.repos.d/ + +RUN yum install -y kubectl ansible + +# Allow runtime users to add entries to /etc/passwd +RUN chmod g+rw /etc/passwd diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/kubernetes.repo b/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/kubernetes.repo new file mode 100644 index 0000000000..65eda50b5b --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/build-image/kubernetes.repo @@ -0,0 +1,7 @@ +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/generate-ci-config.sh b/test/vendor/knative.dev/serving/openshift/ci-operator/generate-ci-config.sh new file mode 100755 index 0000000000..05273bbf4a --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/generate-ci-config.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +branch=${1-'knative-v0.3'} + +cat < $target_dir/$image_base/Dockerfile + done +} + +generate_dockefiles $@ diff --git 
a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/activator/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/activator/Dockerfile new file mode 100644 index 0000000000..234e546c2f --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/activator/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD activator /ko-app/activator +ENTRYPOINT ["/ko-app/activator"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler-hpa/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler-hpa/Dockerfile new file mode 100644 index 0000000000..034363ac63 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler-hpa/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD autoscaler-hpa /ko-app/autoscaler-hpa +ENTRYPOINT ["/ko-app/autoscaler-hpa"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler/Dockerfile new file mode 100644 index 0000000000..0cf17a622c --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/autoscaler/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD autoscaler /ko-app/autoscaler +ENTRYPOINT ["/ko-app/autoscaler"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/certmanager/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/certmanager/Dockerfile new file mode 100644 index 0000000000..739b9ae7aa --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/certmanager/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD certmanager /ko-app/certmanager +ENTRYPOINT ["/ko-app/certmanager"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/controller/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/controller/Dockerfile new file mode 100644 index 0000000000..3335c42d6a --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/controller/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD controller /ko-app/controller +ENTRYPOINT ["/ko-app/controller"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/istio/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/istio/Dockerfile new file mode 100644 index 0000000000..215ed64808 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/istio/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD istio /ko-app/istio +ENTRYPOINT ["/ko-app/istio"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/nscert/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/nscert/Dockerfile new file mode 100644 index 0000000000..962d2783c8 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/nscert/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD nscert /ko-app/nscert +ENTRYPOINT ["/ko-app/nscert"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/queue/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/queue/Dockerfile new file mode 100644 index 0000000000..35582082b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/queue/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD queue /ko-app/queue +ENTRYPOINT ["/ko-app/queue"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/webhook/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/webhook/Dockerfile new file mode 100644 index 0000000000..0671b0bd7c --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-images/webhook/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD webhook /ko-app/webhook +ENTRYPOINT ["/ko-app/webhook"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/autoscale/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/autoscale/Dockerfile new file mode 100644 index 0000000000..a7d5f88d39 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/autoscale/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD autoscale /ko-app/autoscale +ENTRYPOINT ["/ko-app/autoscale"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/failing/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/failing/Dockerfile new file mode 100644 index 0000000000..25363fe923 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/failing/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD failing /ko-app/failing +ENTRYPOINT ["/ko-app/failing"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/flaky/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/flaky/Dockerfile new file mode 100644 index 0000000000..9e5e56d6db --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/flaky/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD flaky /ko-app/flaky +ENTRYPOINT ["/ko-app/flaky"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/grpc-ping/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/grpc-ping/Dockerfile new file mode 100644 index 0000000000..462211eb6e --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/grpc-ping/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD grpc-ping /ko-app/grpc-ping +ENTRYPOINT ["/ko-app/grpc-ping"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/hellovolume/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/hellovolume/Dockerfile new file mode 100644 index 0000000000..2618179d72 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/hellovolume/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD hellovolume /ko-app/hellovolume +ENTRYPOINT ["/ko-app/hellovolume"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/helloworld/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/helloworld/Dockerfile new file mode 100644 index 0000000000..7ee9b90be8 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/helloworld/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD helloworld /ko-app/helloworld +ENTRYPOINT ["/ko-app/helloworld"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/httpproxy/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/httpproxy/Dockerfile new file mode 100644 index 0000000000..7546d53846 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/httpproxy/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD httpproxy /ko-app/httpproxy +ENTRYPOINT ["/ko-app/httpproxy"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/observed-concurrency/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/observed-concurrency/Dockerfile new file mode 100644 index 0000000000..983bf77985 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/observed-concurrency/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD observed-concurrency /ko-app/observed-concurrency +ENTRYPOINT ["/ko-app/observed-concurrency"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv1/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv1/Dockerfile new file mode 100644 index 0000000000..191cd44097 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv1/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD pizzaplanetv1 /ko-app/pizzaplanetv1 +ENTRYPOINT ["/ko-app/pizzaplanetv1"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv2/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv2/Dockerfile new file mode 100644 index 0000000000..5fb0acba88 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/pizzaplanetv2/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD pizzaplanetv2 /ko-app/pizzaplanetv2 +ENTRYPOINT ["/ko-app/pizzaplanetv2"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/runtime/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/runtime/Dockerfile new file mode 100644 index 0000000000..7c1a6f7ba7 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/runtime/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD runtime /ko-app/runtime +ENTRYPOINT ["/ko-app/runtime"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/singlethreaded/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/singlethreaded/Dockerfile new file mode 100644 index 0000000000..23f9052f86 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/singlethreaded/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! 
This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD singlethreaded /ko-app/singlethreaded +ENTRYPOINT ["/ko-app/singlethreaded"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/timeout/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/timeout/Dockerfile new file mode 100644 index 0000000000..3793303de9 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/timeout/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD timeout /ko-app/timeout +ENTRYPOINT ["/ko-app/timeout"] diff --git a/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/wsserver/Dockerfile b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/wsserver/Dockerfile new file mode 100644 index 0000000000..6a16154edd --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/ci-operator/knative-test-images/wsserver/Dockerfile @@ -0,0 +1,6 @@ +# Do not edit! This file was generated via Makefile +FROM registry.svc.ci.openshift.org/openshift/origin-v3.11:base +USER 65532 + +ADD wsserver /ko-app/wsserver +ENTRYPOINT ["/ko-app/wsserver"] diff --git a/test/vendor/knative.dev/serving/openshift/e2e-tests-openshift.sh b/test/vendor/knative.dev/serving/openshift/e2e-tests-openshift.sh new file mode 100755 index 0000000000..c5be86ccb3 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/e2e-tests-openshift.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC1090 +source "$(dirname "$0")/../test/e2e-common.sh" +source "$(dirname "$0")/release/resolve.sh" + +set -x + +readonly SERVING_NAMESPACE=knative-serving +readonly SERVICEMESH_NAMESPACE=knative-serving-ingress + +# A golang template to point the tests to the right image coordinates. 
+# {{.Name}} is the name of the image, for example 'autoscale'. +readonly TEST_IMAGE_TEMPLATE="registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-test-{{.Name}}" + +# The OLM global namespace was moved to openshift-marketplace since v4.2 +# ref: https://jira.coreos.com/browse/OLM-1190 +readonly OLM_NAMESPACE="openshift-marketplace" + +env + +function scale_up_workers(){ + local cluster_api_ns="openshift-machine-api" + + oc get machineset -n ${cluster_api_ns} --show-labels + + # Get the name of the first machineset that has at least 1 replica + local machineset + machineset=$(oc get machineset -n ${cluster_api_ns} -o custom-columns="name:{.metadata.name},replicas:{.spec.replicas}" | grep " 1" | head -n 1 | awk '{print $1}') + # Bump the number of replicas to 6 (+ 1 + 1 == 8 workers) + oc patch machineset -n ${cluster_api_ns} "${machineset}" -p '{"spec":{"replicas":6}}' --type=merge + wait_until_machineset_scales_up ${cluster_api_ns} "${machineset}" 6 +} + +# Waits until the machineset in the given namespaces scales up to the +# desired number of replicas +# Parameters: $1 - namespace +# $2 - machineset name +# $3 - desired number of replicas +function wait_until_machineset_scales_up() { + echo -n "Waiting until machineset $2 in namespace $1 scales up to $3 replicas" + for _ in {1..150}; do # timeout after 15 minutes + local available + available=$(oc get machineset -n "$1" "$2" -o jsonpath="{.status.availableReplicas}") + if [[ ${available} -eq $3 ]]; then + echo -e "\nMachineSet $2 in namespace $1 successfully scaled up to $3 replicas" + return 0 + fi + echo -n "." 
+ sleep 6 + done + echo - "Error: timeout waiting for machineset $2 in namespace $1 to scale up to $3 replicas" + return 1 +} + +# Waits until the given hostname resolves via DNS +# Parameters: $1 - hostname +function wait_until_hostname_resolves() { + echo -n "Waiting until hostname $1 resolves via DNS" + for _ in {1..150}; do # timeout after 15 minutes + local output + output=$(host -t a "$1" | grep 'has address') + if [[ -n "${output}" ]]; then + echo -e "\n${output}" + return 0 + fi + echo -n "." + sleep 6 + done + echo -e "\n\nERROR: timeout waiting for hostname $1 to resolve via DNS" + return 1 +} + +# Loops until duration (car) is exceeded or command (cdr) returns non-zero +function timeout() { + SECONDS=0; TIMEOUT=$1; shift + while eval $*; do + sleep 5 + [[ $SECONDS -gt $TIMEOUT ]] && echo "ERROR: Timed out" && return 1 + done + return 0 +} + +function install_knative(){ + header "Installing Knative" + + oc new-project $SERVING_NAMESPACE + + # Install CatalogSource in OLM namespace + envsubst < openshift/olm/knative-serving.catalogsource.yaml | oc apply -n $OLM_NAMESPACE -f - + timeout 900 '[[ $(oc get pods -n $OLM_NAMESPACE | grep -c serverless) -eq 0 ]]' || return 1 + wait_until_pods_running $OLM_NAMESPACE + + # Deploy Serverless Operator + deploy_serverless_operator + + # Wait for the CRD to appear + timeout 900 '[[ $(oc get crd | grep -c knativeservings) -eq 0 ]]' || return 1 + + # Install Knative Serving + cat <<-EOF | oc apply -f - +apiVersion: serving.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + name: knative-serving + namespace: ${SERVING_NAMESPACE} +EOF + + # Wait for 4 pods to appear first + timeout 900 '[[ $(oc get pods -n $SERVING_NAMESPACE --no-headers | wc -l) -lt 4 ]]' || return 1 + wait_until_pods_running $SERVING_NAMESPACE || return 1 + + wait_until_service_has_external_ip $SERVICEMESH_NAMESPACE istio-ingressgateway || fail_test "Ingress has no external IP" + wait_until_hostname_resolves "$(kubectl get svc -n 
$SERVICEMESH_NAMESPACE istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')" + + header "Knative Installed successfully" +} + +function deploy_serverless_operator(){ + local name="serverless-operator" + local operator_ns + operator_ns=$(kubectl get og --all-namespaces | grep global-operators | awk '{print $1}') + + cat <<-EOF | oc apply -f - +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ${name}-subscription + namespace: ${operator_ns} +spec: + source: ${name} + sourceNamespace: $OLM_NAMESPACE + name: ${name} + channel: techpreview +EOF +} + +function run_e2e_tests(){ + echo ">> Creating test resources for OpenShift (test/config/)" + # Removing unneeded test resources. + rm test/config/100-istio-default-domain.yaml + oc apply -f test/config + + oc adm policy add-scc-to-user privileged -z default -n serving-tests + oc adm policy add-scc-to-user privileged -z default -n serving-tests-alt + # adding scc for anyuid to test TestShouldRunAsUserContainerDefault. + oc adm policy add-scc-to-user anyuid -z default -n serving-tests + + header "Running tests" + failed=0 + + # Needed because tests assume that istio is found in "istio-system" + export GATEWAY_NAMESPACE_OVERRIDE="$SERVICEMESH_NAMESPACE" + + report_go_test \ + -v -tags=e2e -count=1 -timeout=35m -short -parallel=3 \ + ./test/e2e \ + --kubeconfig "$KUBECONFIG" \ + --imagetemplate "$TEST_IMAGE_TEMPLATE" \ + --resolvabledomain || failed=1 + + report_go_test \ + -v -tags=e2e -count=1 -timeout=35m -parallel=3 \ + ./test/conformance/runtime/... \ + --kubeconfig "$KUBECONFIG" \ + --imagetemplate "$TEST_IMAGE_TEMPLATE" \ + --resolvabledomain || failed=1 + + report_go_test \ + -v -tags=e2e -count=1 -timeout=35m -parallel=3 \ + ./test/conformance/api/... 
\ + --kubeconfig "$KUBECONFIG" \ + --imagetemplate "$TEST_IMAGE_TEMPLATE" \ + --resolvabledomain || failed=1 + + return $failed +} + +scale_up_workers || exit 1 + +failed=0 +(( !failed )) && install_knative || failed=1 +(( !failed )) && run_e2e_tests || failed=1 +(( failed )) && dump_cluster_state +(( failed )) && exit 1 + +success diff --git a/test/vendor/knative.dev/serving/openshift/olm/README.md b/test/vendor/knative.dev/serving/openshift/olm/README.md new file mode 100644 index 0000000000..7bbca41327 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/olm/README.md @@ -0,0 +1,49 @@ + +This is the `CatalogSource` for the [knative-serving-operator](https://github.com/openshift-knative/knative-serving-operator). + +WARNING: The `knative-serving` operator refers to some Istio CRD's, so +either install istio or... + + kubectl apply -f https://github.com/knative/serving/releases/download/v0.5.1/istio-crds.yaml + +To install this `CatalogSource`: + + OLM=$(kubectl get pods --all-namespaces | grep olm-operator | head -1 | awk '{print $1}') + kubectl apply -n $OLM -f https://raw.githubusercontent.com/openshift/knative-serving/release-v0.6.0/openshift/olm/knative-serving.catalogsource.yaml + +To install Knative Serving, either use the console, or apply the +following yaml: + +``` +cat <<-EOF | kubectl apply -f - +--- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: knative-serving + namespace: knative-serving +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: knative-serving-operator-sub + generateName: knative-serving-operator- + namespace: knative-serving +spec: + source: knative-serving-operator + sourceNamespace: $OLM + name: knative-serving-operator + channel: alpha +--- +apiVersion: serving.knative.dev/v1alpha1 +kind: KnativeServing +metadata: + name: knative-serving + namespace: knative-serving +EOF +``` diff --git 
a/test/vendor/knative.dev/serving/openshift/olm/knative-serving.catalogsource.yaml b/test/vendor/knative.dev/serving/openshift/olm/knative-serving.catalogsource.yaml new file mode 100644 index 0000000000..b6981a21bb --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/olm/knative-serving.catalogsource.yaml @@ -0,0 +1,498 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: serverless-operator + +data: + customResourceDefinitions: |- + - apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: knativeservings.serving.knative.dev + spec: + additionalPrinterColumns: + - JSONPath: .status.version + name: Version + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" + name: Reason + type: string + group: serving.knative.dev + names: + kind: KnativeServing + listKind: KnativeServingList + plural: knativeservings + singular: knativeserving + shortNames: + - ks + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Schema for the knativeservings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of KnativeServing + properties: + config: + additionalProperties: + additionalProperties: + type: string + type: object + description: A means to override the corresponding entries in the upstream + configmaps + type: object + type: object + status: + description: Status defines the observed state of KnativeServing + properties: + conditions: + description: The latest available observations of a resource's current + state. + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transitioned from one status to another. We use VolatileTime + in place of metav1.Time to exclude this from creating equality.Semantic + differences (all other things held constant). + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + severity: + description: Severity with which to treat failures of this type + of condition. When this is not specified, it defaults to Error. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. 
+ type: string + required: + - type + - status + type: object + type: array + version: + description: The version of the installed release + type: string + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + clusterServiceVersions: |- + - apiVersion: operators.coreos.com/v1alpha1 + kind: ClusterServiceVersion + metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "serving.knative.dev/v1alpha1", + "kind": "KnativeServing", + "metadata": { + "name": "knative-serving" + }, + "spec": { + "config": { + "autoscaler": { + "container-concurrency-target-default": "100", + "container-concurrency-target-percentage": "1.0", + "enable-scale-to-zero": "true", + "max-scale-up-rate": "10", + "panic-threshold-percentage": "200.0", + "panic-window": "6s", + "panic-window-percentage": "10.0", + "scale-to-zero-grace-period": "30s", + "stable-window": "60s", + "tick-interval": "2s" + }, + "defaults": { + "revision-cpu-limit": "1000m", + "revision-cpu-request": "400m", + "revision-memory-limit": "200M", + "revision-memory-request": "100M", + "revision-timeout-seconds": "300" + }, + "deployment": { + "registriesSkippingTagResolving": "ko.local,dev.local" + }, + "gc": { + "stale-revision-create-delay": "24h", + "stale-revision-lastpinned-debounce": "5h", + "stale-revision-minimum-generations": "1", + "stale-revision-timeout": "15h" + }, + "logging": { + "loglevel.activator": "info", + "loglevel.autoscaler": "info", + "loglevel.controller": "info", + "loglevel.queueproxy": "info", + "loglevel.webhook": "info" + }, + "observability": { + "logging.enable-var-log-collection": "false", + "metrics.backend-destination": "prometheus" + }, + "tracing": { + "backend": "none", + "sample-rate": "0.1" + } + } + } + } + ] + capabilities: Seamless Upgrades + categories: Networking,Integration & Delivery,Cloud Provider,Developer Tools + certified: "false" + containerImage: quay.io/openshift-knative/serverless-operator:v1.2.0 + createdAt: 
"2019-07-27T17:00:00Z" + description: |- + Provides a collection of API's based on Knative to support deploying and serving + of serverless applications and functions. + repository: https://github.com/openshift-knative/serverless-operator + support: Red Hat, Inc. + name: serverless-operator.v1.2.0 + namespace: placeholder + spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Represents an installation of a particular version of Knative Serving + displayName: Knative Serving + kind: KnativeServing + name: knativeservings.serving.knative.dev + statusDescriptors: + - description: The version of Knative Serving installed + displayName: Version + path: version + - description: Conditions of Knative Serving installed + displayName: Conditions + path: conditions + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes.conditions' + version: v1alpha1 + required: + - description: A list of namespaces in Service Mesh + displayName: Istio Service Mesh Member Roll + kind: ServiceMeshMemberRoll + name: servicemeshmemberrolls.maistra.io + version: v1 + - description: An Istio control plane installation + displayName: Istio Service Mesh Control Plane + kind: ServiceMeshControlPlane + name: servicemeshcontrolplanes.maistra.io + version: v1 + description: |- + The Red Hat Serverless Operator provides a collection of API's to + install various "serverless" services. + This is a **[Tech Preview release](https://access.redhat.com/support/offerings/techpreview)!** + # Knative Serving + Knative Serving builds on Kubernetes to support deploying and + serving of serverless applications and functions. Serving is easy + to get started with and scales to support advanced scenarios. 
The + Knative Serving project provides middleware primitives that + enable: + - Rapid deployment of serverless containers + - Automatic scaling up and down to zero + - Routing and network programming for Istio components + - Point-in-time snapshots of deployed code and configurations + ## Prerequisites + The Serverless Operator's provided APIs such as Knative Serving + have certain requirements with regards to the size of the underlying + cluster and a working installation of Service Mesh. See the [installation + section](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.1/html-single/serverless/index#installing-openshift-serverless) + of the Serverless documentation for more info. + ## Further Information + For documentation on using Knative Serving, see the + [serving section](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.1/html-single/serverless/index#knative-serving_serverless-architecture) of the + [Serverless documentation site](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.1/html-single/serverless/index). 
+ displayName: OpenShift Serverless Operator + icon: + - base64data: PHN2ZyBpZD0iTGF5ZXJfMSIgZGF0YS1uYW1lPSJMYXllciAxIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAxOTIgMTQ1Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2UwMDt9PC9zdHlsZT48L2RlZnM+PHRpdGxlPlJlZEhhdC1Mb2dvLUhhdC1Db2xvcjwvdGl0bGU+PHBhdGggZD0iTTE1Ny43Nyw2Mi42MWExNCwxNCwwLDAsMSwuMzEsMy40MmMwLDE0Ljg4LTE4LjEsMTcuNDYtMzAuNjEsMTcuNDZDNzguODMsODMuNDksNDIuNTMsNTMuMjYsNDIuNTMsNDRhNi40Myw2LjQzLDAsMCwxLC4yMi0xLjk0bC0zLjY2LDkuMDZhMTguNDUsMTguNDUsMCwwLDAtMS41MSw3LjMzYzAsMTguMTEsNDEsNDUuNDgsODcuNzQsNDUuNDgsMjAuNjksMCwzNi40My03Ljc2LDM2LjQzLTIxLjc3LDAtMS4wOCwwLTEuOTQtMS43My0xMC4xM1oiLz48cGF0aCBjbGFzcz0iY2xzLTEiIGQ9Ik0xMjcuNDcsODMuNDljMTIuNTEsMCwzMC42MS0yLjU4LDMwLjYxLTE3LjQ2YTE0LDE0LDAsMCwwLS4zMS0zLjQybC03LjQ1LTMyLjM2Yy0xLjcyLTcuMTItMy4yMy0xMC4zNS0xNS43My0xNi42QzEyNC44OSw4LjY5LDEwMy43Ni41LDk3LjUxLjUsOTEuNjkuNSw5MCw4LDgzLjA2LDhjLTYuNjgsMC0xMS42NC01LjYtMTcuODktNS42LTYsMC05LjkxLDQuMDktMTIuOTMsMTIuNSwwLDAtOC40MSwyMy43Mi05LjQ5LDI3LjE2QTYuNDMsNi40MywwLDAsMCw0Mi41Myw0NGMwLDkuMjIsMzYuMywzOS40NSw4NC45NCwzOS40NU0xNjAsNzIuMDdjMS43Myw4LjE5LDEuNzMsOS4wNSwxLjczLDEwLjEzLDAsMTQtMTUuNzQsMjEuNzctMzYuNDMsMjEuNzdDNzguNTQsMTA0LDM3LjU4LDc2LjYsMzcuNTgsNTguNDlhMTguNDUsMTguNDUsMCwwLDEsMS41MS03LjMzQzIyLjI3LDUyLC41LDU1LC41LDc0LjIyYzAsMzEuNDgsNzQuNTksNzAuMjgsMTMzLjY1LDcwLjI4LDQ1LjI4LDAsNTYuNy0yMC40OCw1Ni43LTM2LjY1LDAtMTIuNzItMTEtMjcuMTYtMzAuODMtMzUuNzgiLz48L3N2Zz4= + mediatype: image/svg+xml + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' + serviceAccountName: knative-serving-operator + - rules: + - apiGroups: + - "" + resources: + - pods + - services + - events + - configmaps + verbs: + - "*" + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - replicasets + verbs: + - "*" + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" + - apiGroups: + - 
networking.k8s.io + resources: + - networkpolicies + verbs: + - "*" + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - networking.internal.knative.dev + resources: + - clusteringresses + - clusteringresses/status + - clusteringresses/finalizers + - ingresses + - ingresses/status + - ingresses/finalizers + verbs: + - "*" + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + - routes/status + - routes/finalizers + verbs: + - "*" + - apiGroups: + - serving.knative.dev + resources: + - knativeservings + - knativeservings/finalizers + verbs: + - '*' + - apiGroups: + - maistra.io + resources: + - servicemeshmemberrolls + verbs: + - '*' + serviceAccountName: knative-openshift-ingress + deployments: + - name: knative-serving-operator + spec: + replicas: 1 + selector: + matchLabels: + name: knative-serving-operator + strategy: {} + template: + metadata: + labels: + name: knative-serving-operator + spec: + containers: + - command: + - knative-serving-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: knative-serving-operator + - name: IMAGE_QUEUE + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-queue + - name: IMAGE_activator + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-activator + - name: IMAGE_autoscaler + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-autoscaler + - name: IMAGE_autoscaler-hpa + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-autoscaler-hpa + - name: IMAGE_controller + value: 
registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-controller + - name: IMAGE_networking-istio + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-istio + - name: IMAGE_webhook + value: registry.svc.ci.openshift.org/${OPENSHIFT_BUILD_NAMESPACE}/stable:knative-serving-webhook + image: quay.io/openshift-knative/knative-serving-operator:v0.9.0-1.2.0-05 + args: + - --filename=https://raw.githubusercontent.com/openshift/knative-serving/release-v0.12.1/openshift/release/knative-serving-ci.yaml + imagePullPolicy: Always + name: knative-serving-operator + resources: {} + serviceAccountName: knative-serving-operator + - name: knative-openshift-ingress + spec: + replicas: 1 + selector: + matchLabels: + name: knative-openshift-ingress + template: + metadata: + labels: + name: knative-openshift-ingress + spec: + serviceAccountName: knative-openshift-ingress + containers: + - name: knative-openshift-ingress + image: quay.io/openshift-knative/knative-openshift-ingress:v0.1.2 + command: + - knative-openshift-ingress + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + value: "" # watch all namespaces for ClusterIngress + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "knative-openshift-ingress" + permissions: + - rules: + - apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - '*' + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - apiGroups: + - apps + resourceNames: + - knative-serving-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - serving.knative.dev + resources: + - '*' + verbs: + 
- '*' + serviceAccountName: knative-serving-operator + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - serverless + - FaaS + - microservices + - scale to zero + - knative + - serving + links: + - name: Documentation + url: https://access.redhat.com/documentation/en-us/openshift_container_platform/4.1/html-single/serverless/index + - name: Source Repository + url: https://github.com/openshift-knative/serverless-operator + maintainers: + - email: serverless-support@redhat.com + name: Serverless Team + maturity: alpha + provider: + name: Red Hat, Inc. + version: 1.2.0 + packages: |- + - packageName: serverless-operator + channels: + - name: techpreview + currentCSV: serverless-operator.v1.2.0 +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: CatalogSource +metadata: + name: serverless-operator +spec: + configMap: serverless-operator + displayName: Serverless Operator + publisher: Red Hat + sourceType: internal \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/openshift/patches/003-routeretry.patch b/test/vendor/knative.dev/serving/openshift/patches/003-routeretry.patch new file mode 100644 index 0000000000..b26cc51bbf --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/patches/003-routeretry.patch @@ -0,0 +1,79 @@ +diff --git a/test/v1/route.go b/test/v1/route.go +index 9c9a89034..353cbb3b4 100644 +--- a/test/v1/route.go ++++ b/test/v1/route.go +@@ -19,6 +19,7 @@ package v1 + import ( + "context" + "fmt" ++ "net/http" + + "github.com/davecgh/go-spew/spew" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +@@ -116,8 +117,13 @@ func IsRouteNotReady(r *v1.Route) (bool, error) { + } + + // RetryingRouteInconsistency retries common requests seen when creating a new route ++// - 404 until the route is propagated to the proxy ++// - 503 to account for Openshift 
route inconsistency (https://jira.coreos.com/browse/SRVKS-157) + func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { ++ if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable { ++ return false, nil ++ } + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. + return innerCheck(resp) + } +diff --git a/test/v1alpha1/route.go b/test/v1alpha1/route.go +index ead307fcb..1431d2ef0 100644 +--- a/test/v1alpha1/route.go ++++ b/test/v1alpha1/route.go +@@ -21,6 +21,7 @@ package v1alpha1 + import ( + "context" + "fmt" ++ "net/http" + + "github.com/davecgh/go-spew/spew" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +@@ -51,9 +52,13 @@ func CreateRoute(t pkgTest.T, clients *test.Clients, names test.ResourceNames, f + } + + // RetryingRouteInconsistency retries common requests seen when creating a new route +-// TODO(5573): Remove this. ++// - 404 until the route is propagated to the proxy ++// - 503 to account for Openshift route inconsistency (https://jira.coreos.com/browse/SRVKS-157) + func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { ++ if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable { ++ return false, nil ++ } + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. 
+ return innerCheck(resp) + } +diff --git a/test/v1beta1/route.go b/test/v1beta1/route.go +index c9c47da64..31121302f 100644 +--- a/test/v1beta1/route.go ++++ b/test/v1beta1/route.go +@@ -19,6 +19,7 @@ package v1beta1 + import ( + "context" + "fmt" ++ "net/http" + + "github.com/davecgh/go-spew/spew" + +@@ -118,8 +119,13 @@ func IsRouteNotReady(r *v1beta1.Route) (bool, error) { + } + + // RetryingRouteInconsistency retries common requests seen when creating a new route ++// - 404 until the route is propagated to the proxy ++// - 503 to account for Openshift route inconsistency (https://jira.coreos.com/browse/SRVKS-157) + func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { ++ if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable { ++ return false, nil ++ } + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. + return innerCheck(resp) + } diff --git a/test/vendor/knative.dev/serving/openshift/patches/004-grpc.patch b/test/vendor/knative.dev/serving/openshift/patches/004-grpc.patch new file mode 100644 index 0000000000..68fa1b256c --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/patches/004-grpc.patch @@ -0,0 +1,27 @@ +diff --git a/test/e2e/grpc_test.go b/test/e2e/grpc_test.go +index a358b354c..5676d6b66 100644 +--- a/test/e2e/grpc_test.go ++++ b/test/e2e/grpc_test.go +@@ -152,6 +152,7 @@ func streamTest(t *testing.T, resources *v1a1test.ResourceObjects, clients *test + func testGRPC(t *testing.T, f grpcTest, fopts ...rtesting.ServiceOption) { + t.Helper() + t.Parallel() ++ resolvable := false + cancel := logstream.Start(t) + defer cancel() + +@@ -181,12 +182,12 @@ func testGRPC(t *testing.T, f grpcTest, fopts ...rtesting.ServiceOption) { + url, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "gRPCPingReadyToServe", +- test.ServingFlags.ResolvableDomain); err != nil { ++ 
resolvable); err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't return success: %v", names.Route, url, err) + } + + host := url.Host +- if !test.ServingFlags.ResolvableDomain { ++ if !resolvable { + host = pkgTest.Flags.IngressEndpoint + if pkgTest.Flags.IngressEndpoint == "" { + host, err = ingress.GetIngressEndpoint(clients.KubeClient.Kube) diff --git a/test/vendor/knative.dev/serving/openshift/patches/005-disablehpa.patch b/test/vendor/knative.dev/serving/openshift/patches/005-disablehpa.patch new file mode 100644 index 0000000000..08153fbbb7 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/patches/005-disablehpa.patch @@ -0,0 +1,20 @@ +diff --git a/test/e2e/autoscale_test.go b/test/e2e/autoscale_test.go +index e385327ad..2b8edccd6 100644 +--- a/test/e2e/autoscale_test.go ++++ b/test/e2e/autoscale_test.go +@@ -366,7 +366,6 @@ func TestAutoscaleUpCountPods(t *testing.T) { + t.Parallel() + + classes := map[string]string{ +- "hpa": autoscaling.HPA, + "kpa": autoscaling.KPA, + } + +@@ -399,7 +398,6 @@ func TestRPSBasedAutoscaleUpCountPods(t *testing.T) { + t.Parallel() + + classes := map[string]string{ +- "hpa": autoscaling.HPA, + "kpa": autoscaling.KPA, + } + diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/.gitkeep b/test/vendor/knative.dev/serving/openshift/productization/dist-git/.gitkeep new file mode 100644 index 0000000000..a0db1a1bf8 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/.gitkeep @@ -0,0 +1 @@ +This file serves as a placeholder to ensure this directory is not removed by git. 
diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.activator b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.activator new file mode 100644 index 0000000000..bba6cbd999 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.activator @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . +ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/activator ./cmd/activator + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/activator /ko-app/activator + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-activator-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-activator-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Activator" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Activator" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Activator" + +ENTRYPOINT ["/ko-app/activator"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler new file mode 100644 index 0000000000..e1bb5f54b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/autoscaler ./cmd/autoscaler + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/autoscaler /ko-app/autoscaler + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-autoscaler-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-autoscaler-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Autoscaler" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Autoscaler" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Autoscaler" + +ENTRYPOINT ["/ko-app/autoscaler"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler-hpa b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler-hpa new file mode 100644 index 0000000000..abb2a78d31 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.autoscaler-hpa @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/autoscaler-hpa ./cmd/autoscaler-hpa + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/autoscaler-hpa /ko-app/autoscaler-hpa + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-autoscaler-hpa-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-autoscaler-hpa-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Autoscaler HPA" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Autoscaler HPA" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Autoscaler HPA" + +ENTRYPOINT ["/ko-app/autoscaler-hpa"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.controller b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.controller new file mode 100644 index 0000000000..062b9385ae --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.controller @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/controller ./cmd/controller + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/controller /ko-app/controller + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-controller-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-controller-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Controller" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Controller" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Controller" + +ENTRYPOINT ["/ko-app/controller"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-certmanager b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-certmanager new file mode 100644 index 0000000000..dbf5b947ec --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-certmanager @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/networking-certmanager ./cmd/networking/certmanager + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/networking-certmanager /ko-app/networking-certmanager + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-networking-certmanager-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-networking-certmanager-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Networking CertManager" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Networking CertManager" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Networking CertManager" + +ENTRYPOINT ["/ko-app/networking-certmanager"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-istio b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-istio new file mode 100644 index 0000000000..4a41791cc1 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-istio @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/networking-istio ./cmd/networking/istio + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/networking-istio /ko-app/networking-istio + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-networking-istio-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-networking-istio-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Networking Istio" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Networking Istio" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Networking Istio" + +ENTRYPOINT ["/ko-app/networking-istio"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-nscert b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-nscert new file mode 100644 index 0000000000..7295a1586a --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.networking-nscert @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/networking-nscert ./cmd/networking/nscert + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/networking-nscert /ko-app/networking-nscert + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-networking-nscert-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-networking-nscert-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Networking NSCert" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Networking NSCert" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Networking NSCert" + +ENTRYPOINT ["/ko-app/networking-nscert"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.queue b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.queue new file mode 100644 index 0000000000..b38e5bc068 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.queue @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/queue ./cmd/queue + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/queue /ko-app/queue + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-queue-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-queue-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Queue" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Queue" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Queue" + +ENTRYPOINT ["/ko-app/queue"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.webhook b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.webhook new file mode 100644 index 0000000000..577cb06cbf --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/dist-git/Dockerfile.webhook @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/serving +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/webhook ./cmd/webhook + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/webhook /ko-app/webhook + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-serving-webhook-rhel8-container" \ + name="openshift-serverless-1-tech-preview/serving-webhook-rhel8" \ + version="v0.12.1" \ + summary="Red Hat OpenShift Serverless 1 Serving Webhook" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 Serving Webhook" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 Serving Webhook" + +ENTRYPOINT ["/ko-app/webhook"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/Dockerfile.in b/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/Dockerfile.in new file mode 100644 index 0000000000..ac70325ec3 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/Dockerfile.in @@ -0,0 +1,19 @@ +FROM rhel8/go-toolset:1.13.4 AS builder +WORKDIR /opt/app-root/src/go/src/knative.dev/$COMPONENT +COPY . . 
+ENV GOFLAGS="-mod=vendor" +RUN go build -o /tmp/$SUBCOMPONENT ./cmd/$GO_PACKAGE + +FROM ubi8-minimal:8-released +COPY --from=builder /tmp/$SUBCOMPONENT /ko-app/$SUBCOMPONENT + +LABEL \ + com.redhat.component="openshift-serverless-1-tech-preview-$COMPONENT-$SUBCOMPONENT-rhel8-container" \ + name="openshift-serverless-1-tech-preview/$COMPONENT-$SUBCOMPONENT-rhel8" \ + version="$VERSION" \ + summary="Red Hat OpenShift Serverless 1 $CAPITALIZED_COMPONENT $CAPITALIZED_SUBCOMPONENT" \ + maintainer="serverless-support@redhat.com" \ + description="Red Hat OpenShift Serverless 1 $CAPITALIZED_COMPONENT $CAPITALIZED_SUBCOMPONENT" \ + io.k8s.display-name="Red Hat OpenShift Serverless 1 $CAPITALIZED_COMPONENT $CAPITALIZED_SUBCOMPONENT" + +ENTRYPOINT ["/ko-app/$SUBCOMPONENT"] diff --git a/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/gen_dockerfiles.sh b/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/gen_dockerfiles.sh new file mode 100755 index 0000000000..9bd01bcbd1 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/productization/generate-dockerfiles/gen_dockerfiles.sh @@ -0,0 +1,20 @@ +#!/bin/bash -x + +target_dir=$1 + +component=Serving + +for subcomponent in Activator Autoscaler Autoscaler-HPA Controller \ + Networking-Istio Networking-CertManager Networking-NSCert \ + Queue Webhook; \ +do + export CAPITALIZED_COMPONENT=$component + export CAPITALIZED_SUBCOMPONENT=$(echo -e "$subcomponent" | sed -e 's/-/ /g') + export COMPONENT=$(echo -e "$component" | sed -e 's/\(.*\)/\L\1/g') + export SUBCOMPONENT=$(echo -e "$subcomponent" | sed -e 's/\(.*\)/\L\1/g') + export VERSION=$(git rev-parse --abbrev-ref HEAD | sed -r 's/release-//g') + export GO_PACKAGE=$(echo -e "$SUBCOMPONENT" | sed -e 's/networking-/networking\//g') + envsubst \ + < openshift/productization/generate-dockerfiles/Dockerfile.in \ + > ${target_dir}/Dockerfile.$SUBCOMPONENT +done diff --git 
a/test/vendor/knative.dev/serving/openshift/release/README.md b/test/vendor/knative.dev/serving/openshift/release/README.md new file mode 100644 index 0000000000..2d7a668fc1 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/README.md @@ -0,0 +1,35 @@ +# Release creation + +## Branching + +As far as branching goes, we have two use-cases: + +1. Creating a branch based off an upstream release tag. +2. Having a branch that follow upstream's HEAD and serves as a vehicle for continuous integration. + +A prerequisite for both scripts is that your local clone of the repository has a remote "upstream" +that points to the upstream repository and a remote "openshift" that points to the openshift fork. + +Run the scripts from the root of the repository. + +### Creating a branch based off an upstream release tag + +To create a clean branch from an upstream release tag, use the `create-release-branch.sh` script: + +```bash +$ ./openshift/release/create-release-branch.sh v0.4.1 release-0.4 +``` + +This will create a new branch "release-0.4" based off the tag "v0.4.1" and add all OpenShift specific +files that we need to run CI on top of it. + +### Updating the release-next branch that follow upstream's HEAD + +To update a branch to the latest HEAD of upstream use the `update-to-head.sh` script: + +```bash +$ ./openshift/release/update-to-head.sh +``` + +That will pull the latest master from upstream, rebase the current fixes on the release-next branch +on top of it, update the Openshift specific files if necessary, and then trigger CI. 
\ No newline at end of file diff --git a/test/vendor/knative.dev/serving/openshift/release/create-release-branch.sh b/test/vendor/knative.dev/serving/openshift/release/create-release-branch.sh new file mode 100755 index 0000000000..28d204a951 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/create-release-branch.sh @@ -0,0 +1,26 @@ +#!/bin/bash -e + +# Usage: create-release-branch.sh v0.4.1 release-v0.4.1 + +release=$1 +target=$2 +release_regexp="^release-v([0-9]+\.)+([0-9])$" + +if [[ ! $target =~ $release_regexp ]]; then + echo "\"$target\" is wrong format. Must have proper format like release-v0.1.2" + exit 1 +fi + +# Fetch the latest tags and checkout a new branch from the wanted tag. +git fetch upstream --tags +git checkout -b "$target" "$release" + +# Update openshift's master and take all needed files from there. +git fetch openshift master +git checkout openshift/master -- openshift OWNERS_ALIASES OWNERS Makefile content_sets.yml container.yaml +make generate-dockerfiles +make generate-p12n-dockerfiles +make RELEASE=$release generate-release +make RELEASE=ci generate-release +git add openshift OWNERS_ALIASES OWNERS Makefile content_sets.yml container.yaml +git commit -m "Add openshift specific files." 
diff --git a/test/vendor/knative.dev/serving/openshift/release/generate-release.sh b/test/vendor/knative.dev/serving/openshift/release/generate-release.sh new file mode 100755 index 0000000000..765ffbe455 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/generate-release.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +source $(dirname $0)/resolve.sh + +release=$1 +output_file="openshift/release/knative-serving-${release}.yaml" + +if [ "$release" = "ci" ]; then + image_prefix="image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-" + tag="" +else + image_prefix="quay.io/openshift-knative/knative-serving-" + tag=$release +fi + +resolve_resources config/ "$output_file" "$image_prefix" "$tag" \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/openshift/release/knative-serving-ci.yaml b/test/vendor/knative.dev/serving/openshift/release/knative-serving-ci.yaml new file mode 100644 index 0000000000..49ac259e36 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/knative-serving-ci.yaml @@ -0,0 +1,1396 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving + labels: + istio-injection: enabled + serving.knative.dev/release: devel +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-addressable-resolver + labels: + serving.knative.dev/release: devel + duck.knative.dev/addressable: "true" +rules: +- apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-istio + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" + networking.knative.dev/ingress-provider: istio +rules: + - apiGroups: ["networking.istio.io"] + resources: ["virtualservices", "gateways"] + verbs: ["get", "list", "create", "update", "delete", 
"patch", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-server-resources + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +rules: + - apiGroups: ["custom.metrics.k8s.io"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-admin + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-edit + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["create", "update", "patch", "delete"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-view + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-admin + labels: + serving.knative.dev/release: devel +aggregationRule: + clusterRoleSelectors: + - matchLabels: + serving.knative.dev/controller: "true" +rules: [] # Rules are automatically filled in by the controller manager. 
+--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-core + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" +rules: + - apiGroups: [""] + resources: ["pods", "namespaces", "secrets", "configmaps", "endpoints", "services", "events", "serviceaccounts"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: [""] + resources: ["endpoints/restricted"] # Permission for RestrictedEndpointsAdmission + verbs: ["create"] + - apiGroups: ["apps"] + resources: ["deployments", "deployments/finalizers"] # finalizers are needed for the owner reference of the webhook + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["serving.knative.dev", "autoscaling.internal.knative.dev", "networking.internal.knative.dev"] + resources: ["*", "*/status", "*/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"] + - apiGroups: ["caching.internal.knative.dev"] + resources: ["images"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-podspecable-binding + labels: + serving.knative.dev/release: devel + duck.knative.dev/podspecable: "true" +rules: +- apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch +--- 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics:system:auth-delegator + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hpa-controller-custom-metrics + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-metrics-server-resources +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: knative-serving-controller-admin + labels: + serving.knative.dev/release: devel +subjects: + - kind: ServiceAccount + name: controller + namespace: knative-serving +roleRef: + kind: ClusterRole + name: knative-serving-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: custom-metrics-auth-reader + namespace: kube-system + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: knative-ingress-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + 
networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: cluster-local-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: cluster-local-gateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + version: v1alpha1 + names: + kind: Certificate + plural: certificates + singular: certificate + categories: + - knative-internal + - networking + shortNames: + - kcert + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: configurations.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Configuration + plural: configurations + singular: configuration + categories: + - all + - knative + - serving + shortNames: + - config + - cfg + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: LatestCreated + type: string + JSONPath: 
.status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: images.caching.internal.knative.dev + labels: + knative.dev/crd-install: "true" +spec: + group: caching.internal.knative.dev + version: v1alpha1 + names: + kind: Image + plural: images + singular: image + categories: + - knative-internal + - caching + shortNames: + - img + scope: Namespaced + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ingresses.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: Ingress + plural: ingresses + singular: ingress + categories: + - knative-internal + - networking + shortNames: + - kingress + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: metrics.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + version: v1alpha1 + names: + kind: Metric + plural: metrics + singular: metric + categories: + - knative-internal + - autoscaling + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: 
".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: podautoscalers.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: PodAutoscaler + plural: podautoscalers + singular: podautoscaler + categories: + - knative-internal + - autoscaling + shortNames: + - kpa + - pa + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: DesiredScale + type: integer + JSONPath: ".status.desiredScale" + - name: ActualScale + type: integer + JSONPath: ".status.actualScale" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: revisions.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Revision + plural: revisions + singular: revision + categories: + - all + - knative + - serving + shortNames: + - rev + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Config Name + type: string + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configuration']" + - name: K8s Service Name + type: string + JSONPath: ".status.serviceName" + - name: Generation + type: string # int in string form :( + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configurationGeneration']" + - name: 
Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: routes.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Route + plural: routes + singular: route + categories: + - all + - knative + - serving + shortNames: + - rt + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: services.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Service + plural: services + singular: service + categories: + - all + - knative + - serving + shortNames: + - kservice + - ksvc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: LatestCreated + type: string + JSONPath: .status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: 
string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serverlessservices.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: ServerlessService + plural: serverlessservices + singular: serverlessservice + categories: + - knative-internal + - networking + shortNames: + - sks + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Mode + type: string + JSONPath: ".spec.mode" + - name: ServiceName + type: string + JSONPath: ".status.serviceName" + - name: PrivateServiceName + type: string + JSONPath: ".status.privateServiceName" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: config.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: 
webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: validation.webhook.serving.knative.dev +--- +apiVersion: v1 +kind: Secret +metadata: + name: webhook-certs + namespace: knative-serving + labels: + serving.knative.dev/release: devel +--- +apiVersion: caching.internal.knative.dev/v1alpha1 +kind: Image +metadata: + name: queue-proxy + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-queue +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + minReplicas: 1 + maxReplicas: 20 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: activator + role: activator + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: activator + role: activator + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: activator + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-activator + resources: + requests: + cpu: 300m + memory: 60Mi + limits: + cpu: 1000m + memory: 600Mi + env: + - name: GOGC + value: "500" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: http1 + containerPort: 8012 + - name: h2c + containerPort: 8013 + readinessProbe: &probe + httpGet: + port: 8012 + httpHeaders: + - name: k-kubelet-probe + value: "activator" + livenessProbe: *probe + terminationGracePeriodSeconds: 300 +--- +apiVersion: v1 +kind: Service +metadata: + name: activator-service + namespace: knative-serving + labels: + app: activator + serving.knative.dev/release: devel +spec: + selector: + app: activator + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 80 + targetPort: 8012 + - name: http2 + port: 81 + targetPort: 8013 + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler-hpa + namespace: knative-serving + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa +spec: + selector: + matchLabels: + app: autoscaler-hpa + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler-hpa + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-autoscaler-hpa + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + 
value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa + name: autoscaler-hpa + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: autoscaler-hpa +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: autoscaler + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: autoscaler + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-autoscaler + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: websocket + containerPort: 8080 + - name: custom-metrics + containerPort: 8443 + readinessProbe: &probe + httpGet: + port: 8080 + httpHeaders: + - name: k-kubelet-probe + value: "autoscaler" + livenessProbe: *probe + args: + - "--secure-port=8443" + - "--cert-dir=/tmp" 
+--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler + serving.knative.dev/release: devel + name: autoscaler + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 8080 + targetPort: 8080 + - name: https-custom-metrics + port: 443 + targetPort: 8443 + selector: + app: autoscaler +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + container-concurrency-target-percentage: "70" + container-concurrency-target-default: "100" + requests-per-second-target-default: "200" + target-burst-capacity: "200" + stable-window: "60s" + panic-window-percentage: "10.0" + panic-threshold-percentage: "200.0" + max-scale-up-rate: "1000.0" + max-scale-down-rate: "2.0" + enable-scale-to-zero: "true" + tick-interval: "2s" + scale-to-zero-grace-period: "30s" + enable-graceful-scaledown: "false" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-defaults + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + revision-timeout-seconds: "300" # 5 minutes + max-revision-timeout-seconds: "600" # 10 minutes + revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU) + revision-memory-request: "100M" # 100 megabytes of memory + revision-cpu-limit: "1000m" # 1 CPU (aka 1000 milli-CPU) + revision-memory-limit: "200M" # 200 megabytes of memory + container-name-template: "user-container" + container-concurrency: "0" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-deployment + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + queueSidecarImage: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-queue + _example: | + registriesSkippingTagResolving: "ko.local,dev.local" +--- +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: config-domain + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + example.com: | + example.org: | + selector: + app: nonprofit + svc.cluster.local: | + selector: + app: secret +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-gc + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + stale-revision-create-delay: "48h" + stale-revision-timeout: "15h" + stale-revision-minimum-generations: "20" + stale-revision-lastpinned-debounce: "5h" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +data: + _example: | + gateway.knative-serving.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + local-gateway.mesh: "mesh" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + zap-logger-config: | + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + loglevel.controller: "info" + loglevel.autoscaler: "info" + loglevel.queueproxy: "info" + loglevel.webhook: "info" + loglevel.activator: "info" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-network + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + 
istio.sidecar.includeOutboundIPRanges: "*" + ingress.class: "istio.ingress.networking.knative.dev" + certificate.class: "cert-manager.certificate.networking.internal.knative.dev" + domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}" + tagTemplate: "{{.Tag}}-{{.Name}}" + autoTLS: "Disabled" + httpProtocol: "Enabled" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + logging.enable-var-log-collection: "false" + logging.revision-url-template: | + http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) + logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + logging.enable-probe-request-log: "false" + metrics.backend-destination: prometheus + metrics.request-metrics-backend-destination: prometheus + metrics.stackdriver-project-id: "" + metrics.allow-stackdriver-custom-metrics: "false" + profiling.enable: "false" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-tracing + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + backend: "none" + zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + stackdriver-project-id: "my-project" + debug: "false" + sample-rate: "0.1" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: controller + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: controller + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: controller + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-controller + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1000Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: controller + serving.knative.dev/release: devel + name: controller + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: controller +--- +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +spec: + service: + name: autoscaler + namespace: knative-serving + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: networking-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + matchLabels: + app: networking-istio + 
template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + sidecar.istio.io/inject: "false" + labels: + app: networking-istio + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: networking-istio + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-istio + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: webhook + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: webhook + role: webhook + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: webhook + role: webhook + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: webhook + image: image-registry.openshift-image-registry.svc:5000/knative-serving/knative-serving-webhook + resources: + requests: + cpu: 20m + memory: 20Mi + limits: + cpu: 200m + memory: 200Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: 
webhook + serving.knative.dev/release: devel + name: webhook + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + role: webhook diff --git a/test/vendor/knative.dev/serving/openshift/release/knative-serving-v0.12.1.yaml b/test/vendor/knative.dev/serving/openshift/release/knative-serving-v0.12.1.yaml new file mode 100644 index 0000000000..185c8a274f --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/knative-serving-v0.12.1.yaml @@ -0,0 +1,1396 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving + labels: + istio-injection: enabled + serving.knative.dev/release: devel +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-addressable-resolver + labels: + serving.knative.dev/release: devel + duck.knative.dev/addressable: "true" +rules: +- apiGroups: + - serving.knative.dev + resources: + - routes + - routes/status + - services + - services/status + verbs: + - get + - list + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-istio + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" + networking.knative.dev/ingress-provider: istio +rules: + - apiGroups: ["networking.istio.io"] + resources: ["virtualservices", "gateways"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: custom-metrics-server-resources + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +rules: + - apiGroups: ["custom.metrics.k8s.io"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-admin + labels: + 
rbac.authorization.k8s.io/aggregate-to-admin: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["*"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-edit + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["create", "update", "patch", "delete"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-namespaced-view + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + serving.knative.dev/release: devel +rules: + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"] + resources: ["*"] + verbs: ["get", "list", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-admin + labels: + serving.knative.dev/release: devel +aggregationRule: + clusterRoleSelectors: + - matchLabels: + serving.knative.dev/controller: "true" +rules: [] # Rules are automatically filled in by the controller manager. 
+--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-core + labels: + serving.knative.dev/release: devel + serving.knative.dev/controller: "true" +rules: + - apiGroups: [""] + resources: ["pods", "namespaces", "secrets", "configmaps", "endpoints", "services", "events", "serviceaccounts"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: [""] + resources: ["endpoints/restricted"] # Permission for RestrictedEndpointsAdmission + verbs: ["create"] + - apiGroups: ["apps"] + resources: ["deployments", "deployments/finalizers"] # finalizers are needed for the owner reference of the webhook + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["serving.knative.dev", "autoscaling.internal.knative.dev", "networking.internal.knative.dev"] + resources: ["*", "*/status", "*/finalizers"] + verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"] + - apiGroups: ["caching.internal.knative.dev"] + resources: ["images"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: knative-serving-podspecable-binding + labels: + serving.knative.dev/release: devel + duck.knative.dev/podspecable: "true" +rules: +- apiGroups: + - serving.knative.dev + resources: + - configurations + - services + verbs: + - list + - watch + - patch +--- 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics:system:auth-delegator + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: hpa-controller-custom-metrics + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: custom-metrics-server-resources +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: knative-serving-controller-admin + labels: + serving.knative.dev/release: devel +subjects: + - kind: ServiceAccount + name: controller + namespace: knative-serving +roleRef: + kind: ClusterRole + name: knative-serving-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: custom-metrics-auth-reader + namespace: kube-system + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: controller + namespace: knative-serving +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: knative-ingress-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + 
networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: cluster-local-gateway + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + istio: cluster-local-gateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + version: v1alpha1 + names: + kind: Certificate + plural: certificates + singular: certificate + categories: + - knative-internal + - networking + shortNames: + - kcert + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: configurations.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Configuration + plural: configurations + singular: configuration + categories: + - all + - knative + - serving + shortNames: + - config + - cfg + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: LatestCreated + type: string + JSONPath: 
.status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: images.caching.internal.knative.dev + labels: + knative.dev/crd-install: "true" +spec: + group: caching.internal.knative.dev + version: v1alpha1 + names: + kind: Image + plural: images + singular: image + categories: + - knative-internal + - caching + shortNames: + - img + scope: Namespaced + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ingresses.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: Ingress + plural: ingresses + singular: ingress + categories: + - knative-internal + - networking + shortNames: + - kingress + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: metrics.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + version: v1alpha1 + names: + kind: Metric + plural: metrics + singular: metric + categories: + - knative-internal + - autoscaling + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + JSONPath: 
".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: podautoscalers.autoscaling.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: autoscaling.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: PodAutoscaler + plural: podautoscalers + singular: podautoscaler + categories: + - knative-internal + - autoscaling + shortNames: + - kpa + - pa + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: DesiredScale + type: integer + JSONPath: ".status.desiredScale" + - name: ActualScale + type: integer + JSONPath: ".status.actualScale" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: revisions.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Revision + plural: revisions + singular: revision + categories: + - all + - knative + - serving + shortNames: + - rev + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Config Name + type: string + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configuration']" + - name: K8s Service Name + type: string + JSONPath: ".status.serviceName" + - name: Generation + type: string # int in string form :( + JSONPath: ".metadata.labels['serving\\.knative\\.dev/configurationGeneration']" + - name: 
Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: routes.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Route + plural: routes + singular: route + categories: + - all + - knative + - serving + shortNames: + - rt + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: services.serving.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" + duck.knative.dev/addressable: "true" + duck.knative.dev/podspecable: "true" +spec: + group: serving.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + - name: v1beta1 + served: true + storage: false + - name: v1 + served: true + storage: false + names: + kind: Service + plural: services + singular: service + categories: + - all + - knative + - serving + shortNames: + - kservice + - ksvc + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: URL + type: string + JSONPath: .status.url + - name: LatestCreated + type: string + JSONPath: .status.latestCreatedRevisionName + - name: LatestReady + type: string + JSONPath: .status.latestReadyRevisionName + - name: Ready + type: 
string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: serverlessservices.networking.internal.knative.dev + labels: + serving.knative.dev/release: devel + knative.dev/crd-install: "true" +spec: + group: networking.internal.knative.dev + versions: + - name: v1alpha1 + served: true + storage: true + names: + kind: ServerlessService + plural: serverlessservices + singular: serverlessservice + categories: + - knative-internal + - networking + shortNames: + - sks + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Mode + type: string + JSONPath: ".spec.mode" + - name: ServiceName + type: string + JSONPath: ".status.serviceName" + - name: PrivateServiceName + type: string + JSONPath: ".status.privateServiceName" + - name: Ready + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].status" + - name: Reason + type: string + JSONPath: ".status.conditions[?(@.type=='Ready')].reason" +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: config.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: config.webhook.serving.knative.dev + namespaceSelector: + matchExpressions: + - key: serving.knative.dev/release + operator: Exists +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: 
webhook.serving.knative.dev +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.webhook.serving.knative.dev + labels: + serving.knative.dev/release: devel +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook + namespace: knative-serving + failurePolicy: Fail + sideEffects: None + name: validation.webhook.serving.knative.dev +--- +apiVersion: v1 +kind: Secret +metadata: + name: webhook-certs + namespace: knative-serving + labels: + serving.knative.dev/release: devel +--- +apiVersion: caching.internal.knative.dev/v1alpha1 +kind: Image +metadata: + name: queue-proxy + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + image: quay.io/openshift-knative/knative-serving-queue:v0.12.1 +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + minReplicas: 1 + maxReplicas: 20 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: activator + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: activator + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: activator + role: activator + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: activator + role: activator + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: activator + image: quay.io/openshift-knative/knative-serving-activator:v0.12.1 + resources: + requests: + cpu: 300m + memory: 60Mi + limits: + cpu: 1000m + memory: 600Mi + env: + - name: GOGC + value: "500" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + 
fieldPath: status.podIP + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: http1 + containerPort: 8012 + - name: h2c + containerPort: 8013 + readinessProbe: &probe + httpGet: + port: 8012 + httpHeaders: + - name: k-kubelet-probe + value: "activator" + livenessProbe: *probe + terminationGracePeriodSeconds: 300 +--- +apiVersion: v1 +kind: Service +metadata: + name: activator-service + namespace: knative-serving + labels: + app: activator + serving.knative.dev/release: devel +spec: + selector: + app: activator + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 80 + targetPort: 8012 + - name: http2 + port: 81 + targetPort: 8013 + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler-hpa + namespace: knative-serving + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa +spec: + selector: + matchLabels: + app: autoscaler-hpa + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler-hpa + image: quay.io/openshift-knative/knative-serving-autoscaler-hpa:v0.12.1 + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - 
name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler-hpa + serving.knative.dev/release: devel + autoscaling.knative.dev/autoscaler-provider: hpa + name: autoscaler-hpa + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: autoscaler-hpa +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + replicas: 1 + selector: + matchLabels: + app: autoscaler + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: autoscaler + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: autoscaler + image: quay.io/openshift-knative/knative-serving-autoscaler:v0.12.1 + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 + - name: websocket + containerPort: 8080 + - name: custom-metrics + containerPort: 8443 + readinessProbe: &probe + httpGet: + port: 8080 + httpHeaders: + - name: k-kubelet-probe + value: "autoscaler" + livenessProbe: *probe + args: + - "--secure-port=8443" + - "--cert-dir=/tmp" +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: autoscaler + serving.knative.dev/release: devel + name: 
autoscaler + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: http + port: 8080 + targetPort: 8080 + - name: https-custom-metrics + port: 443 + targetPort: 8443 + selector: + app: autoscaler +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + container-concurrency-target-percentage: "70" + container-concurrency-target-default: "100" + requests-per-second-target-default: "200" + target-burst-capacity: "200" + stable-window: "60s" + panic-window-percentage: "10.0" + panic-threshold-percentage: "200.0" + max-scale-up-rate: "1000.0" + max-scale-down-rate: "2.0" + enable-scale-to-zero: "true" + tick-interval: "2s" + scale-to-zero-grace-period: "30s" + enable-graceful-scaledown: "false" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-defaults + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + revision-timeout-seconds: "300" # 5 minutes + max-revision-timeout-seconds: "600" # 10 minutes + revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU) + revision-memory-request: "100M" # 100 megabytes of memory + revision-cpu-limit: "1000m" # 1 CPU (aka 1000 milli-CPU) + revision-memory-limit: "200M" # 200 megabytes of memory + container-name-template: "user-container" + container-concurrency: "0" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-deployment + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + queueSidecarImage: quay.io/openshift-knative/knative-serving-queue:v0.12.1 + _example: | + registriesSkippingTagResolving: "ko.local,dev.local" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-domain + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + example.com: | 
+ example.org: | + selector: + app: nonprofit + svc.cluster.local: | + selector: + app: secret +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-gc + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + stale-revision-create-delay: "48h" + stale-revision-timeout: "15h" + stale-revision-minimum-generations: "20" + stale-revision-lastpinned-debounce: "5h" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +data: + _example: | + gateway.knative-serving.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local" + local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local" + local-gateway.mesh: "mesh" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + zap-logger-config: | + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + loglevel.controller: "info" + loglevel.autoscaler: "info" + loglevel.queueproxy: "info" + loglevel.webhook: "info" + loglevel.activator: "info" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-network + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + istio.sidecar.includeOutboundIPRanges: "*" + ingress.class: "istio.ingress.networking.knative.dev" + certificate.class: "cert-manager.certificate.networking.internal.knative.dev" + 
domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}" + tagTemplate: "{{.Tag}}-{{.Name}}" + autoTLS: "Disabled" + httpProtocol: "Enabled" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + logging.enable-var-log-collection: "false" + logging.revision-url-template: | + http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) + logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' + logging.enable-probe-request-log: "false" + metrics.backend-destination: prometheus + metrics.request-metrics-backend-destination: prometheus + metrics.stackdriver-project-id: "" + metrics.allow-stackdriver-custom-metrics: "false" + profiling.enable: "false" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-tracing + namespace: knative-serving + labels: + serving.knative.dev/release: devel +data: + _example: | + backend: "none" + zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + stackdriver-project-id: "my-project" + debug: "false" + sample-rate: "0.1" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: controller + template: + metadata: + annotations: 
+ cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: controller + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: controller + image: quay.io/openshift-knative/knative-serving-controller:v0.12.1 + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1000Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/internal/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: controller + serving.knative.dev/release: devel + name: controller + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + selector: + app: controller +--- +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io + labels: + serving.knative.dev/release: devel + autoscaling.knative.dev/metric-provider: custom-metrics +spec: + service: + name: autoscaler + namespace: knative-serving + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: networking-istio + namespace: knative-serving + labels: + serving.knative.dev/release: devel + networking.knative.dev/ingress-provider: istio +spec: + selector: + matchLabels: + app: networking-istio + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + sidecar.istio.io/inject: "false" + labels: + app: networking-istio + serving.knative.dev/release: devel + spec: + 
serviceAccountName: controller + containers: + - name: networking-istio + image: quay.io/openshift-knative/knative-serving-istio:v0.12.1 + resources: + requests: + cpu: 30m + memory: 40Mi + limits: + cpu: 300m + memory: 400Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: webhook + namespace: knative-serving + labels: + serving.knative.dev/release: devel +spec: + selector: + matchLabels: + app: webhook + role: webhook + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: webhook + role: webhook + serving.knative.dev/release: devel + spec: + serviceAccountName: controller + containers: + - name: webhook + image: quay.io/openshift-knative/knative-serving-webhook:v0.12.1 + resources: + requests: + cpu: 20m + memory: 20Mi + limits: + cpu: 200m + memory: 200Mi + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/serving + securityContext: + allowPrivilegeEscalation: false + ports: + - name: metrics + containerPort: 9090 + - name: profiling + containerPort: 8008 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + role: webhook + serving.knative.dev/release: devel + name: webhook + namespace: knative-serving +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + 
selector: + role: webhook diff --git a/test/vendor/knative.dev/serving/openshift/release/resolve.sh b/test/vendor/knative.dev/serving/openshift/release/resolve.sh new file mode 100755 index 0000000000..9b838ce417 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/resolve.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +function resolve_resources(){ + local dir=$1 + local resolved_file_name=$2 + local image_prefix=$3 + local image_tag=$4 + + [[ -n $image_tag ]] && image_tag=":$image_tag" + + echo "Writing resolved yaml to $resolved_file_name" + + > "$resolved_file_name" + + for yaml in "$dir"/*.yaml; do + resolve_file "$yaml" "$resolved_file_name" "$image_prefix" "$image_tag" + done +} + +function resolve_file() { + local file=$1 + local to=$2 + local image_prefix=$3 + local image_tag=$4 + + # Skip cert-manager, it's not part of upstream's release YAML either. + if grep -q 'networking.knative.dev/certificate-provider: cert-manager' "$1"; then + return + fi + + # Skip nscert, it's not part of upstream's release YAML either. + if grep -q 'networking.knative.dev/wildcard-certificate-provider: nscert' "$1"; then + return + fi + + echo "---" >> "$to" + # 1. Rewrite image references + # 2. Update config map entry + # 3. Remove comment lines + # 4. 
Remove empty lines + sed -e "s+\(.* image: \)\(knative.dev\)\(.*/\)\(.*\)+\1${image_prefix}\4${image_tag}+g" \ + -e "s+\(.* queueSidecarImage: \)\(knative.dev\)\(.*/\)\(.*\)+\1${image_prefix}\4${image_tag}+g" \ + -e '/^[ \t]*#/d' \ + -e '/^[ \t]*$/d' \ + "$file" >> "$to" +} diff --git a/test/vendor/knative.dev/serving/openshift/release/update-to-head.sh b/test/vendor/knative.dev/serving/openshift/release/update-to-head.sh new file mode 100755 index 0000000000..a92fa5feb8 --- /dev/null +++ b/test/vendor/knative.dev/serving/openshift/release/update-to-head.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# Synchs the release-next branch to master and then triggers CI +# Usage: update-to-head.sh + +set -e +REPO_NAME=`basename $(git rev-parse --show-toplevel)` + +# Reset release-next to upstream/master. +git fetch upstream master +git checkout upstream/master -B release-next + +# Update openshift's master and take all needed files from there. +git fetch openshift master +git checkout openshift/master openshift OWNERS_ALIASES OWNERS Makefile content_sets.yml container.yaml +make generate-dockerfiles +make RELEASE=ci generate-release +git add openshift OWNERS_ALIASES OWNERS Makefile content_sets.yml container.yaml +git commit -m ":open_file_folder: Update openshift specific files." + +# Apply patches . +git apply openshift/patches/* +git commit -am ":fire: Apply carried patches." + +git push -f openshift release-next + +# Trigger CI +git checkout release-next -B release-next-ci +date > ci +git add ci +git commit -m ":robot: Triggering CI on branch 'release-next' after synching to upstream/master" +git push -f openshift release-next-ci + +if hash hub 2>/dev/null; then + hub pull-request --no-edit -l "kind/sync-fork-to-upstream" -b openshift/${REPO_NAME}:release-next -h openshift/${REPO_NAME}:release-next-ci +else + echo "hub (https://github.com/github/hub) is not installed, so you'll need to create a PR manually." 
+fi diff --git a/test/vendor/knative.dev/serving/pkg/activator/OWNERS b/test/vendor/knative.dev/serving/pkg/activator/OWNERS new file mode 100644 index 0000000000..4a5322417b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/OWNERS @@ -0,0 +1,13 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- autoscaling-approvers +- networking-approvers + +reviewers: +- autoscaling-reviewers +- networking-reviewers + +labels: +- area/autoscale +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/activator/README.md b/test/vendor/knative.dev/serving/pkg/activator/README.md new file mode 100644 index 0000000000..5e01959022 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/README.md @@ -0,0 +1,11 @@ +# About the Activator + +The name _activator_ is actually a misnomer, since after Knative 0.2, the +activator no longer activates inactive Revisions. + +The only responsibilities of the activator are: + +- Receiving & buffering requests for inactive Revisions. +- Reporting metrics to the autoscaler. +- Retrying requests to a Revision after the autoscaler scales such Revision + based on the reported metrics. diff --git a/test/vendor/knative.dev/serving/pkg/activator/activator.go b/test/vendor/knative.dev/serving/pkg/activator/activator.go new file mode 100644 index 0000000000..4dcb201b2e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/activator.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activator + +const ( + // Name is the name of the component. + Name = "activator" + // RevisionHeaderName is the header key for revision name. + RevisionHeaderName = "Knative-Serving-Revision" + // RevisionHeaderNamespace is the header key for revision's namespace. + RevisionHeaderNamespace = "Knative-Serving-Namespace" +) diff --git a/test/vendor/github.com/knative/pkg/apis/doc.go b/test/vendor/knative.dev/serving/pkg/activator/config/doc.go similarity index 91% rename from test/vendor/github.com/knative/pkg/apis/doc.go rename to test/vendor/knative.dev/serving/pkg/activator/config/doc.go index 73ae0329ff..2977a19108 100644 --- a/test/vendor/github.com/knative/pkg/apis/doc.go +++ b/test/vendor/knative.dev/serving/pkg/activator/config/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Knative Authors +Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,4 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -package apis + +package config diff --git a/test/vendor/knative.dev/serving/pkg/activator/config/store.go b/test/vendor/knative.dev/serving/pkg/activator/config/store.go new file mode 100644 index 0000000000..c4a0629c1f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/config/store.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "net/http" + + "knative.dev/pkg/configmap" + tracingconfig "knative.dev/pkg/tracing/config" +) + +type cfgKey struct{} + +// Config is a configuration for the activator +type Config struct { + Tracing *tracingconfig.Config +} + +// FromContext obtains a Config injected into the passed context +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +func toContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// +k8s:deepcopy-gen=false +// Store loads/unloads our untyped configuration +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configuration Store +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + return &Store{ + UntypedStore: configmap.NewUntypedStore( + "activator", + logger, + configmap.Constructors{ + tracingconfig.ConfigName: tracingconfig.NewTracingConfigFromConfigMap, + }, + onAfterStore..., + ), + } +} + +// ToContext stores the configuration Store in the passed context +func (s *Store) ToContext(ctx context.Context) context.Context { + return toContext(ctx, s.Load()) +} + +// Load creates a Config for this store +func (s *Store) Load() *Config { + return &Config{ + Tracing: s.UntypedLoad(tracingconfig.ConfigName).(*tracingconfig.Config).DeepCopy(), + } +} + +type storeMiddleware struct { + store *Store + next http.Handler +} + +// ServeHTTP injects Config in to the context of the http request r +func (mw *storeMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := mw.store.ToContext(r.Context()) + mw.next.ServeHTTP(w, r.WithContext(ctx)) +} + +// HTTPMiddleware is a middleware which stores the current config store in the request context +func (s *Store) HTTPMiddleware(next http.Handler) http.Handler { + return 
&storeMiddleware{ + store: s, + next: next, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/activator/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dedf27d9ce --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/config/zz_generated.deepcopy.go @@ -0,0 +1,46 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + tracingconfig "knative.dev/pkg/tracing/config" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Tracing != nil { + in, out := &in.Tracing, &out.Tracing + *out = new(tracingconfig.Config) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. 
+func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter.go b/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter.go new file mode 100644 index 0000000000..8759554c83 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter.go @@ -0,0 +1,136 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "time" + + "go.uber.org/zap" + + "k8s.io/apimachinery/pkg/types" + + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/autoscaler" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +// ConcurrencyReporter reports stats based on incoming requests and ticks. 
+type ConcurrencyReporter struct { + logger *zap.SugaredLogger + podName string + + // Ticks with every request arrived/completed respectively + reqCh chan ReqEvent + // Ticks with every stat report request + reportCh <-chan time.Time + // Stat reporting channel + statCh chan []autoscaler.StatMessage + + rl servinglisters.RevisionLister + sr activator.StatsReporter +} + +// NewConcurrencyReporter creates a ConcurrencyReporter which listens to incoming +// ReqEvents on reqCh and ticks on reportCh and reports stats on statCh. +func NewConcurrencyReporter(ctx context.Context, podName string, + reqCh chan ReqEvent, reportCh <-chan time.Time, statCh chan []autoscaler.StatMessage, + sr activator.StatsReporter) *ConcurrencyReporter { + return &ConcurrencyReporter{ + logger: logging.FromContext(ctx), + podName: podName, + reqCh: reqCh, + reportCh: reportCh, + statCh: statCh, + rl: revisioninformer.Get(ctx).Lister(), + sr: sr, + } +} + +func (cr *ConcurrencyReporter) reportToMetricsBackend(key types.NamespacedName, concurrency int64) { + ns := key.Namespace + revName := key.Name + revision, err := cr.rl.Revisions(ns).Get(revName) + if err != nil { + cr.logger.Errorw("Error while getting revision", zap.Error(err)) + return + } + configurationName := revision.Labels[serving.ConfigurationLabelKey] + serviceName := revision.Labels[serving.ServiceLabelKey] + cr.sr.ReportRequestConcurrency(ns, serviceName, configurationName, revName, concurrency) +} + +// Run runs until stopCh is closed and processes events on all incoming channels +func (cr *ConcurrencyReporter) Run(stopCh <-chan struct{}) { + // Contains the number of in-flight requests per-key + outstandingRequestsPerKey := make(map[types.NamespacedName]int64) + // Contains the number of incoming requests in the current + // reporting period, per key. 
+ incomingRequestsPerKey := make(map[types.NamespacedName]int64) + + for { + select { + case event := <-cr.reqCh: + switch event.EventType { + case ReqIn: + incomingRequestsPerKey[event.Key]++ + + // Report the first request for a key immediately. + if _, ok := outstandingRequestsPerKey[event.Key]; !ok { + cr.statCh <- []autoscaler.StatMessage{{ + Key: event.Key, + Stat: autoscaler.Stat{ + // Stat time is unset by design. The receiver will set the time. + PodName: cr.podName, + AverageConcurrentRequests: 1, + RequestCount: float64(incomingRequestsPerKey[event.Key]), + }, + }} + } + outstandingRequestsPerKey[event.Key]++ + case ReqOut: + outstandingRequestsPerKey[event.Key]-- + } + case <-cr.reportCh: + messages := make([]autoscaler.StatMessage, 0, len(outstandingRequestsPerKey)) + for key, concurrency := range outstandingRequestsPerKey { + if concurrency == 0 { + delete(outstandingRequestsPerKey, key) + } else { + messages = append(messages, autoscaler.StatMessage{ + Key: key, + Stat: autoscaler.Stat{ + // Stat time is unset by design. The receiver will set the time. + PodName: cr.podName, + AverageConcurrentRequests: float64(concurrency), + RequestCount: float64(incomingRequestsPerKey[key]), + }, + }) + } + cr.reportToMetricsBackend(key, concurrency) + } + cr.statCh <- messages + + incomingRequestsPerKey = make(map[types.NamespacedName]int64) + case <-stopCh: + return + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter_test.go new file mode 100644 index 0000000000..eda1e4181e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/concurrency_reporter_test.go @@ -0,0 +1,273 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "k8s.io/apimachinery/pkg/types" + + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + servingv1informers "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" +) + +const ( + requestOpTick = "RequestOpTick" + requestOpStart = "RequestOpStart" + requestOpEnd = "RequestOpEnd" +) + +var ( + pod1 = types.NamespacedName{Namespace: "test", Name: "pod1"} + pod2 = types.NamespacedName{Namespace: "test", Name: "pod2"} + pod3 = types.NamespacedName{Namespace: "test", Name: "pod3"} +) + +type reqOp struct { + op string + key types.NamespacedName + time time.Time +} + +func TestStats(t *testing.T) { + tt := []struct { + name string + ops []reqOp + expectedStats []autoscaler.StatMessage + }{{ + name: "Scale-from-zero sends stat", + ops: []reqOp{{ + op: requestOpStart, + key: pod1, + }, { + op: requestOpStart, + key: pod2, + }}, + expectedStats: []autoscaler.StatMessage{{ + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod2, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, + }}, { + name: "Scale to two", + ops: 
[]reqOp{{ + op: requestOpStart, + key: pod1, + }, { + op: requestOpStart, + key: pod1, + }, { + op: requestOpTick, + }}, + expectedStats: []autoscaler.StatMessage{{ + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 2, + RequestCount: 2, + PodName: "activator", + }}, + }}, { + name: "Scale-from-zero after tick sends stat", + ops: []reqOp{{ + op: requestOpStart, + key: pod1, + }, { + op: requestOpEnd, + key: pod1, + }, { + op: requestOpTick, + }, { + op: requestOpStart, + key: pod1, + }}, + expectedStats: []autoscaler.StatMessage{{ + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, + }}, { + name: "Multiple pods tick", + ops: []reqOp{{ + op: requestOpStart, + key: pod1, + }, { + op: requestOpStart, + key: pod2, + }, { + op: requestOpTick, + }, { + op: requestOpEnd, + key: pod1, + }, { + op: requestOpStart, + key: pod3, + }, { + op: requestOpTick, + }}, + expectedStats: []autoscaler.StatMessage{{ + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod2, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod1, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod2, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod3, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, { + Key: pod2, + Stat: autoscaler.Stat{ + AverageConcurrentRequests: 1, + RequestCount: 0, + PodName: "activator", + }}, { + Key: pod3, + Stat: autoscaler.Stat{ + 
AverageConcurrentRequests: 1, + RequestCount: 1, + PodName: "activator", + }}, + }}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, cr, ctx, cancel := newTestStats(t) + defer func() { + cancel() + }() + go func() { + cr.Run(ctx.Done()) + }() + + go func() { + // Apply request operations + for _, op := range tc.ops { + switch op.op { + case requestOpStart: + s.reqChan <- ReqEvent{Key: op.key, EventType: ReqIn} + case requestOpEnd: + s.reqChan <- ReqEvent{Key: op.key, EventType: ReqOut} + case requestOpTick: + s.reportBiChan <- op.time + } + } + }() + + // Gather reported stats + stats := make([]autoscaler.StatMessage, 0, len(tc.expectedStats)) + for len(stats) < len(tc.expectedStats) { + stats = append(stats, <-s.statChan...) + } + + // Check the stats we got match what we wanted + sorter := cmpopts.SortSlices(func(a, b autoscaler.StatMessage) bool { + return a.Key.Name < b.Key.Name + }) + if got, want := stats, tc.expectedStats; !cmp.Equal(got, want, sorter) { + t.Errorf("Unexpected stats (-want +got): %s", cmp.Diff(want, got, sorter)) + } + }) + } +} + +// Test type to hold the bi-directional time channels +type testStats struct { + reqChan chan ReqEvent + reportChan <-chan time.Time + statChan chan []autoscaler.StatMessage + reportBiChan chan time.Time +} + +func newTestStats(t *testing.T) (*testStats, *ConcurrencyReporter, context.Context, context.CancelFunc) { + reportBiChan := make(chan time.Time) + ts := &testStats{ + reqChan: make(chan ReqEvent), + reportChan: (<-chan time.Time)(reportBiChan), + statChan: make(chan []autoscaler.StatMessage), + reportBiChan: reportBiChan, + } + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + revisionInformer(ctx, revision(testNamespace, testRevName)) + + cr := NewConcurrencyReporter(ctx, "activator", + ts.reqChan, ts.reportChan, ts.statChan, &fakeReporter{}) + return ts, cr, ctx, cancel +} + +func revisionInformer(ctx context.Context, revs ...*v1alpha1.Revision) 
servingv1informers.RevisionInformer { + fake := fakeservingclient.Get(ctx) + revisions := fakerevisioninformer.Get(ctx) + + for _, rev := range revs { + fake.ServingV1alpha1().Revisions(rev.Namespace).Create(rev) + revisions.Informer().GetIndexer().Add(rev) + } + + return revisions +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler.go new file mode 100644 index 0000000000..3a354c05fb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "fmt" + "net/http" + + "go.uber.org/zap" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +type revisionKey struct{} +type revIDKey struct{} + +// NewContextHandler creates a handler that extracts the necessary context from the request +// and makes it available on the request's context. 
+func NewContextHandler(ctx context.Context, next http.Handler) http.Handler { + return &contextHandler{ + nextHandler: next, + revisionLister: revisioninformer.Get(ctx).Lister(), + logger: logging.FromContext(ctx), + } +} + +// contextHandler enriches the request's context with structured data. +type contextHandler struct { + revisionLister servinglisters.RevisionLister + logger *zap.SugaredLogger + nextHandler http.Handler +} + +func (h *contextHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + namespace := r.Header.Get(activator.RevisionHeaderNamespace) + name := r.Header.Get(activator.RevisionHeaderName) + revID := types.NamespacedName{Namespace: namespace, Name: name} + logger := h.logger.With(zap.String(logkey.Key, revID.String())) + + revision, err := h.revisionLister.Revisions(namespace).Get(name) + if err != nil { + logger.Errorw("Error while getting revision", zap.Error(err)) + sendError(err, w) + return + } + + ctx := r.Context() + ctx = logging.WithLogger(ctx, logger) + ctx = context.WithValue(ctx, revisionKey{}, revision) + ctx = context.WithValue(ctx, revIDKey{}, revID) + + h.nextHandler.ServeHTTP(w, r.WithContext(ctx)) +} + +func withRevision(ctx context.Context, rev *v1alpha1.Revision) context.Context { + return context.WithValue(ctx, revisionKey{}, rev) +} + +func revisionFrom(ctx context.Context) *v1alpha1.Revision { + return ctx.Value(revisionKey{}).(*v1alpha1.Revision) +} + +func withRevID(ctx context.Context, revID types.NamespacedName) context.Context { + return context.WithValue(ctx, revIDKey{}, revID) +} + +func revIDFrom(ctx context.Context) types.NamespacedName { + return ctx.Value(revIDKey{}).(types.NamespacedName) +} + +func sendError(err error, w http.ResponseWriter) { + msg := fmt.Sprintf("Error getting active endpoint: %v", err) + if k8serrors.IsNotFound(err) { + http.Error(w, msg, http.StatusNotFound) + return + } + http.Error(w, msg, http.StatusInternalServerError) +} diff --git 
a/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler_test.go new file mode 100644 index 0000000000..74fce6fe88 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/context_handler_test.go @@ -0,0 +1,87 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package handler + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "k8s.io/apimachinery/pkg/types" + + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/serving/pkg/activator" +) + +func TestContextHandler(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + revID := types.NamespacedName{Namespace: testNamespace, Name: testRevName} + revision := revision(revID.Namespace, revID.Name) + revisionInformer(ctx, revision) + + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got := revisionFrom(r.Context()); got != revision { + t.Errorf("revisionFrom() = %v, want %v", got, revision) + } + + if got := revIDFrom(r.Context()); got != revID { + t.Errorf("revIDFrom() = %v, want %v", got, revID) + } + }) + + handler := NewContextHandler(ctx, baseHandler) + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("")) + req.Header.Set(activator.RevisionHeaderNamespace, revID.Namespace) + 
req.Header.Set(activator.RevisionHeaderName, revID.Name) + handler.ServeHTTP(resp, req) + + if got, want := resp.Code, http.StatusOK; got != want { + t.Errorf("StatusCode = %d, want %d, body: %s", got, want, resp.Body.String()) + } +} + +func TestContextHandlerError(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + revID := types.NamespacedName{Namespace: testNamespace, Name: testRevName} + revision := revision(revID.Namespace, revID.Name) + revisionInformer(ctx, revision) + + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + + handler := NewContextHandler(ctx, baseHandler) + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("")) + req.Header.Set(activator.RevisionHeaderNamespace, "foospace") + req.Header.Set(activator.RevisionHeaderName, "fooname") + handler.ServeHTTP(resp, req) + + if got, want := resp.Code, http.StatusNotFound; got != want { + t.Errorf("StatusCode = %d, want %d", got, want) + } + + if got, want := resp.Body.String(), errMsg(`revision.serving.knative.dev "fooname" not found`); got != want { + t.Errorf("Body = %q, want %q", got, want) + } +} + +func errMsg(msg string) string { + return fmt.Sprintf("Error getting active endpoint: %s\n", msg) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/handler.go new file mode 100644 index 0000000000..55f137855f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/handler.go @@ -0,0 +1,144 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" + "go.uber.org/zap" + + "knative.dev/pkg/logging" + pkgnet "knative.dev/pkg/network" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/activator" + activatorconfig "knative.dev/serving/pkg/activator/config" + activatornet "knative.dev/serving/pkg/activator/net" + "knative.dev/serving/pkg/activator/util" + "knative.dev/serving/pkg/apis/serving" + pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + + "k8s.io/apimachinery/pkg/types" +) + +// Throttler is the interface that Handler calls to Try to proxy the user request. +type Throttler interface { + Try(context.Context, types.NamespacedName, func(string) error) error +} + +// activationHandler will wait for an active endpoint for a revision +// to be available before proxing the request +type activationHandler struct { + transport http.RoundTripper + tracingTransport http.RoundTripper + reporter activator.StatsReporter + throttler Throttler + bufferPool httputil.BufferPool +} + +// The default time we'll try to probe the revision for activation. +const defaulTimeout = 2 * time.Minute + +// New constructs a new http.Handler that deals with revision activation. 
+func New(ctx context.Context, t Throttler, sr activator.StatsReporter) http.Handler { + defaultTransport := pkgnet.AutoTransport + return &activationHandler{ + transport: defaultTransport, + tracingTransport: &ochttp.Transport{Base: defaultTransport}, + reporter: sr, + throttler: t, + bufferPool: network.NewBufferPool(), + } +} + +func (a *activationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + revID := revIDFrom(r.Context()) + logger := logging.FromContext(r.Context()) + tracingEnabled := activatorconfig.FromContext(r.Context()).Tracing.Backend != tracingconfig.None + + tryContext, trySpan := r.Context(), (*trace.Span)(nil) + if tracingEnabled { + tryContext, trySpan = trace.StartSpan(r.Context(), "throttler_try") + } + tryContext, cancel := context.WithTimeout(tryContext, defaulTimeout) + defer cancel() + + err := a.throttler.Try(tryContext, revID, func(dest string) error { + trySpan.End() + + proxyCtx, proxySpan := r.Context(), (*trace.Span)(nil) + if tracingEnabled { + proxyCtx, proxySpan = trace.StartSpan(r.Context(), "proxy") + } + httpStatus := a.proxyRequest(logger, w, r.WithContext(proxyCtx), &url.URL{ + Scheme: "http", + Host: dest, + }, tracingEnabled) + proxySpan.End() + + revision := revisionFrom(r.Context()) + configurationName := revision.Labels[serving.ConfigurationLabelKey] + serviceName := revision.Labels[serving.ServiceLabelKey] + // Do not report response time here. It is reported in pkg/activator/metric_handler.go to + // sum up all time spent on multiple handlers. 
+ a.reporter.ReportRequestCount(revID.Namespace, serviceName, configurationName, revID.Name, httpStatus, 1) + + return nil + }) + if err != nil { + // Set error on our capacity waiting span and end it + trySpan.Annotate([]trace.Attribute{trace.StringAttribute("activator.throttler.error", err.Error())}, "ThrottlerTry") + trySpan.End() + + logger.Errorw("Throttler try error", zap.Error(err)) + + switch err { + case activatornet.ErrActivatorOverload, context.DeadlineExceeded, queue.ErrRequestQueueFull: + http.Error(w, err.Error(), http.StatusServiceUnavailable) + default: + w.WriteHeader(http.StatusInternalServerError) + } + } +} + +func (a *activationHandler) proxyRequest(logger *zap.SugaredLogger, w http.ResponseWriter, r *http.Request, target *url.URL, tracingEnabled bool) int { + network.RewriteHostIn(r) + + // Setup the reverse proxy. + proxy := httputil.NewSingleHostReverseProxy(target) + proxy.BufferPool = a.bufferPool + proxy.Transport = a.transport + if tracingEnabled { + proxy.Transport = a.tracingTransport + } + proxy.FlushInterval = -1 + proxy.ErrorHandler = pkgnet.ErrorHandler(logger) + + r.Header.Set(network.ProxyHeaderName, activator.Name) + + util.SetupHeaderPruning(proxy) + + recorder := pkghttp.NewResponseRecorder(w, http.StatusOK) + proxy.ServeHTTP(recorder, r) + return recorder.ResponseCode +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/handler_test.go new file mode 100644 index 0000000000..5c7c522ad2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/handler_test.go @@ -0,0 +1,509 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "net/http/httptest" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/plugin/ochttp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/pkg/tracing" + tracingconfig "knative.dev/pkg/tracing/config" + tracetesting "knative.dev/pkg/tracing/testing" + "knative.dev/serving/pkg/activator" + activatorconfig "knative.dev/serving/pkg/activator/config" + anet "knative.dev/serving/pkg/activator/net" + activatortest "knative.dev/serving/pkg/activator/testing" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/network" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + . "knative.dev/pkg/configmap/testing" + "knative.dev/pkg/logging" + _ "knative.dev/pkg/system/testing" +) + +const ( + wantBody = "♫ everything is awesome! 
♫" + testNamespace = "real-namespace" + testRevName = "real-name" +) + +type fakeThrottler struct { + err error +} + +func (ft fakeThrottler) Try(ctx context.Context, _ types.NamespacedName, f func(string) error) error { + if ft.err != nil { + return ft.err + } + return f("10.10.10.10:1234") +} + +func TestActivationHandler(t *testing.T) { + tests := []struct { + name string + wantBody string + wantCode int + wantErr error + probeErr error + probeCode int + probeResp []string + throttler Throttler + reporterCalls []reporterCall + }{{ + name: "active endpoint", + wantBody: wantBody, + wantCode: http.StatusOK, + wantErr: nil, + throttler: fakeThrottler{}, + reporterCalls: []reporterCall{{ + Op: "ReportRequestCount", + Namespace: testNamespace, + Revision: testRevName, + Service: "service-real-name", + Config: "config-real-name", + StatusCode: http.StatusOK, + Attempts: 1, + Value: 1, + }}, + }, { + name: "request error", + wantBody: "request error\n", + wantCode: http.StatusBadGateway, + wantErr: errors.New("request error"), + throttler: fakeThrottler{}, + reporterCalls: []reporterCall{{ + Op: "ReportRequestCount", + Namespace: testNamespace, + Revision: testRevName, + Service: "service-real-name", + Config: "config-real-name", + StatusCode: http.StatusBadGateway, + Attempts: 1, + Value: 1, + }}, + }, { + name: "throttler timeout", + wantBody: context.DeadlineExceeded.Error() + "\n", + wantCode: http.StatusServiceUnavailable, + wantErr: nil, + throttler: fakeThrottler{err: context.DeadlineExceeded}, + reporterCalls: nil, + }, { + name: "overflow", + wantBody: "activator overload\n", + wantCode: http.StatusServiceUnavailable, + wantErr: nil, + throttler: fakeThrottler{err: anet.ErrActivatorOverload}, + reporterCalls: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + probeResponses := make([]activatortest.FakeResponse, len(test.probeResp)) + for i := 0; i < len(test.probeResp); i++ { + probeResponses[i] = activatortest.FakeResponse{ + 
Err: test.probeErr, + Code: test.probeCode, + Body: test.probeResp[i], + } + } + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: "test-host", + ProbeResponses: probeResponses, + RequestResponse: &activatortest.FakeResponse{ + Err: test.wantErr, + Code: test.wantCode, + Body: test.wantBody, + }, + } + rt := pkgnet.RoundTripperFunc(fakeRT.RT) + + reporter := &fakeReporter{} + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + handler := (New(ctx, test.throttler, reporter)).(*activationHandler) + + // Setup transports. + handler.transport = rt + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + req.Host = "test-host" + + // Set up config store to populate context. + configStore := setupConfigStore(t, logging.FromContext(ctx)) + ctx = configStore.ToContext(ctx) + ctx = withRevision(ctx, revision(testNamespace, testRevName)) + ctx = withRevID(ctx, types.NamespacedName{Namespace: testNamespace, Name: testRevName}) + + handler.ServeHTTP(resp, req.WithContext(ctx)) + + if resp.Code != test.wantCode { + t.Fatalf("Unexpected response status. Want %d, got %d", test.wantCode, resp.Code) + } + + gotBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("Error reading body: %v", err) + } + if string(gotBody) != test.wantBody { + t.Errorf("Unexpected response body. 
Response body %q, want %q", gotBody, test.wantBody) + } + + // Filter out response time reporter calls + var gotCalls []reporterCall + if reporter.calls != nil { + gotCalls = make([]reporterCall, 0) + for _, gotCall := range reporter.calls { + if gotCall.Op != "ReportResponseTime" { + gotCalls = append(gotCalls, gotCall) + } + } + } + + if diff := cmp.Diff(test.reporterCalls, gotCalls); diff != "" { + t.Errorf("Reporting calls are different (-want, +got) = %v", diff) + } + }) + } +} + +func TestActivationHandlerProxyHeader(t *testing.T) { + interceptCh := make(chan *http.Request, 1) + rt := pkgnet.RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + interceptCh <- r + fake := httptest.NewRecorder() + return fake.Result(), nil + }) + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + + handler := (New(ctx, fakeThrottler{}, &fakeReporter{})).(*activationHandler) + handler.transport = rt + + writer := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + + // Set up config store to populate context. 
+ configStore := setupConfigStore(t, logging.FromContext(ctx)) + ctx = configStore.ToContext(req.Context()) + ctx = withRevision(ctx, revision(testNamespace, testRevName)) + ctx = withRevID(ctx, types.NamespacedName{Namespace: testNamespace, Name: testRevName}) + handler.ServeHTTP(writer, req.WithContext(ctx)) + + select { + case httpReq := <-interceptCh: + if got := httpReq.Header.Get(network.ProxyHeaderName); got != activator.Name { + t.Errorf("Header %q = %q, want: %q", network.ProxyHeaderName, got, activator.Name) + } + case <-time.After(1 * time.Second): + t.Fatal("Timed out waiting for a request to be intercepted") + } +} + +func TestActivationHandlerTraceSpans(t *testing.T) { + testcases := []struct { + name string + wantSpans int + traceBackend tracingconfig.BackendType + }{{ + name: "zipkin trace enabled", + wantSpans: 3, + traceBackend: tracingconfig.Zipkin, + }, { + name: "trace disabled", + wantSpans: 0, + traceBackend: tracingconfig.None, + }} + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // Setup transport + fakeRT := activatortest.FakeRoundTripper{ + RequestResponse: &activatortest.FakeResponse{ + Err: nil, + Code: http.StatusOK, + Body: wantBody, + }, + } + rt := pkgnet.RoundTripperFunc(fakeRT.RT) + + // Create tracer with reporter recorder + reporter, co := tracetesting.FakeZipkinExporter() + oct := tracing.NewOpenCensusTracer(co) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-tracing", + }, + Data: map[string]string{ + "zipkin-endpoint": "localhost:1234", + "backend": string(tc.traceBackend), + "debug": "true", + }, + } + cfg, err := tracingconfig.NewTracingConfigFromConfigMap(cm) + if err != nil { + t.Fatalf("Failed to generate config: %v", err) + } + if err := oct.ApplyConfig(cfg); err != nil { + t.Errorf("Failed to apply tracer config: %v", err) + } + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer func() { + cancel() + reporter.Close() + oct.Finish() + }() + + 
handler := (New(ctx, fakeThrottler{}, &fakeReporter{})).(*activationHandler) + handler.transport = rt + handler.tracingTransport = &ochttp.Transport{Base: rt} + + // Set up config store to populate context. + configStore := setupConfigStore(t, logging.FromContext(ctx)) + // Update the store with our "new" config explicitly. + configStore.OnConfigChanged(cm) + sendRequest(testNamespace, testRevName, handler, configStore) + + gotSpans := reporter.Flush() + if len(gotSpans) != tc.wantSpans { + t.Errorf("Got %d spans, expected %d", len(gotSpans), tc.wantSpans) + } + + spanNames := []string{"throttler_try", "/", "proxy"} + for i, spanName := range spanNames[0:tc.wantSpans] { + if gotSpans[i].Name != spanName { + t.Errorf("Got span %d named %q, expected %q", i, gotSpans[i].Name, spanName) + } + } + }) + } +} + +func sendRequest(namespace, revName string, handler *activationHandler, store *activatorconfig.Store) *httptest.ResponseRecorder { + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + ctx := store.ToContext(req.Context()) + ctx = withRevision(ctx, revision(namespace, revName)) + ctx = withRevID(ctx, types.NamespacedName{Namespace: namespace, Name: revName}) + handler.ServeHTTP(resp, req.WithContext(ctx)) + return resp +} + +type reporterCall struct { + Op string + Namespace string + Service string + Config string + Revision string + StatusCode int + Attempts int + Value int64 + Duration time.Duration +} + +type fakeReporter struct { + calls []reporterCall + mux sync.Mutex +} + +func (f *fakeReporter) ReportRequestConcurrency(ns, service, config, rev string, v int64) error { + f.mux.Lock() + defer f.mux.Unlock() + f.calls = append(f.calls, reporterCall{ + Op: "ReportRequestConcurrency", + Namespace: ns, + Service: service, + Config: config, + Revision: rev, + Value: v, + }) + + return nil +} + +func (f *fakeReporter) ReportRequestCount(ns, service, config, rev string, responseCode, numTries int) error { + 
f.mux.Lock() + defer f.mux.Unlock() + f.calls = append(f.calls, reporterCall{ + Op: "ReportRequestCount", + Namespace: ns, + Service: service, + Config: config, + Revision: rev, + StatusCode: responseCode, + Attempts: numTries, + Value: 1, + }) + + return nil +} + +func (f *fakeReporter) ReportResponseTime(ns, service, config, rev string, responseCode int, d time.Duration) error { + f.mux.Lock() + defer f.mux.Unlock() + f.calls = append(f.calls, reporterCall{ + Op: "ReportResponseTime", + Namespace: ns, + Service: service, + Config: config, + Revision: rev, + StatusCode: responseCode, + Duration: d, + }) + + return nil +} + +func revision(namespace, name string) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "config-" + testRevName, + serving.ServiceLabelKey: "service-" + testRevName, + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + } +} + +func setupConfigStore(t *testing.T, logger *zap.SugaredLogger) *activatorconfig.Store { + configStore := activatorconfig.NewStore(logger) + tracingConfig := ConfigMapFromTestFile(t, tracingconfig.ConfigName) + configStore.OnConfigChanged(tracingConfig) + return configStore +} + +func BenchmarkHandler(b *testing.B) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(&testing.T{}) + defer cancel() + configStore := setupConfigStore(&testing.T{}, logging.FromContext(ctx)) + + // bodyLength is in kilobytes. 
+ for _, bodyLength := range [5]int{2, 16, 32, 64, 128} { + body := []byte(randomString(1024 * bodyLength)) + + rt := pkgnet.RoundTripperFunc(func(*http.Request) (*http.Response, error) { + return &http.Response{ + Body: ioutil.NopCloser(bytes.NewReader(body)), + StatusCode: http.StatusOK, + }, nil + }) + + handler := (New(ctx, fakeThrottler{}, &fakeReporter{})).(*activationHandler) + handler.transport = rt + + request := func() *http.Request { + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + req.Host = "test-host" + + reqCtx := configStore.ToContext(context.Background()) + reqCtx = withRevision(reqCtx, revision(testNamespace, testRevName)) + reqCtx = withRevID(reqCtx, types.NamespacedName{Namespace: testNamespace, Name: testRevName}) + return req.WithContext(reqCtx) + } + + test := func(req *http.Request, b *testing.B) { + resp := &responseRecorder{} + handler.ServeHTTP(resp, req) + if resp.code != http.StatusOK { + b.Fatalf("resp.Code = %d, want: StatusOK(200)", resp.code) + } + if got, want := resp.size, int32(len(body)); got != want { + b.Fatalf("|body| = %d, want = %d", got, want) + } + } + + b.Run(fmt.Sprintf("%03dk-resp-len-sequential", bodyLength), func(b *testing.B) { + req := request() + for j := 0; j < b.N; j++ { + test(req, b) + } + }) + + b.Run(fmt.Sprintf("%03dk-resp-len-parallel", bodyLength), func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + req := request() + for pb.Next() { + test(req, b) + } + }) + }) + } +} + +func randomString(n int) string { + var letter = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + b := make([]rune, n) + for i := range b { + b[i] = letter[rand.Intn(len(letter))] + } + return string(b) +} + +// responseRecorder is an implementation of http.ResponseWriter and http.Flusher +// that captures the response code and size. 
+type responseRecorder struct { + code int + size int32 +} + +func (rr *responseRecorder) Flush() {} + +func (rr *responseRecorder) Header() http.Header { + return http.Header{} +} + +func (rr *responseRecorder) Write(p []byte) (int, error) { + atomic.AddInt32(&rr.size, int32(len(p))) + return ioutil.Discard.Write(p) +} + +func (rr *responseRecorder) WriteHeader(code int) { + rr.code = code +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler.go new file mode 100644 index 0000000000..67a3652985 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "net/http" + + "go.uber.org/zap" + "knative.dev/serving/pkg/network" +) + +// HealthHandler handles responding to kubelet probes with a provided health check. 
+type HealthHandler struct { + HealthCheck func() error + NextHandler http.Handler + Logger *zap.SugaredLogger +} + +func (h *HealthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if network.IsKubeletProbe(r) { + if err := h.HealthCheck(); err != nil { + h.Logger.Warnf("Healthcheck failed: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } else { + w.WriteHeader(http.StatusOK) + } + return + } + + h.NextHandler.ServeHTTP(w, r) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler_test.go new file mode 100644 index 0000000000..dec99c2637 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/healthz_handler_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package handler + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + ktesting "knative.dev/pkg/logging/testing" +) + +func TestHealthHandler(t *testing.T) { + logger := ktesting.TestLogger(t) + examples := []struct { + name string + headers http.Header + passed bool + expectedStatus int + check func() error + }{{ + name: "forward non-kubelet request", + headers: mapToHeader(map[string]string{"User-Agent": "chromium/734.6.5"}), + passed: true, + expectedStatus: http.StatusOK, + }, { + name: "kubelet probe success", + headers: mapToHeader(map[string]string{"User-Agent": "kube-probe/something"}), + passed: false, + expectedStatus: http.StatusOK, + check: func() error { return nil }, + }, { + name: "kubelet probe failure", + headers: mapToHeader(map[string]string{"User-Agent": "kube-probe/something"}), + passed: false, + expectedStatus: http.StatusInternalServerError, + check: func() error { return errors.New("not ready") }, + }} + + for _, e := range examples { + t.Run(e.name, func(t *testing.T) { + wasPassed := false + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wasPassed = true + w.WriteHeader(http.StatusOK) + }) + handler := HealthHandler{HealthCheck: e.check, NextHandler: baseHandler, Logger: logger} + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header = e.headers + + handler.ServeHTTP(resp, req) + + if wasPassed != e.passed { + if !e.passed { + t.Error("Request got passed to the next handler unexpectedly") + } else { + t.Error("Request was not passed to the next handler as expected") + } + } + + if resp.Code != e.expectedStatus { + t.Errorf("Unexpected response status. 
Want %d, got %d", e.expectedStatus, resp.Code) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler.go new file mode 100644 index 0000000000..19ce520127 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "context" + "net/http" + "time" + + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/serving" + pkghttp "knative.dev/serving/pkg/http" +) + +// NewMetricHandler creates a handler collects and reports request metrics +func NewMetricHandler(ctx context.Context, r activator.StatsReporter, next http.Handler) *MetricHandler { + handler := &MetricHandler{ + nextHandler: next, + reporter: r, + } + + return handler +} + +// MetricHandler sends metrics via reporter +type MetricHandler struct { + reporter activator.StatsReporter + nextHandler http.Handler +} + +func (h *MetricHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + revID := revIDFrom(r.Context()) + revision := revisionFrom(r.Context()) + configurationName := revision.Labels[serving.ConfigurationLabelKey] + serviceName := revision.Labels[serving.ServiceLabelKey] + start := time.Now() + + rr := pkghttp.NewResponseRecorder(w, http.StatusOK) + defer func() { + err := recover() + latency := time.Since(start) + if err 
!= nil { + h.reporter.ReportResponseTime(revID.Namespace, serviceName, configurationName, revID.Name, http.StatusInternalServerError, latency) + panic(err) + } + h.reporter.ReportResponseTime(revID.Namespace, serviceName, configurationName, revID.Name, rr.ResponseCode, latency) + }() + + h.nextHandler.ServeHTTP(rr, r) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler_test.go new file mode 100644 index 0000000000..adce5cc10d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/metric_handler_test.go @@ -0,0 +1,118 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler + +import ( + "bytes" + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/types" + + rtesting "knative.dev/pkg/reconciler/testing" +) + +var ignoreDurationOption = cmpopts.IgnoreFields(reporterCall{}, "Duration") + +func TestRequestMetricHandler(t *testing.T) { + testNamespace := "real-namespace" + testRevName := "real-name" + + tests := []struct { + label string + baseHandler http.HandlerFunc + reporterCalls []reporterCall + newHeader map[string]string + wantCode int + wantPanic bool + }{ + { + label: "normal response", + baseHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }), + reporterCalls: []reporterCall{{ + Op: "ReportResponseTime", + Namespace: testNamespace, + Revision: testRevName, + Service: "service-real-name", + Config: "config-real-name", + StatusCode: http.StatusOK, + }}, + wantCode: http.StatusOK, + }, + { + label: "panic response", + baseHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + panic(errors.New("handler error")) + }), + reporterCalls: []reporterCall{{ + Op: "ReportResponseTime", + Namespace: testNamespace, + Revision: testRevName, + Service: "service-real-name", + Config: "config-real-name", + StatusCode: http.StatusInternalServerError, + }}, + wantCode: http.StatusBadRequest, + wantPanic: true, + }, + } + + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer func() { + cancel() + }() + reporter := &fakeReporter{} + handler := NewMetricHandler(ctx, reporter, test.baseHandler) + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("")) + if test.newHeader != nil && len(test.newHeader) != 0 { + for k, v := range test.newHeader { 
+ req.Header.Add(k, v) + } + } + + defer func() { + err := recover() + if test.wantPanic && err == nil { + t.Error("Want ServeHTTP to panic, got nothing.") + } + + if resp.Code != test.wantCode { + t.Errorf("Response Status = %d, want: %d", resp.Code, test.wantCode) + } + if got, want := reporter.calls, test.reporterCalls; !cmp.Equal(got, want, ignoreDurationOption) { + t.Errorf("Reporting calls are different (-want, +got) = %s", cmp.Diff(want, got, ignoreDurationOption)) + } + }() + + reqCtx := withRevision(context.Background(), revision(testNamespace, testRevName)) + reqCtx = withRevID(reqCtx, types.NamespacedName{Namespace: testNamespace, Name: testRevName}) + handler.ServeHTTP(resp, req.WithContext(reqCtx)) + }) + } + +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler.go new file mode 100644 index 0000000000..c0a341315f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "fmt" + "net/http" + + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/network" +) + +// ProbeHandler handles responding to Knative internal network probes. 
+type ProbeHandler struct { + NextHandler http.Handler +} + +func (h *ProbeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // If this header is set the request was sent by a Knative component + // probing the network, respond with a 200 and our component name. + if val := r.Header.Get(network.ProbeHeaderName); val != "" { + if val != activator.Name { + http.Error(w, fmt.Sprintf("unexpected probe header value: %q", val), http.StatusBadRequest) + return + } + w.Write([]byte(activator.Name)) + return + } + + h.NextHandler.ServeHTTP(w, r) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler_test.go new file mode 100644 index 0000000000..979cdae286 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/probe_handler_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package handler + +import ( + "net/http" + "net/http/httptest" + "testing" + + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" +) + +func TestProbeHandler(t *testing.T) { + examples := []struct { + label string + headers http.Header + passed bool + expectedStatus int + method string + }{{ + label: "forward a normal POST request", + headers: http.Header{}, + passed: true, + expectedStatus: http.StatusOK, + method: http.MethodPost, + }, { + label: "filter a POST request containing probe header, even if probe is for a different target", + headers: mapToHeader(map[string]string{network.ProbeHeaderName: queue.Name}), + passed: false, + expectedStatus: http.StatusBadRequest, + method: http.MethodPost, + }, { + label: "filter a POST request containing probe header", + headers: mapToHeader(map[string]string{network.ProbeHeaderName: activator.Name}), + passed: false, + expectedStatus: http.StatusOK, + method: http.MethodPost, + }, { + label: "forward a normal GET request", + headers: http.Header{}, + passed: true, + expectedStatus: http.StatusOK, + method: http.MethodGet, + }, { + label: "filter a GET request containing probe header, with wrong target system", + headers: mapToHeader(map[string]string{network.ProbeHeaderName: "not-empty"}), + passed: false, + expectedStatus: http.StatusBadRequest, + method: http.MethodGet, + }, { + label: "filter a GET request containing probe header", + headers: mapToHeader(map[string]string{network.ProbeHeaderName: activator.Name}), + passed: false, + expectedStatus: http.StatusOK, + method: http.MethodGet, + }, { + label: "forward a request containing empty retry header", + headers: mapToHeader(map[string]string{network.ProbeHeaderName: ""}), + passed: true, + expectedStatus: http.StatusOK, + method: http.MethodPost, + }} + + for _, e := range examples { + t.Run(e.label, func(t *testing.T) { + wasPassed := false + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + wasPassed = true + w.WriteHeader(http.StatusOK) + }) + handler := ProbeHandler{NextHandler: baseHandler} + + resp := httptest.NewRecorder() + req := httptest.NewRequest(e.method, "http://example.com", nil) + req.Header = e.headers + + handler.ServeHTTP(resp, req) + + if wasPassed != e.passed { + if !e.passed { + t.Error("Request got passed to the next handler unexpectedly") + } else { + t.Error("Request was not passed to the next handler as expected") + } + } + + if resp.Code != e.expectedStatus { + t.Errorf("Unexpected response status. Want %d, got %d", e.expectedStatus, resp.Code) + } + }) + } +} + +func mapToHeader(m map[string]string) http.Header { + h := http.Header{} + for k, v := range m { + h.Add(k, v) + } + return h +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler.go b/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler.go new file mode 100644 index 0000000000..2bcd77befe --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler + +import ( + "net/http" + + "k8s.io/apimachinery/pkg/types" +) + +// ReqEvent represents an incoming/finished request with a given key +type ReqEvent struct { + Key types.NamespacedName + EventType ReqEventType +} + +// ReqEventType specifies the type of event (In/Out) +type ReqEventType int + +const ( + // ReqIn represents an incoming request + ReqIn ReqEventType = iota + // ReqOut represents a finished request + ReqOut +) + +// NewRequestEventHandler creates a handler that sends events +// about incoming/closed http connections to the given channel. +func NewRequestEventHandler(reqChan chan ReqEvent, next http.Handler) *RequestEventHandler { + handler := &RequestEventHandler{ + nextHandler: next, + ReqChan: reqChan, + } + + return handler +} + +// RequestEventHandler sends events to the given channel. +type RequestEventHandler struct { + nextHandler http.Handler + ReqChan chan ReqEvent +} + +func (h *RequestEventHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + revisionKey := revIDFrom(r.Context()) + + h.ReqChan <- ReqEvent{Key: revisionKey, EventType: ReqIn} + defer func() { + h.ReqChan <- ReqEvent{Key: revisionKey, EventType: ReqOut} + }() + h.nextHandler.ServeHTTP(w, r) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler_test.go b/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler_test.go new file mode 100644 index 0000000000..e326798bde --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/requestevent_handler_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package handler + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/types" +) + +func TestRequestEventHandler(t *testing.T) { + namespace := "testspace" + revision := "testrevision" + + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + reqChan := make(chan ReqEvent, 2) + handler := NewRequestEventHandler(reqChan, baseHandler) + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("")) + ctx := withRevID(context.Background(), types.NamespacedName{Namespace: namespace, Name: revision}) + handler.ServeHTTP(resp, req.WithContext(ctx)) + + in := <-handler.ReqChan + wantIn := ReqEvent{ + Key: types.NamespacedName{Namespace: namespace, Name: revision}, + EventType: ReqIn, + } + + if !cmp.Equal(wantIn, in) { + t.Errorf("Unexpected event (-want +got): %s", cmp.Diff(wantIn, in)) + } + + out := <-handler.ReqChan + wantOut := ReqEvent{ + Key: wantIn.Key, + EventType: ReqOut, + } + + if !cmp.Equal(wantOut, out) { + t.Errorf("Unexpected event (-want +got): %s", cmp.Diff(wantOut, out)) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/handler/testdata/config-tracing.yaml b/test/vendor/knative.dev/serving/pkg/activator/handler/testdata/config-tracing.yaml new file mode 100644 index 0000000000..6677cb59fd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/handler/testdata/config-tracing.yaml @@ -0,0 +1,57 @@ +# Copyright 2019 The 
Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-tracing + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # + # This may be "zipkin" or "stackdriver", the default is "none" + backend: "none" + + # URL to zipkin collector where traces are sent. + # This must be specified when backend is "zipkin" + zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + + # The GCP project into which stackdriver metrics will be written + # when backend is "stackdriver". If unspecified, the project-id + # is read from GCP metadata when running on GCP. + stackdriver-project-id: "my-project" + + # Enable zipkin debug mode. This allows all spans to be sent to the server + # bypassing sampling. 
+ debug: "false" + + # Percentage (0-1) of requests to trace + sample-rate: "0.1" diff --git a/test/vendor/knative.dev/serving/pkg/activator/images/activator_activeRevision.png b/test/vendor/knative.dev/serving/pkg/activator/images/activator_activeRevision.png new file mode 100644 index 0000000000000000000000000000000000000000..fe94652b18013575ecc07293bd62a837851a26ac GIT binary patch literal 20137 zcmagGcT`hP^e>u(E-gs!L8?^g9YPUQKtQBO6_BDRMQR8o6axr?s7UV$DiB0KS^%j* zT4(|SQl*#Bi?nyp@4f5&?tAM!{>dWe%$_|nyYJ5=#^|;#^(D4TAP|W9<_+yTAP@u! z0)Z>ZNr7)ZDE8b0fo^-=)YiBgFt%O~e*6G$vv}|F+gUv-g^UD?dlCvA{cQF;vEAas zn0xx9ZO^+M%1E8_DWqN*9`16Jlq+O!41mY-XSpfZG4pGinjg9tQ{*7ZynKe~$b2425Py{d)#iB?E!}qxnCc!CNj} zJo}Fo5cMkSl0<=3+ip&Z-1CPuDQJmhTeCIW3o7uIv37mtL1RhA=gaR^U(@R)+jsc> zG7l2FSPI+lzEe($?YK7!^)^}Ta!kzgRpuZ=$2J+wAMgGy@vDTqKA))IB>R_xuk?gH z#=*!U_3nN*+@9=m2>)XNgTorDtlwXiA$SL__->$u(gz+A|TO?)cCvq!veg7FlXzv+AqiF-%Lr3FitGnwq2o6 zis++OUq9>lYwc+WYSKsK(Wg}4Ypx?KUv_+ZUbK20tktXioBt~f>w`9Gj#RJg&nML8 ztP#*cT}9$nO+HON8ZN;L{xhmqQle90lnx4H^#4sc?A>G9+q?;J=x^1?DDWmHM}Xsd zcCO71Y*Cb<6du9hD&5sp2o+$nfCE-Jv825>$VnNtWm`EivhwDh9-N#+?h%WYMp7v~ zT9pZkWSx5Lz*%7Z@UWTI{_&mnAOA{hiObKVLhE+xgt#xX0YfjfSnhv3PqtQ!&&@)m z_|Btxdg2It4A7AMc730T+MIgFM5Xldz<%eB%6qhNiue!qHga>wJL6nC9DCw-+{_-E z07`^HR~}u_^rAMjL4pB=BM-TN;~mxzJuL77AT&{j~R#Z2Wgf zLHD()*Q+6dbI1%VC@g;_y+!VF06F=EVh-kp6?2NTIJWm#?DKC6TJQP&?14gio{kr8 zs1{7cv-&U3yvmdgN;s=MRgCBSiqGVqt?)|5N@-ne1hKaA-oA(SQAkzSxZk-NlLp3z z#OikX=H}7%eNp0OP4+wW`qp@ls9$z-_Zagk*jTnSXER<1O-|*$TeKe&D#0?TlH;{gW(S-1v#+H z!@R|7Jhoh8s#kmO4`B|vx7)+9s>-3ADwOviv$dJWL-CxINCnAUC?z?!me()suJTsz zoxHW`-tzpvXCxl?MA}_k3qBFIoh%*reJ4?{l1T7+(FvbIF$;6M4pUS6U5=y`zViXwZhR+!ynhqdMA-Vx53624h06QwSmV~ zZ$q1Wk3;2fsy1&>7EivQQa;$9Ag$+Ll5hcXv9yHt6=@NY*Mdjen6tI_=|`E1PrVwt z=cMZX1!durBuU5P!rHu4HDVRBjj4Hhf9W1ctGC`?k!XEC7saY$$p^zGs>#tMMiqbA zxOY|wGu&c0c42_3S!{dxOjtZvt49T-wv4J*;FXv->z?4^vyI`0_gJj{ld5Z}_QmP^ zu#+!Cff9r)Iul<^?qZEI=^0GuT=by90g>i_=%P9hscVNfcX_ERs5`Ckx6+ivnbI2> 
zUJid}44#S7Wx8;efXdZ21}M!`?A79=w;ij_19Te7xz2LC8hOl0zAO+WL@-~jvO#zi zmQU0bWT^pW+2p}k+UL*YcS>xg@_OGh`?|T5}1#BrfDs3;QGiiUP zdhV7aSfckTY@s+VMkusnJ2?+&>GyuU&#rD;ZgYqA;Akkm|Mbzp;9exE@7&Viq+`-# zETMDaIi`vWTliw0D?MkMFqQWQkYLD%#U!fQ&P;8*2yCHe^Y-0|9 ztlv7WXSkBDYENn-Ng4ZgAHqu@iNs!&D8V|_C0qH~Hgz5xR5MTDAH%q8$@1*wHt$a4 zUIPE6TA5IH1I$Brjp_Hp<3BD78k^;Oe%5<$mxKLEb5Wb)gLs{hhPTyzkI1z~g6BU~ zIXAlC;*DPbp(J={XCDgv$m#6y6FFiav~=Fn>h+N&I)t8wXAp3zFY6n!t9z^KtMp|d zAYx)8{J0kU9h$UYtsb$uhg)nLxdd=XCel{%go}iG)yNQj=*IO^ntV_XNdW&K2W&H3 z)q@sX_Eu=_4=YJ)H3f*|p4!!dy0!}Fn`B-1W6PS4GB^LDY$XD7meSS`g?*)WI^>~Z zeN1gHblXA1YvL9k?Cbho0H99I<8+kKbjV-{uS_+O^6W>%ARY;u%C1*KweQG}&hO~k z`M*cWW}UaaD`e(_VO#>JHk<%YPl*m!X_HW?3Y>Lhlnh2ZTcNmFj!S8@_xn&0No4D?iX8v~H2MRNSbmUb7Lrh3g?F zGQAJYZ6CNL*HVBy72{H>?8UciN3W@uw_8%55Wm=eR=zsWLPC!T?0+wcsoBmmlxi^S z?!ZyrJW7e!gHx^5tUOF<=+4aPej>ds0l{l0um;%;yU{~y_ggwBrCsMab;BPH*dEIY z#20s~eIf;Yo4+=ixpR!{%FMfi))a1F*ZezGqB~zPkXzCwuXF?7ay@Y~l)X)QNzl z0D)}KR4Z9^N>-d*&_@~~C3ZPINu(bkH7sQ=%xa|poaDsBAdru&lus(#Xv1jyjGJe}Pbj-A3cxYa&4!4;`YqQ5CQMbSLHo|dFl#6(Ns^YjhtGf4j(ZmBP zP|lN=?159J?ln|Syga<#5>ixyHUl{xX)~16^QzKMkNe9Edsv=dBXzhIP`}!##oiKD zcK6;`&=Vdgk^Jh!I6AW^Ami9 z326*hYe7ChZ@H6&#w$ucm~nPKb|$98aLir1U+N)994Y8VK6mZ1P{iBDOf5J=_fl)BXS>Rf2?u~J| zCI+>lt988)bw!Y?yqwYm$X3phLOL$}C7q>LX1S(wHY*mH*l+i>QT|HPwWp+DFLke9 z%)=3QUf3*8*x?v^B8VK}s`E2cUh231+TINq!zBzh0T*7+h4J#R^3#75Jr@(xwOOvF z$5G`-KsaB{N+sJr-Q6-FLiy!S(H-~clN!AI=<;!ETn+>Hm zWVgz{<1c~6D4dxqvT{4)Pog^)HqS}+bWI=(Yn{nYR_4c5(+5h}Kz^6xt zxb=ewZBYmP_N#1aPz9yla1qC}v9)l0;#OkL&Z-fWUGu3*C0BrJN`0BCKFb$Ta2bLE z(tI#9l0Qdkqu3|A&vl4bqGR1?i8MBxA(9%o?4Umq26tzFu~VB0Aac3|(YRmiGgqdH68VYQLf2E>{X& zR(!8rxnn}!N%Grl!4IQ`bGx?8p$6)64l5jxW~9an$x{f(nk87~24@s)@wlwk#3%lC zTz5`eyLa36-0E$svj9G$gB~IA6rnM%<=nIq4s;-Y%Vq)Qmm3~OnMIFu9&rmARyIES zG;b)giL}(S_7+}rsdpp1@B9%wQTT@l^bN5jt0fe1Ak@h{vjrGE5hIE9RL4)w@_AXi zdaK|mf9+Kw5d1ZZERi%Wf9Vh5UQ=0YVs(n90t5ulr?SBNWa)bG8>@t0Ibt08V4rZ4 z3$RwdHB|8rxmuERO|HxKc!_qTSE&?i12!1$&Idc;qI;fTpY94miNs@R(v3anbIgKM 
zeUiK`TXx^tV#rtF0-=f=slJNQDUi98XkDuQ5l7R8S6NI144k^<8)xP%kO5y@V#?F^ z{5h3O&kAC{u(E^T$vIbpf>lUe=`I&o=6d~zd{kW_y9d-zKx{h4uC6st+NOlyuYYoa zKu_Y^i&8&Ptbh9~vL0H-_~RytqtpZ#)Th&3Js|vAyGu(sKT}xN=5D&yL+|1cU@_n3 zb3gh=#s475m0pr=LXMcW>$x6p=VmMdyF&l6DBw8`#3?Q;GqKf~^c7^+rEq(TA#`I@ zhzgkd3Z`aYV2taDG3J|o&!5BW=gXzB%c~ffXKx;M0!1(>7TGVLU(bnKYemE!>=m}% z{p@3{-{{du0s?h}P1HZ#!E6eRU0x473Lo0t6%^n&8yg5MANhmq*s0m9=rQEX*=dbn z+NkNuceS$JB)=%=thc`rSvUBf1t%G9vy^v#%TjPH*GuVF`tCte{uPmGs^@P>i>G|j z`CBkTS-WO~5L`L|4F98ECBuo}u*ggPZ$!V}gr%uRxUR;(Au3eq0V#9F{~}tLgmPO& ztK+}A6P)*RFVRI=3ft3<*0#)W!E-fd)o-k#kRv*`tXx#g!w|2Bj=$1V6YCFL6w8#g zRb1_}=%rhW_RYte)mBlqISmz=(pg3LdX0Jyc0P`;B{YzK^zKQPUE`jYuspj%7WB}C z5lTX=ZIw%yEo&kA%N2oq=sq|9HWF`tzp{JN=t}6fq${hc!TYti_{Qa1bO%JTfgel+ z{4juRYcYEppXrsR@Q zKTzqW78#WGC?!-6Ks!Z_k=FW@{WX@f_ow0y9+s!r>Ok`F+8R8!Z~{gdF)t5KKTdnA z8)P73r*!p}tB5Z)c86v$j1`|s358y!mWN2RVdf2pv&dqiMw(9J$ilBMKo7(`cs7Og zQxz1yeEFgRy@g*_^7dPU8!b?OA^~0OfJ!>QbP6p#AS55+AT~p%C`TJ1-EVXw0NgH3 z+*;pSIkG-F1C!v&t6_~!NLduH$U)qAr@A2d_mJu$oTT#>Srgpv!s7(vgW2B9yf}Al zPkzg&{nNtJr1#mh(I%Tt`DbG>WZk) ztRMvYnOX42jg!HpG=7-v^{lX5D#PnSunNDnnBe%IIZ<_Qwcv--U3ALgNhuDa8Gq0L zIrdMO1AAQE!1cU!Q|pw)`P*+AeC8|tgQovz)fL`;>`huc`1Q6bAp{>mSeJL}iQ+ug z7}^$&ggA(AMGVvaOh}Lz^(v2J_(|W{`#lhhzs%{+jTcUrzhT<*(4@JrH+x zKU3=T)>{*e$%<2$3B&G$2ON$42|yEyg{^O(reLDwTairfXTkmLP2?#ElI#kIX$TrX6+hIT|+6N^+A8ee}9 zGV$%q@R{G^;7ZllFBiT8ne7`W5Ft}mi|)**lxmsVJ=gF_nC#jbRiR7h(VK~Y6-GFo z%Je0NS={rPm%aLR4eMApEYz4N&Er+=7EbGGG5(Cs9QpO9CfX%ugc$c}uK_1L_U&l; zeEcoudSfiYd_P`Xms6(F^kc~yZoRg)0sCInZ!%!$YXSQHLcW~mn3`U8@MHihA|{Q$ z%Py|<4Xx3X@BELr#itW3AA*NXl?&ASZ4dE-+!G@X>HcucJCsGq>jhrkhBT9_38~*5NtNgE8-!x`L0-J)}^)cXLQKu+yV*WvAX-| zW@@mP+zIq&Fc%3Vucta`CV2Bu&QU-rM9p^QdLOthxnE?eg64Z{j)@#M%i`U_C&Eiq zZFu*q3LDq^zKdu-aT^wHeilb#ULBBylh|CG)4W|Q?Wek(!H|dica3sCh<-m**^I27>1=7Q z*AU$JmeqA%(?X1R#Lhs)WknGJ0fSMfYEL<9$1*ZX@%pVCstIp;Ca<4f9te@dY;*Vn zimX)wD0HhLSY3q<7kX&F6edj`tx6B8XFX_qy9tlnt=pdj(SgHID1owB;izXIaQ`yI zL0XF;vOTyW-q^qf2(S89M`EN3(_pYKFQdNUki}`_(FEB9)Mk6A$ehw2Yr`wxvP7*n 
zM)=Ze5oSj*-2`!&+jRs>?s^&^o%lR_pRC|*nazvNM@gsvFAa#MEn_41K(2_ysDA(u z4_$A{Wb^&a3V}>N%Af4iYbEl5x2zmA-0Kbq9f~W+el`=ueP>nb@ap`W7!$Nq{096% zi9wl{4p?$}pnO$Dw<(wh2Eqt+#V=#}SgDKR(h6J&KR$HLh*F;iDtQ$>93i=gi+brE-abn<570+8EcarS-NS#`H) z$2A{-toUKr+#kD7xpzMxktt9pUcWE$pk@Gt+H~O0XjXD`a;AtrQ)-_?%DR&wf(066 z>K+h-!Jd08v21Lp+Yc>};kJ7s(jR6Z-d-M|!*L({YX~GdHKlEk=9;IU3mVN&RrSfq z)m3(sgcdtY#ZT?@;BmzB8#UWX(2$=5&;C!>&295%?5Q0~UDl(*_9KqXYi7Ech_%V` zB>Y#N%RajIa3thIiM7h34$P0GGv3uFX2z$@O5US4m!{Fa1A)J!Zqs%by{zQjiWVVc ze021vUh%!{Y_s<%-acg1D*M)NMd?p^tWGMQSBuQmGojGl?Ncwe;RVs_?SX1@YQgOD z>WixM)bxHq*gp#;(@MG5(t4`TkNr+EBX6vhdbyja?VY?r%zl#BUUm-h+0uC<7r}Ky z#9WoV_ppB?KXW)U8M*t-rW>t=LZNo!TPKM;BXC-(joq}NKZvzU@T*l&yBYt)@6;n$ zPU|o<Vb>LW>(|D8Jm-x zVGtUeYgC%&FW);BvK4CMAJ!)pec<(r`#;e^^YOV5fb}&CBkS zlwN$ufJw-DMoTp?I(pM5t<_#F2Kkdm%bM zXX@2_f2>wd>AhLb&nd0hg|@G2|BOn!ag*9!RDg{!GkGk>xj#vs=#sznde=Skpi(jv zdhcnUhi`k5f8B#@))mA!0(SCeqtr(6#s_>-q3Vxu8!y>w52XFhQa;YV{?^jRZtWs) z788$Mfn1|NX+Bk)u(!8NVg!MtfTWwS@lt^2Ln-SzV&5CnwdVoh0Q;# zN;;m28oxc)4@tLb#OW$jda2Z&Tfh%9S7u9|x#YXhQ%1Y^*?sKsY)+aw*~{V!ltvWl z{E{_%KPBG^MDOI3UR%!wtZ#d^JGZM*L78O6`15ql!y+5H~2D( zgGYB15J@m&us6F^&HdJ=3|SsDMt3>Ssx9(L_PN!cmn@4dHQh?Pw)XZLa;ge=D;!LxlKwyYtYF95Zy%=zU>qrvr6xl;LLpUw#vQ3kdOH+=SJjCdgg4L7e)UqqZ0aSs@P9@msXac{goJ=>TK#gKb!Ye!3{=8FRCNo1pH;#Y_p< zM%`M8`)T!Yj{tde51S^x^FUJ6B^l+}o1P_(t71!pb-;#AxgP%LFSvY==|48By{UMr zDN*iAtQ|nRv;R1L-M0NQ#33hsYsxJg#gcCmAu{^;)~v8r~T~-~xU#5o36Y>ci1w}NU;;3u2`&G?VwLPT+ZM~=>(KdJ3hp19E1AJ(}Gm9zZ z`!qI0!s=DI2Y6eQYgd_S)Fh2z%} z`CtdyPm>C!w%{4$iD1m?yY`la@M9|EZi3BTe83_dRLk$hYdFh|>BF7}em&*WL=q-i z>=76&rX_$A+)q!Q=;GXWO7*(r2AuIq*(RTY$6y0@eS^tdU?s730!7(sg_9=<`|z{4 zskMqqbya~w^AcS(E*}ARf$hvn#~s%71%qUd(5Q#zG=CbFzeEKKM$FwwpE%m%)mc+J zT|LrHt#3N+RU5<>?Z4R{_qM8xH+k!C`6TZ^RHCNB-#e%2YewKCDb;9lEuE=SqgS_< z^n+M&J8bhCJrAh#+CX|)TuRf<$UVR1XE}e?P83i(weySA-)<(+NzZ+f_gJpv?nK96 zkeI-wD-ZhLk63K7x9eX=TREv;)9_L6|NP*|fOktT^1)=hn<9o;E%v8&3)qv9-Kik>n-_(_raN=s zW|Y}WBjVq13ma6*+heUhIV#wXY-e`mYm>DnzdwjyuXe19dXsE|56BLr=~LWQhO`5= 
zF{#M**DxhHp>35S@Hm8VG%9S9%X^+@x=*MfzO}{<<{=fO`{1hms21TJ_lC8822%Ml z^vs(oojC`urYq_h1oO8;qn2bf=mXy)tr20hp5&(4!SYLY|8U0G35;=ax2gke9oNu5 z#bJ)R>eVaMZe~Vh%+PyBUf<@;d`|A* z7bqDeN>AampOC6<)_Hr5|k z>l$b~-;T_F6{$Nbv_5^du|h%KIGxF?z=n=4%*H{QL))`Bd~=5DORm5`O0U7XYeapY z(WLt!vBcUe#X^!^ebe=O>4o=|YYin}?(-)>_b73{sEOlSwN#R1yND#M7t@ZZha;?j zyl9`aA1EaJ=s>AGPDCwHb|%Z)i{Va1oya3p&9lU2nd6ZGx4|>=Ggkh~)4dk(p)uy0 zTvUy8)6TbZXYP4y2COK4Bl$8rdJb{5ut6IsxF({=Up&kj1-ne510DvqnQ+v0ww(v{ zIkWVQx*e3gHGxMjiX)91Ea1E8(Leo@qy^0KkXz%>$ntBgzQ2U`S9;1|1xb)2-)*;8fM{ZOQgB^)? znS381;=m5@iYtp*O57&m`h`sLz23WW#je4I2NxE+CS+Lh@|ERD2ZumwlyOU zto_!)(Kj}TaXk+zjQ57yf%dI6=AzbKN_Mk+y?y_3zC0p8yqkROO9Ky+9f;x zi(xV`y^{Hl+?)UF|K!!SiuutgU^o6^tIQ*lhpS)JN}Spyd-gOw9(37yldV)iKc@l# z<&4;9gyCq6*h_Xk+v(S$6%m-SHtp~4)}ocx>jqN{7Uf!gEEP=SU}0qDzs@&$~y8vD4Gb^EtKBx zdy*xaEI|-^h?Tu^}`gN-)XF;_mtHrNwTLtJMuFH=C4j% z1DA1FORu#WjMWT^BUiBJqDt=J@!ZyO{;?0MdTEJ3IrNGC)1y>Xh6xe-?vvZ@Bm@tV z@acO#8>d%&EXyeK{xUJdlG~?-LL4Ew9 z*U!5;)IMh+6Ies%pJ=0PNUfUYVizc0l1)F0K`rMTG(N#awVytth-IzQDMd)#wnpf7 zi_%86;u1xAf_5AnJCuBtrVQV6S9uf2*7quG{F6&L#Uprh`}jaUwD!<>y;WG7`&o}e z;AM(vg?Rh^Z2*#@ig{YJrlu?|>-}VeINGJt6bGLu!^SL4gwT^&^#}Lr;C{&vfbud( zMgvZj+GJDejg{{n$v^2EyvF!ma6jvSn1gKbyPj7MafhC{;$5KCiPsm>=O6IiiBs6I z$VjQ04Wmd8X>hN0-PJ;LID$~(Oi&ZntY@NNNyd2sfU|n0KuwkvFOlL!>&Oy+5rHaV z<`PqWQf?dSck6Fgwp`D7?swxE5lXxb2an4JGEhk(Rcqe86y|Al=E0HJhdoJX7M&|} zcl6*K7N2}GQ|NX)RlXoqdKGAL9>&#nqF^AD`+GdqYJMxyzgUdp0}pIi?A&rTh?U@U zd|tqroJQK1tp$IaPJW{;?<%Gle^~8)F!UO#Pf|yVdMi2o33mhn)g~E8_vYe!`&wHD zmAIn4%(Jf3_1;PMna?GfaWR{?9UT`yKAToVQ=L(UjY?G5oCk_TR^3n~`k-IH&bhc; zy-a!Ryg47!7f0I0FQjH)9FHPL9z+S~oV*OPJ+^-yBC-P@bL9JhdUk{uS%hQ4x<#8B zmDQPz@@)ir6XJMyi$0BQW#nCe1MZ0Dl~{wHD@m}~R?IL7s_0v+Hhc90)gabMS#w2e zm|@GOu6Ull&0tU_7Hqm zWSqRm>>>xM zet!5d;aoWZlH;|N!`L&*L77wIYB@=HXI!uceVFd>XHXz1Ssoq7L$%~u;k%}Xr3LjY zrXM#hFD0DMBEp8wmN#66)6L90JP7u^!kSQiADK%mY;%$T0OQ65Xao*_2>>q)bXypU zLupVbm<e z)wEi8snFpMyNFle8u`{IPmwIC-pX z`p4YvF!`B!z|paobeu3UWJ~bU`9goA`x5s2(8W#!yH_6jEOe_TG#&++;z)7|1o&LH2LTIZ)hvwBr4SWu#5`2GXfh%%D`zPwu0dD&fwz 
z7Krij$^C-IoZL9&)`NF^65kJx8;*MrwPe%@t+U^dSkwJ;Ykm0u4uG5TK-g~MHUp^UXtVrTUG_b7&K0BW`m@G!TER<% zlehk6EDyNra&|PmE?*)i(G?~Xh6K)3Jf`;Qfa6+(%1}o4F3Z%C{YvOPT~a?TEL^-f z5qU7v);4ua7J6_uWl4G^B`|Lr_bk9M2e1`2}@b^ zY)%vR#}K2E0eI=?cc_G4vEy#m zHWqq=f}GVA!Qp^!p87LR2r==y1@AuTATuPa#@dBgcx~X+jwlOpD;@zD% z+M-NWPd46EF;-=NpAq`;-K4KV(DZdw>vik+z=aT_%8Dc?|Xj8LgbsfAmA)SVdA zWzOeP9eCxwsh{Ml?>e(+d68cxeV+M&AI9LAvz*$QEOgj#V3-w1J3A9*L>xfSxWyp! z9m!*OOPV+mEw=l(nT0wV3N4v`C|M7)alJGBJA0EuT;_>?pU2a7w#GtCApd|V(&MTN zt8L_jr2XYDu8;_lt}zRE>!8AX@XIY~Rj@7m!$_uOkg(6(u|4&vX*cSO*2VN4v39ji z-aRMuNhh)xKh|Lngw-mM&XO2_tiehpSyR&B&rH8< z?h?f1@kx9FpM?Qednj?Dvo7*}Yg6aq_8KLc3Sho3GNp&8fqnM$W zg@xs_YOgx3hfjCCA@2(}PYX*-qY=hOVM!Mmp_fvIB)6I^E5nUapM{}7OACSGMCTL4 zEs-K^I@mAxe!#6`ivdt_lY-}3<$xDN69TV)!s?=4rzsu;K((b?O}O|KW@ULE?8?UR zB!_YYu>gteP4YR$<{*Un7j4u*T%<<&Ofe1X`Oc;2A@2d{J|k*KIGS<#)3A4qDr4Pe z!YwZ7bW& zh3#&d-1Fu9m7WmQ=Y5|XXv0kl5AX1s(~w&!Mv!SOV?;=t#K|Tpu(VIaVGnj01)=Qc zp8Jt1bh-W-q?RT;^$IUjqLJrn--QaPzJ_zpEvK&ZyPcBSl$!lilCb48IVfB`B*8G0 z!g#rnx9%+d2{Irn*(wwV&s6W(y`e zH`c~w!C;L?Spe|04|(IF|Ew-g+p`1nXv*ly8cStv)8ZhTBui>^J3kncuGKQVIS_$@ zWda4Gdw`guJX;{#58E7w*85bsf*gKxKugIApjh{a}&};So}j5;F0Vjr}4k z_}gi=>AUng%_|qw;Tify?U*CbV>@m#>f=&su?~a;oY-?d^T`~cB6=yh)X|6z`nR{+ zRWP+-+O8(M=QcpBJ-G){VW;rQ;Vc2@5BPZH!P3#SOYild`ily&}43}OfoPl+D)-I?287o(& zW+O%TxB&D-G+B2)gO88&QHB*5=sE#v0Efd5a;p<>?ksgUdf(4tqFyXcuVz!klR%;G z-#sg+(B!}9PQg{9Zs+4vgsYw}hl%v07>UL>XSsSyiM7fOuf^KRf^(fdSI#4E`=3UK~!Y4x?^>L@KR$Db(3#i-Zw#J(jp9L`4-rU&xC z{w8&Ow_ByJ@G;47zqG5P9CK&ImkQi!(`2DWRJ0QCl}^IpP(6O6R$=7g%(7;-06;n z!++lx8?e$r%rmP8Dz)aUS2fPn6BK(Wqd2hezAYu4Nfh_V^s(U%pXhI%(V)YhgPF2P z{s+>lg{3AC%`8fa#DFgMM(P@7iEX(h6$-W_K|*x6q_4-vOihZszd#K;14}ht2-tW% z_@!||RmE8){(R*lPx6P&7AC3Li-159n|ofVT1~85&l6_^7YcSg!x=vNMmX(Hp8EZL z2WCGLr4zESsQ%Q_Oh~)gyQQS_Kk+PTdVj&cN-66&EWh;KWCK+dGt=`IeQvC~hbjo( z-YTT?a5p|vH5}WRj_hi|KH4+=rx|swv8;6D(LSDeXrg8o z@@y>XB^kU_E4Ob+G|mEG=or~hvKdUsE{2j^W;zRQv#8d#erAZ+h+-lCizHa=`zn0} 
z3jJYAMy+wk5;1i;=3!2bHlrlx6q(7(eZzg$&;4mz?grd7KzqdCym5zW)y-dMC^O%V$OY0JS=$Et-{WSRMU{LKeJP$!jNCN1J2e_WZ#qrHyT z`##%hfaypZcTdf4BDU7@jiPlKI2hp5g3P7;mDc3%cm1)iWVp>2P)k*2%>GK(7 zBqw8U@uM9gu#=Y^1`T=f|9fV@tF*HJ zwp61Cd~TUA(`StjPIUWIBhsdScv3@ER%WaPkKAzM);^jr)g8erJKI}3tGWOu2xM%4 zX*K{$x)W4sww(>`s#Imo=j|ytdT2CKJRdRJf0bl%gFKqVJd3UN&E}&+w`JgBg-=-N zhUN;eJs`lecMwTJ&0kU=Z=vx^2=QT2oi2@3>mTz z6-yl3Xx7wO*EBNxDzG<>9N4s!izsOYVUR)x29D^$3mBDWN%KO04AzV?+875Vh2uuN zowb4N(x)Hd&}m9Y2eQm1*x9F)GlIh(JI)I>G&BJ(2YR|)*yb~m6wb{!eyxcYe38Tg zv^%Wu$2~;3(>JIcg?hv6DS$CbCNQoLl*asQ}d7;W}~V{w)Af_#4_M6-UzMvu5Tsb4?* z2UAkX#~W#tpC{V(uv2PNdPP>=0o4F^N`74n>@9E#XoEADpuiS%PNIh`)8^Z{Ui4a?-Qx5=u-SQj_@#0f{>Z$0yx)uMX-b@M+BTv3 z)#?O+R5)U9O#6Cgis8@I%+>=%hoc#nshi{!F-i5)DUQe1Wh`u84@NMqsv|pn1y1<2 zW2>iB9w7L7FHQzByUluSt}=@U_J-kl=&y#)x?vhqWh(Vm_D5Cg6aCW@FUj88i3~gN zR~Jf3FBpnRWbk}p;*@uESn|E!T^Nq%X)1S}M5VJra0&f&RN#IO0=Fe=zQALjp2aYQ zP4KJZ{kW6jNYO1H*O7*my`W~|RS|lt$6rW5=Fe|mhE9GHe?T5R`hJMeX=Q(Ky}pBY z^(S&a&o(516&;9vii?j$o=Sw<))IHbu%^G<`ql*vX*llp3SOWiC+RhsF6p=owVh~P z@o(C$XAc?gq*N~wa%7r z+;A{$gcn(^Bsk$)*Yb+E{2-{JwhxYME>;xLADPIbHFho^+Md-GQ4LRg@eZ=2?L|Hw z8QC%{`4ZWbP=t{p<+-g4++6YD^UO-geJV@b8p|<7@XSQ&sd`Aj&>t65mr_+_N?QY? 
zR7F6p?pAQ96T4^mGcIzEH^CCXeN-B+-^QuinkrH}JS~#I4O~WgGOsk?#${l)Rhj*) z_J{?B7q-t1h1Rhi$PqpfiEqJwyNnV?u0)`p3p_LgL!so+u{c^0(XTh)0b#RI(o8E8 zhNY(QqiyQ;iyotH>XDMk`FTJQX2$>ZZVR)PFLA3@a#nr+qHv*4gZt-jdZK=Xz=bOP zq4l5s-J7KFgZ$jt#b1ABly1Tgb2{q8E<=Nz^^*Ybx6_wk&pG$@VE1vi#={@w+NoWb zwMmR1EU0h3i;8eV6z1+sj&U+q)O*lkif7xhO&Y4luEDSWy1h4nhrZH%*i#n643{5V z9DPy##E7u|FqSxS6oAX|Zcj%Ea;4`sI6ab<>!`9u6m-1eJZ{}~UtoZW3n6);P)lVh z)czCYPYHD^tUOxI-6vzTO76cRhcHFfl+E}vi}zXC>Bh&l@oUG`I#!fGKdhw1&_KV2 zV3HM6Lb`K0Yb?)JU3h&*JcF1kW=K3YB=p(bboFva%jdaAhD6q%9q&vg)?oraxn{?J z8?Plb5XLcqVM3~gJwk~2&RFYTH?48Bgt$BSbe%@k5h8k=JV$x}7)0%+$7Z!RYFT-C zHcbyl4W~e$xbEPsG!rN+5Ct?gdylz5ODd$+fm>R{t^8$=bwaAY&(qXy>yv%Z(PH1u z0O(o8J8!C7D=TiCyQ_|he^9Np{&?gP`Q8wNH_YF|j=uA?=cdT5TPdIUykuYRKL&2y zF;5~B1)Iq*y-H|#ppqw3yr|i%f!SR(c=T>wZn&5N@^e6Fl)$uia_9O^MvQYX6Jlnx zl@hR=ErGpg#+>n0M6@=>nUXX0GyD81iR)^w_@DyNpLy~OJ;|`UdSH?|<3{?opM!4M)7Ls|W`L89+y80t zb@A)!Eyrq6cG^B5sXsB54E~jbCVzO*t1|xt&XA?c<XD3Qf{BQy1EdY+KIjP*DoG|tQZ zg%C^ZynR%H>vO{mQFgA#=QmjDo3P^at?n&ek2QW$12CeB6R;&GAE+@AB#%zUJS_u$ z>wtZ5kVcZ>xunrI(~@7u!|N&IzE~oBk95v*iXf&!P)AgoCY=waVyW)X(;55#0%VTw z#DNlbpW1HfbYfHE+x|gyNsYU1mh%}garVgiN0VWj8BX*pB`H}~z48<{k(i2<-aN-7 zy}z|>keE|v+_~+8iF)%H-*}#I(;acN7GrMClDvi&o%f)i4T8ROZ<-4@drdfSJE6vf zG*tF7NoQTs+=&Ien@{C3lKCVRZ2-qgX-Iw_iThj8&Z#-fjdL`&w6(mc#hl=sMTokF zEp+Ii@?Q$BE?!P%>+E~k-Vl>Qg4-nhTPa5sNIG4C=h=_Mof-9d13-hljwkfol>3vv z^fuKV%_6-=3Xb!yUK~VI zu*dKjkY!F6=>pLZ&>ryg`=hU-;&uliHK4qI{thAV8{Ah+V^Gi0@EgB6f>HR4ti4dP ziy#T4K>zI9Qg&6c1fk_k{jk)KT0tHzZogjc&qTRjLe#?bn$DKLl?%|f_Y~1DK5&pc z#E_KAr~70&=Nr^jAd=DY6@9}Bf7b=7W&!gMw6F@M!2QIryYK)RY_0`033?+fYW5SA z$6wY!$yZ>&FA(S;62hM%^^J=CzwPwZ_ohbosY=vsO#R1GwI>jlI<;a#5xhh2tG(aI zjA?vF%llpW`eM=YGToBT|86@dEu!GDxHGmemztW=)we5*<}B{gXY7+JH^OqI|Av$x z&~{#>L`7p%LH#4Mn1aZCo_|f0j##GrJ8My9HQ3b^4>Ek)_TuD1#H0P&^IL?KNV;+#^3a6pxx<81OVC@ z#WGMGkJIj9P0vzmc*fFHEAm{2SA3yCx0FZxes*we6rUr$<9|;zr!YENP+2oor^IBx zB+9Vmi#NQ|mDWx{QQWNe##YvE@Xg$SjuR(}a50m8*uMK}ujF&wgv_$jb|G@2>|Hlf z*LRLQ2+HI&gJJ#O-!jF1MYAdN@vUaThV+OM&$rPz-@le-^RXto 
zi(oL&bJG!+qdH#6iQQf?B2lGIWYHdF(9<$WqLW{Vf5{<&g!`afT;DTC)r=DsXd~NJ z$fQ|eabN4h&VR@`z%>$O5~VNPmJ|)D@Vew>Sb#pu{>D6 zJN6Kf!wqa-k^4T=b|>SMviK53R$aW5$=Ep4*Dt@YDr7QlnEKddSPjlUmup-*x2gc{ zVYAM(AsWYzib_c-OtKm}?6juW_%lWeYve7M|HD9knzE;^P8XY?e(Clt-T8r&k)nk@ z(GE@g_rh*d@nEPWRs2z86w|pqet0xiC_A@nowtkam(}YE!_Mt8ke0TU3X%ch6jIGM zL=QM-(MCl*9w>ET@fh!6B~B5or_;(NrxFOxY&cIgT^cWZWg?tt3TT8`Gj>N#o|&s@ zBg%SDvWE=*Sjt>p3%aSFm!VoRvA+_$;Bvp%K|fEL`*rMYpigu#W*)VdZ*qYl6%`c} zOqP7UNPhZh)fBnnC~df7DU+EHHEhMBhL2p9JX2dj^Or%q(C=s86iV!U*vleS@wNl~ z-8;%Gl9iZOyx0x)F{zWz$CMEt{ac1P-KO6r?U{n4()yT42cdGZc-#E_BU(Ia*P2aG z_<{2A?y{AvbaD~Xd_pLKD`IRm>HR+9^IbS|!DQ)elflbzhUPzv>8&$OMmW3>WjOKB zW;2>E&Wn$ZY6U#gnH0v6U74AOl~7$&^F1&YR|iTRK3ySJQL-+ew;0d>wOF>~WnZwC15+HBDY-(!(mjwTjykgt-`x@}n7q7GO|1#=QHwKJ5THv=M%7~6&qh)Jy$YW zzcK%yq)>o4VlbFDmQB@kgmUzV-R>KH;d$@YVjHKm6Ow7q*sHv7i?-p2UQY728HWa% z8mGYs81P*JD0aF&3K{R=~h~vcPf`0yPMW=lmqfp#cb>a-r~( zqXxc0SLvS|N7UZchyGMhOb=SQsd)AaD%u+gD{$7Wmw`e3%g48bvOjU4jsnn?aTLZ_ z4Lx-0Ds`Q+X;ji$$CLNAjOWhJH|6YI;qv3Jw)p$T<}#mxEO+VCWqf&fx3ipYJ~FxX z#L4J6akrQTCvoHKCVsX7w){?u?9cJgoGyHx8zBM0)3?q>a z9dB#8pN7Fa&8Qy(NGNDEkT`R!*J^xXh<3rF?MyZ&72a|OA$K8(F zm3rKWc%MQ=#zMPIIMyjg!flhajGB3{OSnOW*&q?3fBGOZL!;Ml$+~Y8uAD~}M@|x& z&jeMrv=6-O2?{!+i@#;Y1d|C;OXI&zpL;Tu7xB@ZEpcl0#)RH0uChr@Z0QQMTn{BI zM6$W#q;>O|g>IcbvTscGVCtGS{My=(;m~_R~qW9kTd2focnUKkK3%U8sM4BTo3m&<1S^1kE;ecxTA<75lGBFj{cd z?zAInO3Wk`e9=S(v6pKxJB#&nzFgy%U$qiUD%pq+=ZxOF?WlvZ$_vwYdP^q)8gc_n zyL#1@VatSR1@vlaa35)9?`vFM5FJ%E(TF1wW)1`*tbG$fwv=QIc168oa~ zz`o%W9tve3izSY41GgEx`ewPBK}K;9v->)&U2MM-x8Xb;#B7O{I=`Tld$sn*s0;t# z+#WZBfUo{~#jBO#iwG+ZetK~Yqn6OZ^1Jkx7ekc4rb*f1pC&OHe1WuWQ7Z>o z2uVm3{u`_or7!>U;7?u5e**GfL^A+wXbc)W1cEt~7yeCB^UtHXW3+c4K14fIQAWo@ nVaDG9PQPDo literal 0 HcmV?d00001 diff --git a/test/vendor/knative.dev/serving/pkg/activator/images/activator_reserveRevision.png b/test/vendor/knative.dev/serving/pkg/activator/images/activator_reserveRevision.png new file mode 100644 index 0000000000000000000000000000000000000000..5f8731cd926408e797c41a57560a56bfdabe62ff 
GIT binary patch literal 24820 zcmagG2{@Er_&+=ql_X_NNNG%%B9%RB2obVOF-*3yFJs?jE3%F)>qwC-gR+fnqNE}F zGMKTC-Prf_eW>s6y?*cY{;z*m*A?e^p8K5pKKJ@L_bF8K$s@XRSI&VzAUYLgg{L6U zNh}C-0(wmvLXUo$6BDwVdB)?}V#T z>1t;gIzUdw=~y!7`q%RYR=f?hbNGJZp@Jro+2Q+#bC*rNpU~#G2(Nn^dP=}CECl$d zYwmoeuDx)%Qg++1OoZDnnoFb!X1IvWkI0JuF^l`fgbqWSY!R)m5zKmPn7ySIq-V_FjT76D`=7_GYGwq6bZx55YZC{~DQ}G&CT`St+WqVi zOtk7Gri1@q=11b?vOsgJ}a0Yi#DO@}$V2@JUM5v~$_r5lXzy8E^>Yjc)S4$Jd7 zlXF7;JVQVC@@~HRW^HP~)8R?!8BDa*rMq#~DRStCl1B7@<>B8t_L^o?mHDnaU$ma0 z#XYR+u-S?{7;EK*v=t%<=y#QEProlWynd$QrXrG=&ey_+Mcw9=8m7m)L~o_qp7=6< z_$%k!>Hmmj%3L%I{q&TTNtY72qax2-HyB<<|DHS)4&HoHj|G3&3}_s5Z6(%~Jwy6@ za=R{(kT|L6<=t{M3ZlIBVQ#*g8q5mp&WUMdrrT*ECw#2F@_wNEh|v}hXMi$U4n~6A z#6Jw{!jiv^M2TxDA@|4AgRJ%Vg9Tl(R~NylPLNTKe`+NWewDGmM%l_IS6-i(Cyx|V zXw+HTjDe z!#sOp!=7aN_nQ$l1|OQ@ilG8XoRnh&+(2g8rj4^LO83*6d^~FIW-eb?Rmzh0E$lev zvfPCv*Q_t}wJ7=2gmy&m`+I-&$Ksxz`h{|JFwZ-Jx;)QEV>%KWG26?vpQ?*cSOd=@ zhk2RrfP3(U-)zg(OCBvC`1m+SqWCzJS~^TbV0b$#@w2mR0m_oTa_^C+l-@BT?90%`dzvQTB$cS%C$ zMtRgfi~HXe_5WJT|2EnGEbbcUm4>JczwcFL0bm^^v7j8I0;``J_wnmF3;9;#xh!^{ z1-@o92$m=-vi{Si0XFC0270{|vL^&?j2+G|Rf`=eXD0Mbc<)jI8*d*Q@-#sHR-y2MaFK;`h<@}YVM7~J-ud zTFcZ&fhw}aY$`NDf&O&ijFEG`HO&+pnYE4EIh>g8bENgdq;xDh+h0}KFK3|A+vS({ z8eE89tV&wCp3#i44*a`cvgX<9`4DX|Y{i9#paj2OVBA;+Md-zLD;XN2`Vwq$7Q1c4 z@$Kn~WQ9`1q$fDwUfH4{?8;!iF0$rY)PReW^2PaPIou`dxw_I#qT^zv30PQL3CWx$ zRU~3dxXW%c_im#{Xptz@?K1RayM0tf?7_>w+ONqb82OM7#SSxgUqso=q_CiacN0c@`u2fsA>y?|tPbN@_vGk`%V%k`(@ikHba#Un?+X|MHEjSLrHP zlgO~}E6_7XK zj+RC=v*x?FR-xo;eQxmL00mUEYYNBQdr=#l!NxOU)e~N7-L0Y`+-uMN)wlmgDZtTe-`RYt=a2}JngCtG6bS4}~Y!DamtO?Gxji{U?c=RWc% zAkszi3bwDFf6aJrQWX~a&$(*y@L$hX9&M4}Y!h7fvREOtTok*M>-nK1Jn!GPPeS<5 zSa8EMI3u%iv+lkmpHT%RnwURjU& zlcdcNc%|bwk15%{45ORe#S{e?K<`)(D!l`IO23CCn5LUr0Dcce3%CdPy8xS{`>p}x zmo|u_coz-#7vUVI#cqfZ(#Zo4k3%4y+Tx86W=aGu#3gF*#>wz;A4Yjue`zb(Hsw#L zk7S{OL9=>eGYr~Uoo=mBfKI4Le-cd+@T#5*)3T)5YZ-Z;h@^Z^_hK4kU&q?*Zp~pQ zo*i5{o+^QT{9)bU+;!~p#0MVggd(=Ixi7*puhkB(P7c&$pdoK4?;QkENJAHbMB_*) 
zWt7VG6k+k!0VfkM0dtx-8L(EUR@dDuJ;z8(XgRShoU?y9@4>_2xtoc8cUnV&Y{bKw9LJ*RVaxn9OWPD|s|j z$+UqJ_IAuwabKW<>>T__=gpfADP-$rJBp9Y+#?-3rVm>1W-uK=r>*E5Cvt zEVi|z1Kz?@%OW4U&!cXpH{?aq^$F=wiZx(wcj**SlGaf$hE_xM%KC>@zc4}jk0_PE z`}$!%iF?dE1qbR`DF^if=*a;tCymxmgE9~CJ}8G;8cDm(pWlLW3JbmO*hNi2*H(e> zp0BRBzxT6P6;&07ygG5fA2BN6#}5RVYoW%l-!Xfhus<-JiI|1{t zaUhb@w5N64dRDr4{3wf#%P36PRF`gY99DRU5}H8aBtm{Uwq$)4vBA!EUiDPtK)+XB zcz?&pDeIk_JD!d;Q0$H+MOADvV&r`jmh;;jmT4`8&TOw8<56#BKz#I(u=9JM$X46@ zP0DN8UDAkF$Wbi>D|7&5;SQuXMa(@qjFi2`D`*8KkCEyw8c7z#BYv?8f_m0GH ztk)yeKb+c$v`JVhXxfJ$4JP1!JVS7e{7E) z>dAi_7bW#;aIlNatn;GgZx(A|kE#XGlW=}Mk|o2JxV0S>o2IEWa^n%(Ac{gcw4k-f zas^70t-y3N0?w?=D$pVHYM5wC1X^J?&a~03j8h#wxxG>}3_<2t6rz7-)7akIbJtKW zgsRm&PSBMC+yf{~e=#7kJawNFe z8ldxQLi$IxnI(-7;MSdO>F`KlK(WO6)4KYl2EHz@U7OzhZuF5AVc6IbE@vPl}%nIIsEV2jL8QqzJ^^8_=oR^^9nY1`fAOHJA=!qBuhqdTkR`9+^=EZ zK)ypaNa}NLpX*D_^M`Lpmi=_?2ojuA)wY_nC1xAGV>PukD3aFRa0fE>=dk2J--a@} zIBe}FC%e8f&H6;jL(~9W)fuQO;sU(*d^%?Hi{#Y_LzN0?KqNW-Lz;_3c0Qdb^v^Q8 zmAgz22!kdvho3R`1KfMaP$q>}nOO5fAaTd8IaAxFye+d&BJ1A3UO^ku5Mcp?40PQo zw>{$9sttJ5K0JNECFEq%n!YH-ePV$)9%TKlZH7nGnu`JMj4mB~D8#A-E>n9Cn8EM|Ef3sL{e#4PnWmd4&tij@__D>#~A2s`(}w@TOK%#i|xyw_f4{ zL)JulMP$W$hj2D8yQZtq&PuKae#2CLlI{gHkdepc7+Ze1aH(${;x%NWw>vHU8)|o4 zoozP*q{q!$k2Kl*8f*E%mWmCEwEm)s+p9$^w8f7iZPPi^#R~R26H`H#CX3u`QeJq$ zjFL=Tg7ac&G4GMly#k7b>+;u{??Kk1J`y}X{3$ie9ZPkJAz#pohL3)%l2(@|#K+YG z^g-qd=UZ7+VgXSuB){S&`a_lEYev>9O_+36s$LQnk~NWlBiEfv=~fD$cafHuM<8&} zNl4zXE$ppH;y99dibLWx`9hV+PdM&l)VwD^G2`6*)&0}uqoirkr1LjrBe1J!KeG!d zi|@sx&tkeB$?g9vei=vNOHAz@K?E+?UZ&BNUNYR}T+(Is3^aX!%%TkeYY7-$mwt5{ zHCMQ3>=5~GuL&Tlqg0>y7S2_k`9*Uf3ot8m8<`F5hgWbFWO#s2K))C6{RDX0*Oba} z#N6&VTI1Q*nV&QY{N6_$e5Vq{kfIedmcE6xdEAtERe+k)<_A~p?pYm?ec2TB5pB@X z9j+t~R#}PdyAuEM?pPT1lQB<_E-YVtj$e7fg_zAdQ_rSq#~YF?TV}EA-XCt=cHwii z9-7`ub3zL0*2><|R>*>H>{C5?RlWWi&1wmZ#yjZd$uBtOF%O_mDuIJ7}N zI`Mz?;**$yH(93@QEt#Q8kS9+W;+0u-_FOSBma8fp0R<$Ef(N+5NA-Pf(rHA2dSml zzIqzdR^OpdS-`NYLFVstB;RK&Gw1fVO~#LLccVuWj>aW3W@&c#y&?@dIxHAcR-XP? 
z(SL|3j_o_5btR-0lTH?qJ(;{jzF9ozf@iEc8V2bzsGDd%$2xp5Ry;`hc@zTFgEPy^ zFENQxhaFfm5YtO(NesZKn0=jLxe^a`&Y7Jerb)_>Q?QCn*ujQ_7w~N4RDlZ~-4{y!Jqaw%r1~VljqplF&!4v_IcSDsZl@V*plxnjGg#zkDJBmH7v>TEdgi^% zEAH`ac@2D>NROBo0HF(Vv48H8PFpZm+CJDkO~h2ygUetO1D~BJ!XyarDagVFSfh{W zaw2vEhIQ@I;-&|boHAW5>g)eRo>p^D5o7K|v3H`#Obvj_0dOWmO7qEBB-Go7wj~s) zx@qBJi#|`@u+N`aMpVfMC3-oR{e{7D%15ktzUkU2YJY3IeT+)UuA9L>)bd{YAMEM4 zl5V##tDrs$I7akF-oA!>_wUDo%dWvaDM}U0|B-zy$4IiYqyCG!`MVn5BM3OM{|}8C(n%`NQ3m|^ zLnHIKd6^P~4)Xx6mm5`VP61nFAMnZ?WM>k!kV}BXpT+3@RfS?ID{aaruOTo4HmeOsD-BeqIl_|c38>=4d%i_iv1OGdr%Sa?t z!M5rtcxizq@(T~_3_}@||H0+dyqoU%2j8$Fv0n?n`LZ!CsZry5gt7}3OQ43Q%bHvB z2<3K%i=i917rO~nS97>v@avRMZwy`^v!7uE&H}F`QDM?X+*9~)OU!{Vg_=32z>)OVwsunyoTV~y=q~5r8Z~Y8oI+~Vh->AM8#s;Cw8W!p} zf6ViDHito~MX<1{bv)^Z>|Gv-xiTN_zD+rd6u$I}tY>-tuu;7@kT0LR4KA{t>R4`8 zL@p9NUgjg*oYnq;ptqX>Bt3HLfx1w5mSCFc(RMY;00(5#{-M_H7-raeSyrcp$V+A+@F*l(ZHe??9A@KO^y=5n1>3snX{;?9S{MAhP_ z*~3cC0*2+>Kh9v@YeSz8704s)vE$fE$L2daPg-*Jy?bz2Vu&kg!KLp<}^rW)TmR`hGa&~@%WK2LSJ0&!DsId z2_V@o`a?{99V9#`qi@($muTac!ux$6DxGUr_~TKPUd*zeH2f9qbp%_s-U}GXqv_5& zdzcN)z7j-Ii6LzVSr3$pUJ;rSldCe^w4mP$zzeHpYGvnlFvphQvB1LvZuQG0k$~=)Y_=X#=0ZKZvuGa~<=v<@EtT$F8j zfShpBd`MG0k=y*Z=EYOKFFiqpXEqMknec-`p>SGmUTeP0D;r*8k%Fg3t;u%%>-?U3 z_xq$7*9J>$E6+%)J}}=$_pRIYjq4o@ELG1GqtBFvT=IYUonIl}jsbuM5sa|!C+kKi+~10$qhve|6Zo-HgtnEjqbW3_c7N0}-7+|Cva z#~h>r>Uw*yj{WJ@OUE8FF-F7PFvGSG3ZYF?{E{JrMzn%i*)CRSn*TEx9(GW>R$=0q z_%v&Sa!CIXcj>^MNVn^cTMQe9*n+*=FA$MS<>D9k!%x2s4IBwPc;X!OBeY7u*o(vB zEbI3q+MumspWl8LQ#X~I4?7`Kb`bcF3eLtvUvC>9jN-mG{+Em7p^?Mha{y^9Iq4J` zMxAyIo5*2#RoQMe#O+I!U(W3GG?V!dM(RC!Oqj5nN@+h|=5)71=PU=)RMD4I8 zc=LOtxCQsHEio=r_+n}^?w~k`Adz@2cT{*SxnkkDfBi5cn&oSB(W25tChUu+)jt4G z8Yq=6`tPV<(0$1z-B=Cr%TWhq4b8C{JZFtdQFE*fDyqBvGFjwbv7SEs@~N3FLZdr* z@ucPrcmC19rSzH*_hGRJGpPv|Apkta2j6>$b$tkgxEuI1#~H@_YJa0I*L;(-l@~Il z($%oI&oQJD6;qVj#4myVb&^Cdd1qEaC01kKY~ppC541)A;Ql_RROG-n%G*xHwRmoi zKka``Y6{%dpL?@Xr>w(%9ouZ)I(!x%Pui)hGk~PiYh1^AR-=*j`_}AA0GOKbJgHRl zE){FCA;{CPSlafA@7;VZ;&YgO!}ohH+inYMB7MM{V{FP8?%byR8b<2tcNd&vNI&9s 
ztFIWcqG*Bi;>{5G4VJE*`=mqLPwP9QSYt+hLF_|JNu}(Ag@-3>gUH$IP6ARl%cp`+ zl4)P}?~7w6xI6UUj%y|P99NgJAgdRDz_9pB_0%tRY*W(NRieiyXaHfSe;7@L#~i_Ks>`1P2X(h*^m6|Ra-lIbYyQ?-s9(nTYFCnN4nZIHF$ zF>C$p>+vOuR_F1EY#dm0qq@c2u*eue11)q~1jL(3#NKo@3J z6X){z&lq6dH?oM<;0K+1ZhJZo$b=EBNK+`u+W94qyEgVqU3=%`Xlea@35jZ%-I%!T zeOTnIcRLIhTcgE=TWV3R_ZXx28bIXU#}&>!6A^y&ui%baY3_0%T60eK7^XjUhh2x|cV~1*eX_b_x%&h=X?SoMfT&Csu;d9S zaa(;;Wl4!ygsVWzqGt$ZwF#3_t8-{fDh%y(V_*nm5Ol(;Mozc zq_>AXqaB$P;y%*xAJ%xuw%0X%2{oq3Otm z=Ut=h!`}66&&3^xTEkK>pPfLeB$b!`pj5$+8=GdCVYmhW=AO0yi|s;^12-OhYbGy$ z*%zYRty@Ek7wYn1&p<2-tRC>73$@z~Tr64}dy46EANgLhqy#daURIi1tpY(@Uyvg^+BMCVi zMRS!bDxVWc?hRm84(r-*vR$9!xD~BYa(U(|1wwlF$dwCPY}I(9=EQMYD%%Y3^W{_L zUG13e_*jx!KuIvl>$aF4#Hu{gy6YCwINVMj^Ub#{aoB~eb?LG>c-h=pYmr}w z6Q20m6o<6h%R4R8TOVHRFon_bp`eZ=QRA9doigyr+6$)xcCJkb54mJDTZH6^Hf&~g za!p@iG_Ji8{DcwI=~O{b7LZL*v8&!QMdb((c3Y5#Dt$iz=$i<39g-Z}1cPfq7+w%uqMAPV^_sV=Co#ndEo7u}2@1-8w9u(d;>@V)?+_=zOYi;8*Kethy zmk1Oa>n4rKhpLs0R5t|#g-xC^Z%9RuXX*RLgI3{ZTN#lpWsMDO^e2T1Cau?15giDP z=z3k4{&HP2IpndxuLT+2>134N4b^0Kmd6%**RLhO5zC`WYnA(@0_mE_MoKy=>TDV> zh+V(~1-X9}7eLen!dy2kLg;${dP>w=&4tirE+QRDO0oK!BdI}Yzd>CW$MK2vopz1JxgWPm}%F>E;=~d%vzx_A(GPU zZ+rS=iq1^}EhU{Y^{!z!@@Kg|p7OPkb-UF60uu;9!_!K~`4wj%wc%#bAnn|y;D-j# z_No9JwG*G;nO}k93F84i-+Rw2H48{ZnD9=2rpNl2x0YM@$%Gj0m;hPVn|a?kQB-Fe zZsU0(JUFX)p*eRDS3=+6W_9)%aB7jkEw%AxH5-~RzT(%NpcdK90(as0bQfY4*s&o; zltwMh_=tK|_H$}{y!?&w5zyhs0$7{pdoPPPXt0Mje12e7E1_JJ)Z$%2 z{EFNQ{s9ro#%lAa_n{l>o28D;u#3C&L;d|zjIM4yZ=Yc*~Wvyzk#yc^<+^h#NWk;Aecqwkwhykaqok@O2LBr_yxtNqi7Xtq>|w|IK6P+N-4*;bxY z17n3GqMn`=322Ps<^v#6TKBAhvP8I$2`2)*%XDaH`zypsXNe4i#}ND*G)K1abF`p>KO@y78!Gxrsoj7V-$*uw$UfnZ7 ztcD41cyuWD-LjpXCok#5NzE7EUZaTC3}2>m1nI@#}N&HpS=5#}D}1aGR0)y_l~y3gFCs2?2)X0Wi#(|tx8oH+Yk zXz%W5;uSD9*?H2*`b=ImL@;|7^KAe6m$3i@a=&JK7Bx`<1w2OC zUj>xwjtldlU3$Rl$&cIYIfeRb%=gEAwK2#m{!N)Yidy^DVZqXH@S^~UMHDODZ1I$x zewT-*wlo##=)hI>_S%gKe~`|)agi3(ZreOiQ0Nvz(zo4vKmRtyWE@xoysz~wB)}%6 zHcn`J$&%&78LXR}3CqqFHU8_t|PmjZas)+{|^nLQgkQ{drA)&%W<2RtfY#7 
z<+=Ya6?ULAB$EyFj|S~8L*ukfEj#LK$EcgX>x-XMZmGaF9s_O}hR>QxEssp&zjm-= zNKQSZDKNs3{a?3Q39bH!9IX_5LhnDKXG#c@B z74Veh7Zy02dV3or3#GBiswT?8a!%i^qnreZN&VP(6mNA7)NWOxT3N4*k-8IF5R{-H zQ{)4Hv_ZUQU=!9Xz%=avB~3rSO~9M=_^>7eTDvTu5gp#I4#7m=NY+kU1v5R_2<#$K zSqtRv8c(9$y(WnDPPTRf>KBZzb>h57maX}`xKFdoJ)l?ZzzdS=_@scvvBG%wWr1x~ zr+uTIt)Gm9wVoB&-8* zy_h-4t>u#-jz@?$wl^w3?MmP*DBP(>eFa7SC0?^%w#euuVE3jB{h9D-CxQCnRji4f zoj9KJi$l7@q3;u-o?y9}AP@Ztu#(NB|jO zth0OXjL4BkHqI}5yy<}t#C_M<|96~k{IiaS16}DYLYYv7zaLMWz zxTyGI|6EKtMhJ0TPF`~KT>M3yfjP%(@s_B(oG(aoAIrk<{oS7j**hhL zJh1RzMkTI;J1Rzt;J1rP4lU!5FT0R3iMBA?*uD*gPOmlvgQ;ca%hEHvv@{^jUq$;u z=_7g|*)yHy44xxl+j*#;4nk+~GkQt54{qvW`zOycbbpxU5LL6-4`5_3D~#N~cClR= z7QVM|y76);h2Ae3 zJbYX^nTRN8g6z>LhQG)JL;1C+%tmd}X^;hMjRvv(Vi2&`>5Cf`)Q?bk-lmf;K(fT^ z#kzL=K*$HW&?3R(=fr;>+caxjbLj#q@eLKAruU~t4jqGkPaIC2>!Hc;XUTmwCIXVJ z%D5lQ{D_SWG|i#&dc+?7q4T@nnCsD=0_QwWjSjoOn`f|`p*ip?b@U($m9aAtDNpDvJpg6R4#Xt-doh2*Z1G~w&{0>cTMQMV!QsiuxCO{a4(-#+Oli; z!HY%KhkI71WT6?t{elkcK-bHF2+API$hQDEF$3Whkwi8~<_Xy@p?PHuprgc6c>%B0 zRPTyeU$(vgY6*ZO@_n`nF*xB*?u;gBJM=K?HJWX^fh^9|nY-Qsp&ma(&NUVB7}CK@Z+mDhccdI5C7Q za`|?sceM4#z1dT;AD8bTya3myEI@~sI+YFo5YPs~9e~<-mEa#x@-o2o;Ipn{U%l;Q z`L(qU6if0H`C}RBrk@3+j&dL((m;{}}i4;_3Ny`RmEUERWi&)ZW&J2m!} zzdaTY7T&B(eR07J@3`TT_IslC7-r5z|I9YMe#1ZVN1JpHH~kr`$nV=HWRYE97-e*b z%M87n)Yjr#$Li}l=F8BH*ezI}=UVWq#t=UKE{?n%(<9vdbun;oxcOjZ5b@iWOz&G` zf-o#^)ylK5mZ@1#s&)l$cAFt7G05`tGO3qckg~vvz_Kvl-!|!$? zBMY2E#LUN_hs!XIDhPgwEOR*E9Tq&ixhj2_)sNoop^sbZs+E}X!r&zR28|Jl2~d1F zGJ)q{bX}OgGlHD{{yFR!Ha0i{B;k+7m*QGVaYYDUUkyIqLcKxlSwJ1}3PjLxari?qQQowe%*B-4A566Elo2k4Xqy9D5 zT8m1bSl@jbSE!($>E$*4DvE3{b=wW?OgAffI4R1hSd3UNMWhXHn9A%HMgvC~O|@p6pS=+79lM)+M;q!T#Y^v3Jnw+_0EW_oMhL z&bJihf&FUx5=9oOPKZhLd%nSZXm(2%sY@dtHM|kI$lI?m)$IbNuz%)tR}pYSAN0>5 zV(mvoYVI1K^haGK^#mtd?g@{q z-?zkPg+vR;kkYL@_t3&`$}hdoXPpa*XJ8sVug#((ptc)={aQ@+d>*fy07U@J;X8B1 zLHA}V*HV!#T~9f?y?<5=cxBIZQHcq|AcLpyQf?vh_Vl0xJ}DJO+x#tAH}&w+K(|$X}r_mhoyTztr3Qu8VJf~nqvFf#a+LHM%AY* zOcS~aA`gt7K3O2ui*io%@LT0Ks%R0G;GVZWJjpweUyJJKQpI$Ng101KdzCSyY{g-? 
z-P8c!HtsXjd|2pcE%5<($NPwNl?}<`Vgn&rQ4G>lEqy7Q5$75&7x_vPyauk)Q~NO8 zmJTnIi&e3^Wofjt2QJn`uBWa%kJW!)e~qo1u$B8NZ=dO?dCqFK&~)ZOG6!UaE-DR- zorMWRm0bmzD$4j430#GpKc&FsmM5imH#TW9g#0qF)ZfMIC0?qlE}kZasQEJQ=agJY zPaLR|O&yg`j`y<&)aLWcLh z_*kI4K7{#n0a=FYJ?utzj6fF>XzMrw*AbVsqzg;_HBlCqei5d<)gXB&fx{_?enHCJ z!ZK*;I=waTL*G*e5W6u#28|6v*B#2(Cls{5cXbq8b|*?q#TPg*7}`5=d1~r$p<(%u zntjTrR$c63^g6nvkr8v>&=^Y4V`C}c!>Y1J!*k~o(X!dfc?vpXx!$m>OS~q~B(>Z- z89pxFJvcz3C^ODS*a}set=Bznobl8AF!@_?I=VsazLg?|Rovf=gEsHaoIB8WzQ8%G zQPw5G$%?mSQ`#P(J7Tp}jBxk;8%R#+lZ)Dik2-Mjj011S%KXWy?PDtjwSfB_O81iv z@7&ob^3|g1>CnZwFQ38TGSMo~Koy4ZvpFliJvdS^v+)y@>bkbyD_R|8_&7hEXac%k7l9WH&B%zKP=YZe8?Me_R^3}UKPq%TT z<@lcm1{j|!GY^p;j05uGa*t|3ET}-H)kj)?;A`zZS zM0#ho`g546Ysw!Z)r&7}37td99r?U=H#Igc7m4cNagZ^oGEvwBKeTE8bk(Cpw=9ni zWdRgzgn5Tc>azT8XML!p$ewDe%c++6)l+ybgAF*xjop#W)}yq*}n3d zW%cF8T?=G+2?)$z)DQGayqh)4-XdNs>m_gFGhu(88@_C&6NFx`j(3qL`0a+gET_`U zXPupu`bL-W!Kd$zpW{=M$c;)-h-+@F~8) zOfQq&?(ireL|%e}H{mno^o6UWb0aNW{*FTza!&qyTT^T(8slSP0&>RN-J{i*IDgViNm@_dm9nYLR--wF!wNY_NTk%*G523J5y% z=pBXHnM0MS4iTx(bD`yPsS=_nTITjKF;&UR-vxgSyGIx&ev;m|f*|30#DvR6V zuHm;oC3c?pnxpjRaA+0nrZc>mgn6~H{SDPogyt?rZ*B|T{zCm=c>u;0nlEW|Q*1YV zZn0HnQ*h``yx%5!J8zu^3&f_ zWbU+Lsnwe^puL&vSyd$t!H`hPug`wz!uaX%>K4O?>E%-mS`4|oGlRqXnnn(#6x+qE z$dA4Q0^555f_r=OZF?&kFJ)HF;L;OK@l!z9-{aLao*9HiMzSh-J;e8n7y6Am`yv$G z_r<}@ZN3}dz8%i57m5!|?#}2tKS?t{?FO*cW|;hB1K_q}zt`CO6He`0qFTRTjjUptr6 z<^?Q!uk0wB;mA^)F6PFL7Oz#8Jzp*@RHQ5&GtHl{eSrMQiJ3Wr3;{dprAhj5ryS8*=fpzAJm*u0cY2m*BX|dc$$A~& zxZxej>UQCwVVnlLD(2fBF;))-w3#cu_Gt=e>*@(Abw`)57;&L!&8NT8w~1nzk9TSG z{#NIaKp;V1j)~HkrjTyQW_c)NmgigV;a@B zCqVWG5jQE;B@f+1pcpeX%$aSiJFRVJ6<@qEEM_SPawSs1-U;vq5L;}mIPpDH_weRo z%W9=aW~|S)%-+gJTl@W?a`ItqLx(nEsQJIG2Q^gGegNgsYqyZIw3z;Or^cVq6c8=y%|S?i@1@utn; z6D)~vG$z9W^HV1l3r?^Kn+%z#0soM;-!LT9%AjjAeB!&<1qL!yj0a$it21YSO5Yof zI3?=)M7^=yDGdDhZI5%oz#C_3vFkgoDzWf4M?2$Zi-N|jI$4-RuhBfky)bpT7^I4t zbGjk9KWFmG!#vJp{CE+7B0%0*PGlH`sO+j>GBX)edeY-l`8{{FaZ8LI+qzUef@>qe 
zIswqv@n-xz-vu`=icIw7ss?0!YbsrjXK=1|kH0U>scrtJ6SLgtyMdS}d>l|8E@P<`t*Z@-RJ5@r|b_nZ)52Vi=Qmy+E!g!UmZ)1W0bULGc1TD zx5mfVqtBc#5N`dUUsCH#U}X1VAW=7x<&V;ZdP~jZ(brJlQ?%9FH?Y7Jk^m%U{H_FW zI&nxjeBcZRb__7&WS6z+g5Z({A(iCD2bi>N?7LV?T25< zk$oExFNis|22aF(*?`hL;mP?~j&5lFXGF*svuEWgM9hmp?PlGs)IBZW{ zBkoFS84m49Ca5#^f-!xo#)kTeTEG>7$gCi@W(8g)@ubcCHq+QomXb!K!;p0^CY7#d z&H^;+w`9JCggYTBn+chNE)Du61dgn#X!s|3e6Sw5Y+wBh5Dz3wQg@Hs9>sO(wRw-U zvSuZ1G#m`%sXmprDx4z>>QZE6t0Gp5oMBII4|qJ95jX;F>?A*Sf6F7C29ma(Ml5&H zH_Wqldh%7a0eKu&iVl2()JLKP?%kZwKaD2r$-$HrH6w2`UO2{6Mmj{@O#gnmRfxHx zr4jqI!$f=Sevj|vI(;;!0I!Q}qgEFO-u7fAM{GcA z*n$ZAR8xzt4$Q^&OTsa{LLHTR`<4THG-*{~JC1)}2p_ei)^5U7Zm{*R!6>W(Dg(n- z@}FxRqx*{%0h!5iDP=(tLLY2VTGDem)B+a~&u%N$>O;eJd#Pw_EBV|J#zRYex`JAH z;rTZ+`T7y4Vkg=q;+^u#`K?%6iB{osrVc5G;Io2X?i|r1wJI*2;J+shbn7D9&e5bc z$=zQ4__$j12A%X9Kxl9pnO7VRR?p@Rf~2 z$oXuW#cxxU4AKpBm9yL*cft%xVY;S;@NyP!_zoQEY>0QMk^Xun7c{i=?*N`t{ zrxI`*w7PrpubQeS`Kk`JIq$T(aOM02#rG}xEx+RGl*p?wTjD{ROuL=;Y?F5QLj+a` zqG{ZZBRC2Yde!Q^t6rUQk{a-pi5_b%vlWq;R3z3u1ulsuCx?1m<}iqv%nGV~ypg)- zb`d|8VGDG)bF3d4^j-*@TmPevw)CpDjC!~eNU62G`Fo*gkXSUVA5i2*w5}MG8&6d| z4+5pp1-v7Rq#DH*_QVlJvmCmUS&drKhmpY+q_-5busHK6NDO<2kPmM3E}_K22V;QK ziv92`IWfR#$9I!b&B~2$NsenH-c%MOIvPb|Hi47fAX7X=;MzBo^ zk43 zl6e4)RJK$5rFBG6dwR-V%Kl*eWjA^DX&~6Go>`lzvo#hh>ZqXoG^hI{I&iM*77LUk z4@EXA8{c`mp-hdhSwOMAnJzQk+UKdL65iwRequ*IzIPcg5CGCfT4y(c7heW~VOWtc zZ|TxioqK^s`{jm~PqH!EF}+oFZDTgp}cIM=%M!bT<0%zaxWaYPb#70VZ=OcfyYrFlPBIgH|!A} zZ5)2$0qL%8JW2BP9Syx$d;Do`ag{~86b_|^wwHdr;6+;}2 zgpb5yVIMcS^}ST@3KxnPrUW1#xxsyqFT9ie1@{$CZaX?_Fv%fI zQVDeYLcnc)E>)EjGLWew#t>>DKX`Y%dO`C@2Wq0VrwvAd ziw5=wpm3+-FBx}SpVYl`{5!OI@_6m^YwD|z^4CXIs0ZKih|gIaAR+oLrg427a9W`D z*)=h%ql60>2|AK!!8?du-UGfEAPNG|4b61a#}()J!pBF(*SoV@ut(DUH-WQ`haB_e zV;T~?hH~rsNw0tq1{$wE8Ag73w8YW_yEj-Y72k1*k|-x$+0>JDB&xZLE0t*HV8z$R z#jyw5U5sY%>!Yb#T4Y*EKVvr%BS)x?*jml);8Htc?fy;ujZMD^{oVmChsVd{VDv{0 zCpl4+Jb-vlK4^opPbM6*Rd_7L`!~a;4{Adfq8ucTUq_$+O9nP?fA9Zk<=X$5e&4@J z@8}%SK`0@osDvoI3$Z!nI0<{Ba>^mc95$Wa3L!&|QF0y|F^pIxE!2j{VP<5S;~Zwg 
zX4~gA^?iIkf53M?ZhPJL>%Oo1zOLu>ybj{szrWh5a<*ux(hD}CPt)B|r+tSQ%L_d7 z{>CGXwT%bXFsoR~cY=F9V5mSXY+omR#t2T!zcq;0LtmgY0 z+eEEdM8o{zFh@UX^!Y&EeX908JFIr5-!=*MHz!>FIoL}7z!envD<(2CuW%8DWqDy) z?gP^qhXokmxr;LiL~oVOyXr>^$C>axo4YV~;jWt$V0Pkr2MjwI&Y0LV_Ut9YnX*s! z0Y(I{fWy~YnFS)0*dC!|bl&;c?9r4XGdKj^7 z0D-Yp03o-$dVMbyHkVv_x-z%Ytr94vo7M47*)hL#r<>-Eay8eDr8#&A7@tibAawp% z0V3$spte5=1*zfNCr}B(L_^g6D(MbPGs;fo>~XA_`tJw|;qk=>S>UU(lf%iXhC^!4 zKF8TgT@7zmOK5hP47*tw8x8rNVe>@Yc~!kT_J;UZ#&I*CoW8;u>$#c~F-XEs#j}1k zt{oAa|9y+RBL@B_ZWvLujIqtmmR=*sy!e@0(Uo6NirH0;%H1mF;T?sY^A7P_fXLSt z0gw|4Dw)f*|KYhct7u3=cKT#{?&YF;Z#yUIpb7wz>ewzo!&~2HzlD?<3XxGxXFp`; z$p|mUkR+A^{9eQUcUjK1!5Y`?8%Z*hu;N53m$%+@kT(~+TR(3ZpV3vk7g@~1Bl$&jO1D@gZ2T{qlA=nq%zeR9bK4Pe_(p~QZLY2y9 z3HLTTl==m<0HX$!g6Z2iLRtE6Qp1(mFqxWtxy-yzk>m{M(M^CV)GdiXg4|QyD4{5= zo|b`6tR1T2&5PWH2 zrtsydbcW@{7Y0S6A_m58HD6XHl&)OL^3$2m!4Q2{ng=~`rKIqYi}Yi*$$~R2^h&~q zzPTn)0Q7hykTiRf*=`REQAj>3Bf@#rq&Z~`^#J$xZu)zH6CJsUQaAZ`;lG@FH)b!(WFxw7dG z{LM_A{DlN=Nci%nT3>~TerL7T>uI;WCZbNygpiAR3j8z@l%hMrB-dMBE^buokDhVg zi#;l>X$5HvK#;^?Q})J@qcRKGe>WfZsN`NHE)VP%oG78;2haRUHidpeUu^g+#6n{c zF&xM*C=WsfBbxtNp8j{GBtEcL=j4RqyVW?xQvdszZztfql4WVmzpip8vpx#B0d@{;91qYLb}a7}}Fn;N9y97poNo;P_RqNyW|h7L@z?t)ZqF4c1$BK`C*;Cl%gNP=>OWAJ*7=48e1|BiGO>~ZA zd4OMs{vh|;+^8(9tVn;9a%U}4b{=*eqIUXSp@Ir)2$L3T8`hEU)@5c+(5{)&pIjxH{|7MiIY^@*8I+4YPLGsQ-kd0S;gUEWn z?3>L4er0H^P9IV5pV|eWmZepAL>qwwZe;4(fd!xa{k&T{;S-Fa}*=F_%h*gW2C;t4_ zsgc@nRMz;|pcE_mtT+DfP9W10)QC>*_M#-Oxr`utbYWa_GixIUw3Z#5kKlwDt`puU zVp|s5V-0a!PHS}Q2^v>NYM_>)ZRi@(xAq~BAH%xw%WYu)kTGx^3Hi_bLVv56LphHb z=_5sTLM9>2%>^0!*isr$_$MTkRDAc3Wf>`JnJ3|l3JU7N1YR$12>Vkge}fiyeA=Uz);HJCKJq{n>$S~9>Y9398gs<=9eb8@~s9DIxk#IV#8caZnl+G^E7(; znbBLA9Pk|@Wrv%Nq_901@;U??_-rn2sZV?yNR_yM^CNh7&V`V-zb6B@n}lKZuXbC+ z;j33n`5z8tKJ`6T%miE{T@23pdN(fW*iMfes!D>C%nhQsw?Zul!5L?+PRim|cZ^2O zHKnunvn&&~W|G`2tx&yZ5GX+#vocwCg*YF9PLRW5%uqb$w^*hutsiWU(Ld;cS&BW6 zFpPWroraG)0Oca_!){hbq5P&q)5)%bC%71ltS*9d{zQ_A_8o(;pQk9RtT+%VhaOLt z0x}D_od9?iKi-Ng7Bea}qvQCifmZ+{d%6HMhI_c$d-Tg-YPW)x<=iqXgE3<#xq_B~ 
zoJ`UyEi6np%wd4Szd!DiJ$2BV(q~F+p}7Y7lu_BB;LVWpnCjWG7gIszW&yGMR4gB{})Qd6k|@wcYth!}Hgh&YI_JrMqfWcW8kINu`J z2gXz{uZj9FpN}-A6n-5>&6;lB;A*i)FM_zgTsUb6k%dTedM13@G-P1$yle+IkGTHE zPI~s`LX4N&3)~EnN7RJv(JLSz87HybKM_cgV%zO~G15*ZjtWT$iz*Zc*!dv~o3GVS zCov0rvg&Dh5K6IedNASO>?OVWdSxKT>#bop;<@kSd9RFp@^xaq1;MXlRXgqgeDuJ& zYD4UWSaO2QYz<1Ia&A-G=~sMOloAwZvvjbmQq#g{Ktn-!T{@H7A%)RPKb_qU8m_$7 zDP{4K7aC_;H@X&@G}aN8s|k=hjZ9`k`qPs7*~5ri`Ye1@*A4guzEdPtuat-ORFH-O zDGC%8Z#5RebJ1gc`C__^UnWkk&7c|6?`Fhz)!UwNM{9G8D*x z0KjtUm+E0#Z)OQ=xLmvJ5wY?uF&>(gGE3TFSdDPf)R~`+?I?MC)}S6;m8)0;u-l;u z9dJV~lo{3*niyqz7$?4&-E*i-po0iI9R6AB8rwrXGr)^>3nkhGdwIofC~ak@ zro`Of@zF7pXn7WFs0)6I|1M>XN5*ju(X+jHGiHt%>Wg`_ON7;)5O?rNO}`;NFv=2L1!<%)tfpiU^LF=(bX@Y>E9qgwYFL zP)yZ zMzx{>uK}BGdo$aE+qNw>iy~v1n$|1P2eFk37>GRBviH_7&Y&ef$|~6?I5(AkJJn@) zT}OcV?ygMAui{=Njzz`qKG_g>uPte#9>_b2$Xsy{wzuhZf@|4Wrhb>@_AHcxg3}5= zyV*1IG;~WQ+SB?swBml|$YXg-<@OWsc|CMK2T1QQ1j|G4D4IU}S2llO)Oeyk%`aDZ z-VxmVYiA_+ZgZYri1O*Q-gFhstZug6SOh(mI!y8$G7+E}p)ws>2I|!%NWAM|ti5`r z?f&$fYbt)RlLiY61NhH)FQ6w`+1;$SufF*9e$p{E`SDNUsO(ChLX(px0QUU zEOML{X~h-vIaqfv4_pUTrZjy{?d$2;M{B^1Z7-<29^4i)v8ER$JszfQ6--igJz=Sc zw-pXOXW~TbGNUIQJUeQ3V7ltuGR*1fV69>^8l@eMp?O*ZQ^AF4=cK?*%L1tDkG;EcFeW#yl=s#2`Q7sWg6 z6)jR6tOpWNw!7ua5_oJ;pVli^fKG})vdP{4kL(8Dg!fS(Caq6fw6Xq4Sa{3YXVrR8 zp#tOeJCy|F0J`qU1-)m~aJApduTsZ?jmqjCqp6Otx<#X7ryXV-YM ztWPUGYZ(mdyP#$yX;6AlbiOnMWWhvJBunh38hMe$|Ew{TxF0kQzMeObIO*nb7;p)Q zHo>r}!N%hL_X1}VNZYoxLm_Xfe~q>DMT}i&Vrv>QUantq6h$W=NGylmzwEGUW2eV( zNq<`}q5R-BXs4=j*hln>63>~91mjt!rG%SNgw;gin+Z;QSb`9~xI6Tv-fgGoLxP#g z)*Wy^OETKrR&y-ESH-17l!3zo`2RU&0>!Zh!O^FzmuxK8`{IH*PZ;JC;84uFfL;|3 zL002t6rg(rfsCD?!IxoyM8j1WML#87fDkuBGAy9MT@W*`SMuZhDu2Feqy9r?>xGLO zZ$Hf4pll=!;W@eZU>JopPf6dzrj)=r)z0XoHW#ntbx-O`w zeURpxo?PdK9n)xzI@XE2Sx?g}$fc-v^m~Hp>f5*S79-$iWAh_$6W>fp^6!Qf*ZXzl zd!s7(|D6=!B3dSFOXLm@ryTRQOGXJhBBp``xM zHaG6qIOr@5Np;3t3+}hZejL{Vw}!o2be8X#LGC=@*6Dhsv)!-tSH?OF@P7^h=nUXe z{ME20QPn#$JFCb*zmP`XVjvVSm&6H3<$J=9y5MczAB+Y#kK+uR=8$ZeB?1#KHWX?T=8C|U@2%?CKNh!8G&rQYpC&k 
zoN4x+>HM*8yUBf{1A8ZRB)xtg1WXFm>esM4irj%Ic4_uWf%qP?bat62m-Z|%i9Pwt z<@Mtq|CEAI!F~#9zHD553jj8qdm+#Tzh>j!Ri>ql4OgnOn5`m*v==scBq6%@WP6m` zIceFh0%(RzCkS;jax4TIn*R2<@(;LsYZYMuefmz+p1XB|TnJ$K^9op!V&e&O#U_mL zd5JA#-zCb^d*t>TZb>CbUFQyE(hP;St#=ibv2MK5)UJP#`0#4P;yacs#+#8d zuMy{uYgT0KN~MOA{BAVN-U!}fcFX&7o=g9?{_tX@HK6CPZ*K1aUwz^u3UB>gkV@5u z(!FM8xL*$Fvc^EbSr`8%*oBl+g+96RnPLEcY^I(R??w1zpE@$C!hv>S$SxPp_`Em< zlwbO%S#(hC(!O4+W?toA7@*%MFiIP(w$>J-YNZj*%*3=2Lb_z2``FR12Z081-o2bp zg~Tq!CTqD4ay87ca(j9~^m)(5RL@%v#`|`>j&+%!2WjSJC~A1=L{8docq= z)d2J7Bc^*~E@TS)eH-0GPR(Ytp_L$PcTO)bC1PZ0V>~yUel6HjmwdivZv#1AUgyQE zd$g+&@KWwn?XTaM5{Kkdz^9&9tL$(*Fucm`o({X`gW*v!Al#oj1kC))E7cIR%lMuAzE&-^ipxEQb% z3xDW+OAQMbbNtDHS4hi{SyX!7X>>99(8Fiu#6$Gjfgl~Rr+4J%$>Go1z4_&4 z!4dNpz)6RF`q1vRVANw9+lr?s=-lPO5&tv4VBF9brw zoKe6lG1ps~P(?I_33y?4pj%Wt;65J5*0C~2bj5Kb`b{YQ3F6{E=gww!2Wmd;{OmDj zaR#%HC5rA9RTo~4{FY%ABo$U%{uf7*41~9I$B!*BZ^;2o5lC%ggI%bkUJ2lK-~Oyn z|MM*rbimfZ@R~VN3kj1)&b%2Js`Gn(O!a5(4{KV2-BE;0_`}lF5%r6s#gE}6382uVZfc%9mb2Fa9PwyE#cQobdcp%}9prpDING0dC3G}V z9ouvz#UwJ2#7G2y$RA^YK~mL=M1VdUkP++sgX2>!K8R+Ip5arHu3?H<`|j{xD=p{5 zvnUnEv2H~=h5iFuJEXNw$R@GP>H!Al&lp;0Dl6NJT_JK;sve#>wQ%YLc5^R!}mVgopWYNXDVUWb(d;x010jlv4Fy(@6H3Bds5Rw+$-#=WhK#~0)-MwjAe+RdJS1N2WyFekvQQ-N_0tw*H7;}2i z%d6>*a?O5$qws0B{wgxY|E{#NfL|9x-F)_>j$rj#hkVB$o1&`!c_jI;8_{@Od^8f` zbgQzr6POH$?F$ZkxNnCmY9hC)u&&|cgCDt(vba0He*?w<8s#1l|BhE|euSz{Ur>NJ z9fg?Bx)CulNiSS-zhSgUUSt{k+l_9(1-p|)omonkhVmEouP>$_@Ukv1$~7 zoSJ>mvL$eG7&fblbx`@3pjveD-UHV%piwXDUN_4|uTA`73Y$_#qy6`({OO$r?PI~! z{_j6U=nb#Fj;}QR7^6Dw^xFOj(3Gf2b?Obb;mz?6iea;Y{o{qSI z`uXv>$?a8(JJxboGK=57_6AbQ{UZ{gzJl`|($Cn+37q2td*|1mNRsu-se7CDT3BvM z0@y-^8A@Y&N#1o!i`l(JtRe(y790|#4+~FDj-UE1SE0NVs`^kp{hwSF^-DlQi3Dj# zS?_)*(Q}hP6$0vO8*l!M?3a)$10kY&>Z_Wr*hlnAC_Y>76gibK;sZ2P&%aUU={8*? 
z`(Bd>RPHo2EZlQFUAiP8-`DIXt`GQIn(CKRWTjUS6-w~*6m}`=7 wzigK#B4|!od{X}+PtH)#%I*L6bH9nf-_}phTj7en!wH54nOGU4uKe@xKSj$%fB*mh literal 0 HcmV?d00001 diff --git a/test/vendor/knative.dev/serving/pkg/activator/images/routeTraffic.png b/test/vendor/knative.dev/serving/pkg/activator/images/routeTraffic.png new file mode 100644 index 0000000000000000000000000000000000000000..6b6cb6ed675bd3bbf159713eacc5369a04ea6abd GIT binary patch literal 28562 zcmce;2T)Vr+CGYkf(lUtl@dTiMWsm*LTD-~C`~~W1cHEw)PRIuqM$^j+6X-g3L;4F zEyhrO8j27gQUgRvKnM^Z36SJ&Ea!i|^PQP{=iZs?>~VJXUTf`D-}il<^(>xTF*6k0 zCBBP?hez;|(Rm9Vo^2o=o~^I>cK}aRWly^D@W5>@oj-dmXy7MX-}Bm4urhP_^P}Ad zl@ywR|3#FJ+Z_UKj73C5jC{|Y9gEz00k{gUBUfwNaDtnlY|nt z#f3>U74g4wQZKain}-A0gG?DIHw}J0-_aUiuO;WTk0;#--c}m?lwRMR)ZjGW<{I`! z(c!6M;J{Lz;vkZ>YVS_)E`gCcZiQw}lop8~i}uqku<`;0YhJIiigPuc{p}WS~XQ7f0rn{sYhtN>yrTmhg##qPTk*W#XFc2{y z9m3}iSJIQn?Y)GFD^A3-%7g=oJDq**;zMai9K%Z#rN?z^_hFE@Ej*v^AVjA+eWcu` z>eOyGPPGRV(8zb*vC85LGf`#c6|6fx>g(zXhKG|#8p1}1FmEF4G$>QO;}vCHZ&(A%3F9A|%^>VBOT@JsF~R6bZjaV3~g*yN$q1TE6U7g=`F)8glrsuZbRs6#dGszk{)c`tLDjsO+%SE<@rhgbllbq)^Hn;B zgNAdH&4RytJEyYY^|Ib%cXQCJil_XfgA%vQ-JbE(cPZdK&DZ*|zY*n~nu;{VU%MSw zc52pOByDXZip5-aS(FM_I!E#@ksN6WqfcD>k}=L^4+wN5BZeu(8n^4btt1P~yQpg- zm>6?Oy5wqOFDAwC!rei|%HGywMnLaQa(oACYH~FEw0$46a=6UNW@%$YgdS?&4*%Z& zY^h+ty+kKCw9Sg%A4d)IY^hVeQz|wTgn{2qm>8g)ha2*(*>7-;az^I)L367=D+cQs z8ZDhg^5@RNkF^+cLmlcT{g6=EDE#I6d6$jYcs7SSkl3>>P8@jRHpN0`aLPfFuJly{ zbLBplJpB$1xo&aY;xwkgU2kr^KdqqBL+{jKb)HWuQ3mr(Z#zQt_N^U>b|?&}E^=lL z2}$M`8o!!ZT+;cPy>KvFmJ!kfZEuCJ{&Ae=)rp}hwvwf@%B}~TL_2>+Wf6;E4`XT$ z_xvb3cJPCl$8b%a_5)fL-C8Ofdjnx-E)H&N^1$R%#=8yA;SbPtmM&8YH+N2LZF0Uo-XRs2 zQnCB$s*}VkI>A*iQ%Vi;l12po4;Vfn4+EdBK`O{MnnzVit| z?A{OCkS?+Qx+ftD3CXlUi~y+U!(vWW_MpF*X+t`N7X|)t0Gd-JOe5W1ghhCOq)Wp4 zV*}yF>=YXs|Az^6qAQ}-x(cQ?BI`qkj(m*P2QP_zC6eW5nmTz=d@4UJT4(joYhCiG z?%T5m6+J}=D!h(+LwUoyZa5yeZb)Ht-uk)HDGC;+&37@3Huyp2AI3DDgIaCgvk6Z^ zqRuQ@;>IS&zR@X2Wxm;+JR6=qqW)zxJA{d)E5}z2(!EiJ8*|#=;&%4A$-}gLBYnEm 
zqvQmJ_3!XSq>uXKibYV{vVk_?KVKUFvGn44&PGUOY*2Qf5YZE}iNQbOSDVKhhQzYf zbiI9oRK|5bR#APuuQ*8TGlCgHM5oH=YPd0f70>U&oe^0Eb;#hZiRz+Tvhmf7wJqak ze5!jF2ly7T&DFf`Nqcr0Or743d#jLd<@0{B3~6p4u18X7=#6?ao*QY9*?BS6A!#mZ z%XS`S%ILS=6F2YfK+i0v;#T^V%}ziH)a8&{+gC+1h$}*~_JIp#YLJ2ie$cGG7`V35 zYNq-XBR9-@2O|Zn35nhHY*vyIPfU>l?nBJ!_h76iqs85eVr%M?9udbNKL{W6;*R^t z@e5FojooNw^Va#6E#%AK9KFt?!ro0Y6M|;?65L)*SndaZAWac4(A*mhlev-j$y>qE zN$+;>KwgX{()Jjc%bdhm`!QZd;?JpV9AC5BKzVv`!w?;* zHp({y2?T?^gfB?dre{YQ=uu7)d3dnukRPBcq=ROwfMd*dc!DN9_gle zl75}fU1qin?#-+eDv&LAY>ulZ#(*G9>A@Rt_ogL*-5|}2yyz)DgA27=Lbrydu zy`UsQ*f^X-bL!e+f<$*lV+RB>|LI9T>vNnGuA8JRS)(M)cT>qYVjelK@KGbGbwp}E z*jpWi;ZYwJh<}Jwj#fS$m3zouv^0X{W-<|=h~2)y5!->H@*e}t)ichMtSBz?F=MnE zCxeW(m_oj>CK*aTP}Bb?A9y+UsaN+bf1&cYE+N5kIF5z*NZN^6C9WSZKY3ExH5GHH zXFvF>qws}K)q_INc@cbJvpPnqLg}U`z~{T52{U4_6)p=&kP7(?L7$VtwwHEL}P??cCQy4S#z)9~hM5}J^qr=alpGgF3{ zlOPuUB*e41+9Attc!l@vm+|*pQWSZ&Z%^0SRff^>fv0Je1Qu2YGR&4}ARlCS{=6VM zU3)EWqw#TCbC}+KI_qW_s=2^^3lG>!26_sgIrKdK{GRnna*WX4c|(+{x$`?MW_y!8 z(RzBiKroHugh--k4xuu3>iV_f0?>hus#>me)}|eBk%?@?J;9A!V~|_ePlx8lB0sXd4z1va7BdaMn=`_ zoD2{zz?9Upu{Xxvlgtm$8!f{-E;jJt1#DXjS;OAZ$i-2FXVuF0=oigXIUa+(!>YarF zG;*mnlkM>kg=o^T_sdYeRO?L#;K`+0J2~Xy4HkQsrl&X$&+DZ56UlJ1{X_@N)e-KZ)`2aCTuh{VCgg4pSc!KePiC;0FQ4 z4kztT+j7PSDB*u!%yzwU7u+4-4bYl+_qr*s9XzY9PN3yL z8~)dIpM1Agk?U;RSmjbgMG6hQZQuH<20a|IDK{2hRar0y=823N5otuU|oL@s|Kv z9<1aF(Ym*dO1%4N^rUsXTC4|+d%fArRRRP;kjL6ERl3Y>u0$cS)wg2A`7 zKw=gdh**6gB!CWHEQX2)=A>Q}ZyZW8Zl0}GXqi=x6_qph(26kp+f1LYI7dBBL{AcW zF;uA&3SHSVxYQsz2A+J5}g znafcIB-xvx7x;7%7;l~H+kt52owzbarg}dm>FA^98KUq)D&`~e+j1vFppI`GCgeR? zUooTcd;A}grNP%P#%g-|{DVFrC_gbZev_zQ_bfVh{Rph7xcfYBN?7LwC^50)rrm-@ z{@{I$vr)X_`y$MJsu53dV`dvgO2(47(R)N}KP$X`HMn)d)hW|Fjng1>!pLsrI*?(M z=dKZR1?L!YzBPPkw0Hm<@!10XD99^Zx%qlM$7v&@#ipR;5<~y3nuzv@`>$y$xD%@f zIYPZ;9NJaV*&X*q8i?^vgoVW{&eCkF9Uk6aUtH^TPmdFAv<{2cvm#!0J<(V>kx_N? 
z*L*ejOxp>f-g`&uVwr0mnaTx+ORGVSkeoM`YY{uU8=O+STOic#Y(&|zcblB9J=Lw= zyDs^9WB*#X0y4GFOl`zQ@YnoTKN4Gf)xXh|LaUIGp}3CXPS5fgjPW7)dMfQhmCfJTxG?nveaiH^ z$V{avuypU&n$)Tv*FE50E1HxEd#NNdP#XMexn4nE>OOjr%85n<$k-RYlzx^P(J`R2 zxhi75M}pj`DBK_>A02sUPVgVel77C)jx|$thK!AA@*9WiRZcD9Y1NO zfn};?n}=Vi2h2~u%JPAM((VJKRZRT5JIp}6Q#aSnHB_}@{QPR!>VTn`Dpi8Fl!D8M zy&evv*$S5t+hBJLYR)pX@Ag9+aw08T&S|j0jT5W3XO#%lD!r~Yz0n?`dl^AxD(fcT z-c)^0S|OL4<%)roTGrQ=q(7MZdG&h{YHJ0*lRXm87~HGve%7C0$|ba!b$`)Iz&R@R zxlJhV0cq)&u$}#99EseBxPD~4_qJ8Wrm;%@u*lQB)LMmfII2EW9;(G;dA zCNxetB8PNFL^B4|Ke9>;C)|UH>V%cd1gcq+Ds09&zW(RIiwd+y zDoMHNeeZ;bmoU_of3Fw(o_r?!2j?D?0)aYZ;Ur zr?y8z;OY2pMU@lAh^3egFPatLwI%{$(ep=xgz-s0g|u`ERvgx|WiiqE z33592kJp7m*opTFMtz#ZP$TV5Vh6J7A3NSi_}-zJmA$SGt=--Azzkm)#}7g~rymsJ zwUE>Ph-XYr6A01UQA1;m`zS-P)K{3Oet!8Aw+mW=@)rYd_!v$2n|Pd#Rz81azzwj9 z-Rkqr)e_CK(yhg|DjiM=#^RM04`bkq!S*MUTs5P6Gh1eJxnuvDULR)SU8K_E$Mp-N zP0x!0PyswswwvS}1dgLAHj{fPFvhkoFeDThlHA^fFV>lXl4xPwc#=Y#G#K4?8`dFPb@=fW48wSP0q) zk_N(D)dBZf+UX!EeD2eQo201SB?YW-8}C|W{aRs4FTGlVqC9B+cUHrEL>Gqz*U<;< zPZmQbZ5&TQ~trYgV6%4%?gS9D;F5lUiwz@i2%?_+jJzCPIsTy z?N?vdk1oBl(oCDDdH#cQoRcmbyw7gfu~Yp>t`u0xUtlCQ(I>z53zG=&wIe`%zFhzx z0AB}&X7?htHCF^&c#siRfIz<~Yz~O{F3=&`<6`YoZB-u-aNZD0=-h$o)E5HhI5YEw zX6?wx#14(O#$}lo&kT6gVlQZCoBMIKpK1g=kSD@BB#O*e|Pk*U0R+ z{>3tRNLYbXL#j5}9``1sU^-!JvHT4?bbkQl#N`QfWx@C#(wdN;T)3Q4_DQKPHOQ3S zx@59R?_v?WI70O>nb0eC;AuLYRUBd7uq5d*}`$Qt)g_lb?tKF?v-T5!?~vDBXdZMLF(lC^e5@X0=BfK z>7JS{f8>a{QzPcEC_*RNk9%>p-MS^gm(I9sQdfJ;oqOU+u>Bpww%J}$^4OJtADHue zOiNzWT1B2({Z&C*uN|85xn^Ot{cCpwB>Ycwc*a^rr^d-h&V)+hiR+A0Nt*smnA1_A ze0}++N>y%)b*p#xzYi?gH~}7V1ChUlovv)nb%L#E-ZnQZ{)ZTHkeE5c?`H2R_cQlb z_`mUXE{A^aD96{{T>h);8E-a@kkMiER-#U>*Xt7p21InviLc^+<`4|~>Lni{g0Dk~ zS2BfCV(#~^yz@prMO2Q3(nGOWT!i>u@O?jfG|@$pn!4ly-)YLd8@{&G7>2w65(q^a z<<=YT(x9s5w$5nWLKdCfD)^=UG8!|ql61u=;jAeT~11}e*T3^{s)vnC;=$|bo4Jc!cxo`EM-Yl zb$+a@_asm%+v2r8vMN2n&O>QlDKWttf|E$x)CsQ_9<8EW8{1>OUI4^`==%pZD0&#i zmRUS}^u#&fP^*gCYzdHF z5$jjtG!}Yq1EyWkM?&7VKz*nS^|-g**N7sj^!A9}q>oO_Y*7vrx^AfwrUAoB7G*qM 
z903CSz~}H*kE*QfYA~HCT9o!ASiP6o`80l_f?w8labV)`A)O~KL864j=(z&~8#qQi zOg4!df%r)eU;!B9c+UEJ=bLIJKUY@6y1{xLi-p#O^_gzk!*|zQp%ymBq2JA&+3@{z zBYj%a!Ve42nB@V2p#puvNe_*B_}Z{nBfY4SgpXgT+e_=34eBh_Zg!Q&B8#Bsh!x}E z3h2elo(apT%FP9SkIB2>FHwvo9oph*i6&)H(&O)wy$E#COzmi5*VQmP6Gnsp z@<7q05lwdXC(Et}1esg1Eh+IGV|UtSLwKL&F?CTdN?)WxUn@JMFAwbLdS;rjo{@vB z>2yc5q$QKsqDVF9$Lz|^YYGPQ(@ISUc+0GG%L3npsdZl!oiz9kL%j4wS0ps2%rZ9c z9sll$5bu_djuutydsip1${5C^FXFlDVZ?;&D#i)A$0_uOO9z2_bTXrF#Y4fB zYEvR8-Msda5Tco*E5s$B@6I)astaQso;)X0D%@>*mQwP*SQoOaG0Ka24hf{G*WL14 zTho%^yQDDMeV636Y)--ZmLCYlrrgJ?cY3w#pOA^2q-Mla%(q2#=tnrP?Q%Zxxc0;T z=IJYN5khshK>Q(eMo-4%sL&5mNx^LUXxkY}e|I9}MjGCAdu?SW;Uur!W3%a~>3XvA zRuess?$g7nkg(WzZ%f_}HHZ1_!27^`&>XAG6E=AxX&X{MKr1)9v>f8R?3C%}+vuU8hfE`DjOE&P8=BXofSl4YzYz}B7f*Um_F15iMgXS9e zlV+WFpz32gDRfn>!;t#Ik*W5{=}>pXKAufl19H|rqE8_H+X%$b$4GJl@x-Vx_D4Y1 z8@mLvTl2|N1XSTQ+?b7_-03)s@}pF*9q2az6HR^0I9DQIKF+wRX;^fSuvRT*SNXE< zgu_?*Dd?CfZ)UrDt&1x%Uu>va`yf5#F?9rb$HypRGQT;jE)s9STL8YEl%35k78_D4 z7AI6(@qCdpJ!*cSh?6C|pzGXz@32JyK?S zU9)wLrX)^gdx~Lm=xT{r_=^g&t(#zdi(COAkH|R+jkWDc(ed^%D!jnAYWlK2i@2r* zO+=5cz_YViANjYtiD`T=bJx3!6(&^NM5qFFdL;_iPC827R!kDWYK0tz;yqMoRV5p= zn7z-mrv_EnLnWL8L}l%DdPzOpX^3zE6*iNMb3D=VWw2Zx@}q-}N;YhiyPfy_z=SHi z1==V-Qm5TM3VYP-WFMF%3{#q|V1~<$;HxfNA<(#Hp6Gt3e)2(F-t6S`QY@Zm-FQzd z*QwFg$|w1$|4815+F!eH4V`P|)@V;m0XwHZnOwL!{X%K5N36i$UV550!PhmlOVfRi zn)Kk?`Ue};1l^zPGojvl9kyfpANmS( z$*_AW4au;5esz-s`%6M)?EdWx47UD4)c=Zl2S&4M5 ze)7h;J-zo5&u>~zpkySeg8coe6bbPX+Z~5MD*LHVILR znmr;a$}(x}@9{tni~`)7;1^{O$i*=DgBH6?boMGQDgw1Oxf7i)0HUg?auar-nkQ+x z*^a+L9r$h#z=M5I1P1ZACv;y%{9UXEfu0qmB_Ua>zTw9EFs?Ez#xX{v0o5B;*i<~n z3_l5TFw1@7rg$KuF0|{(hR9J=x6&*4Hz}D{R|GRkKwgKTa(i%ek5s=e41c6kKPGYE{|r-N@+U z8r$kM(S#SJ8e_bkRSh?M?+DiRzJ`-&hbvb)hP(BgGnqAwt+HugxCKjHBX$swxr$Xz z!7veG$82(y%Z*|%z|;@KjaPKW_?y#j#$(*H3J+o^sAkCS-qlOQ2#c^jR7P`+1W_?p zMFR6?iJhk}tn3szHBtM!TgYHA>|*YeJ0df6`i$-g=nyT&tcyL)KuJnYD0y`G){4>x zo|@g6Z}JkP`T#>w0~0VOrpCw5qT*TasKc=nNO5}d;}zRN=wc0CR|sEp;mSbVL`FQx 
zs|8Y(C7X=ageR=4e7vV?F}37O#46lX8bPlaF=D2ZCs&4)pe#lVgA$>l!}s2!B!U$l zGN{sV`E>iMaWz$P$=pbD`BLSeLZ92|X!r%Wprbiz60?koAdJS-c;?`EMM5;<8@V-1 zE}u0CVw_*B@qBtnO9(&G8d|aUtOjykYQH1>LPhwb>>2x~tVSAtmgac4Dc9jCLk*rF z(Y$`$JJ+^ZgK6!Ffwy;|Q2_Lb+_uoV12u$8SB9ar>rA_XZ69|q2ZKz5--e`*UV*t^ z3w+%uB@4j1x=yE5kHrDtn&aO>j^R$JoiyE`dH5};Cg(Y^M)pg7y(i&)ebBAWIap5C z^CceMObfONaI!J3Yait#mcGDB6m`xP50@yI^1fN`y2M_7fs<*-#pE-*)IN83mq?js zzXV1%{7uN5U01(%mLF!)_gzS5?WDGC!ye0K*LQZDweFmy)AG%${eZ4wW!(-Lk2%fP zDWcfC%Cr#3C$C>6oQD6J^ufD8nA9juS(9=XsOvF*EoTCra(!_pw8JzqAwQ1#M9G zY_2=65w1pPK-#QkGqf9n>s}GcbdEsU0_#`pd%tkRWe#Co4_<6zuoFRR5Zo95QPRCw z00^|-=g)dPr!Iui!fgSfYNQfG$72WDR-tL($@F=}{TZdXl9JPfa))|xV_scCjokuD zCBZfipA8yB8Z`bmf+?x)D}hZcPo?4}t)SjM)i8zDH1|-23$2nA)QQRo0H0EH|2BK(4^BTib+*=Nr=^>4xbSf){}DWqZY-p29bEw;ifhb0#oa= z8pNc@gRTzOh%zS5bb3lnomC+xG%r-#@u}$oTH~fx{cK|m(41th z4u9`pg?~ShG;5(UcO!D!CW}}jQ)7uRn|c~g^F-K@FWSV9<crBQv+^ni2-$$`B zk@vU2HkDU8NljGVb<+Aew_HDc#Jg4JYy54oV(;d)dH_t_&h;v}sxXTRV&74jdt@D* zF{67NJ#o$xm9SXLP+>QVuBt!>I%7za6SWTYt`a39fGkCXI8ZPWwGq+qWWx4jP6jEo zFsO5u)EF~2vSPpg##06-xYdJvy(nDmHq3N!#Qv#2YwsY(v_i0MWa=INNmh5>mpeXw zt96He0Gw35UDA}H0%=J3!zRMnEJ)fky?pI!$ic$ZX!71zoZ4Gr7eC|4s0QKSeDbYqQRs*~;KwS=4E7DbOh^y3@#(HyU@+xdb z^YWkzZ9m!OTOBE>nNvIhGtrJC76umC(?(jE{r)GuXIQtE4;P6HZuoklUE4?Zl`aqO zW37vHFQhox=F*&u$o<7vO(mkNc>c_=UwP$gF(l6MZ>$v@!aqkZp5wBxg|#5x~t%7?)r(Aj9Bi+ z7Vguzfx(}heq6?QtU=LSF=L$26Oyn-)E#vyVBGf14{X*5*^4E|_`C{(uQ@<9gegC~ zQn!9y0lH8V-V%!b0UybH(uue8Bs1E-yZV0nS5yGyP6$mSu0fGy3l&W6oi3GOk2P1} z=Xv}R^e!QeMZHE~khBOfXc*jBaKhm^qG(zjjYoZ_^^oH_IJx|S@_fox1Nu>k9lM5# zI`kAkEyKz&i)w)z@bY&M;qJNvGLFE;%wQ6##51kLgyEEtX=unXD1g_*pFnsutqwc} zMX+H?Edgy5^XoVnK5PzuRK5y!<#-2l`UuZvoaA#7FlSr+G@K1AA3BMp)zMz7|Rc0INP-FgY-L&*~N8ibO7=Z9R@<)630z6W zjfI-XqU9yk(x1+)74UC$8>otztAPG!2$Oi@t;Q*gG6;XM8W-e1V0;$i`!nA0lHhxM z>h@ok6rmN$+4%nRt{QrCFY=;dRM^$6x42T#wkk#N-en+?o{hNm zof}?=%J(l3a^h{3)l5})(J=L*CxP5eTVb2BYXR&DWpCZIJk?em7c7dJYeoB0|@jKt;&SU>aZ}3}qFzU%>c(E|4oNy4=-}V?F 
z?B3KRyk6DUWUGP5gQaeMzm);@9Jmm;+XiKe8Q>(>S~AnW6mh;Wv<7t3LAN!lxh469V%4y1dX!F(}XwX=YEtqgCu@M|fafy06_uQPa7c)lHQUj{6 zpF+KnJNZGIsjGw=WG0Ml`vz67#?~F++Opbb8}|`P0(KDXq=Ef6(Ljzgt3db3iaoUT zPo}c1~Qkg|%{F{2*?aPxZbr$CeIppiP{)(PG^nNZUm&Arh}I3I18IWwY~6 z11xwPmkhA*c5#W%lBnTCeW>oBN<&Xq#K3Z=Pnaj6tWp6H8(5;)>6{+u75qIZPX;HG zA=&>V80$%RCqoanXlINAD80nGN*yyO3ka-LxgW#>v(~r)8(~rud&$L-ovscD9xb2qrr17`xPNgbHVWS$S}B|{-}7KNW+F|g#aMC;(AanpE)uagrm;ZXHUEoPLqnD$BArNv zKQ#-Q|B-99G?hG3CQ7sI#Yt+AX(~lmagHjmItj_S)@D@_5CvCA!dl4YF}_QygK<>P z8-y#U=%TAqR)hwNIL13sxNId3eRA)z6O&=xqCPSq3M22$Bpt=L-IOkA)H?NpK@Hz8 zCDj~Mo!G;yc1DKQgAqX;4U8%f0%- zKIe*EsljP_g*n*$1Wg8C@9*UkCj{2JFm(!z0`_aUp|DtOK;q~x7Zzc}ry98G$HxfS zzxV6jGFwF34d~W$rPl3%d14U=ZPc?~8G;Vr-nDe{m90YdB9*UG>;Z{+kHg$3I zCnwe1>l*W*T(^2xgUJ`rMOMpH3Uhtz@#q!Iv*WraM$q*cGie``HFEkCDV#UP zAW4PWFeHGrsRe;pZ6*zEooP{ktCoYb8Mf+Ugl@t?WOAbN(Lri*pq{)%V`rKqy~g@c zahjyedl1MxbloQlv&^dKBCe(H^ZDXTc-1iyQQN5S#N+- z%1o*6XFv*k73*`}$65>6|52)RzM=I+40R-9dCU53L777UMYcNUUO#Ik7p4kLghO*upg4yQ_q+VtIgf;SQ7Jj=lXE|8 zVnZE`e2lu^oufX-tX-T$TN$qPJ-%P+^LaEeiRItAP}LlYftw*oC9!2ziDLV~cVrX%G=hjY{rAm}r|8)jP5yDq<}cVPOR=68pjT?#~MDl($cC z4i}ycl}(y+GNFX3L7dfz0#f@!bz6)dIA&{Yl%M=tMat;6^R2!`(T zTG=Dui1Ds#YSnqw3e@APO*-JE^J=6!p& zP8DSY1un(^s1X3ovi&cQjSXEQFV}Z1V@=l!3eH1n?jU#mT26GLnO&UOhzo8EkOyGD z4)VYKYk%eD&etAe6B0qofUt9w=J;J%d7$*WBsc^;uynFzeQyRW-Z(r2(0ZO|2?hL_ zEOhCHlOA>IdVOVe!G|glLK0eSIMBDmxp7dEfOI*v(MrQOWdy7trA3h?$)wgw9XA4m zM9=l|RfZC&;bG*Y0EyG~vk~&p5e#rPX@)yjtL|3Q-LMPpR4=dc)4O$9s(iwqs6Tj0 zf-;;BXv}oVXS>5r?-<5p_kvSOeK{Mlh zpmA$CVDR1>Sx7mN>eUjYt@6|Dbi1L-hE*(ygJm=*!Y`7|i^dp8h1)#7+>|F&5VuEN&@ubWBK<9W`U zW6+-dYlt6m`S-728s2MjwEzl^_a)6rPA_AnUvcs(ALcn;IcX__TE+T*$gsuZZ@y7KKrG&gqSYY9Z!zL@~*1FW6}) zjN1LrXd+hs&2i`Ls7LW`AFsLU0F-0FLA0?~z)QmmhV=>)V$sdhucr!cv$;b>yMX}_5tVeL@UGWF(k=9HhP0S*YsGzngQQBU+WeE zZ~>x}7Pj!Zx%A8m9_K442x9G+kEb1BE@FEvcY&%S3n^t_r?D|Rj5=Njzs8{F9EQGvEM z!S*`iwmGC!oUG;SnA`f41YTMlkA-;owv8t0#C=NqnhiLS=kpEBZg6AO;q3{4>=Us8 zSN?3;PzwYLe&|8$oZW?bSc0t`G61LpwX602^9u6Z{4l{4D~- 
zv0uEF{e&4>RV@^v1!H{V9(fC#kYaOnpkw41o>Rh-O!fJ8(acG1fjI5` zXz`H1e2Fy}@cYK8-Te=Yg?Q&%bO?qsBb#O+h%OY;c#U$DCD8ogk8kTR)PAP@#;P?O z>&LFCz8b6y?WtoMK9}MLVaC^m8xK;j`pob>>y(ZIXR~i0ekjMmnw|k6a-;cJXhngf zBx~E*HwP0u!$*MQ92C1yK4ts>hD31}CsR}lp2$}E)q<(cehV$sp0UDCC57{^MNf}h z1rEJjuk|(o0ti#y`$f$kas@Xs8NV}`M6l0-Wxwu`5JlJ5zr?b^2o*72wU zLPGGz^wG??@{SX4?PjK5lD0T~4+0pHiwzMuS{pq%$6#vuKaekke6EA|zHIAWwU<&E zx&1IP4*UK$ba5A8N!-u%axy!+>kT=cU;2-kXYL++f_MnA?|bgOkx`t&+DVPQTx&>) z{@bgr+Rx%axoLqU&pQ$4Y7;2&e_sU3o*u5YdxBAe|8a%L`u~R*^o;hFnJupsyZ3JW zFVOZsfa{+S;4i-ZZ|M0iwEaKD{>d~-qiYT9nqa)-dP-~vxfpJq8{qkViRTvP7K-?0C522Z(i%X+DCY_ck=uc{+Y=0 z^9WC|d9lIP;uk!h^l+e^Jag)sY|4P!CYMo8-2BMhTwoW2=Wl{fW?TPz3IF3W|6a;} ze&TH z+q7-J4tGr7!zywuZ8@Q7TY`@)U7OX9PH`i6+vfcGF=}2aJQuk%^^%+5 zwP2W}*uEVoUHk~=h!pNa;U*g1v9o9YCJl1vK`n5A19)}i22jQsV9!|oex)Iv@2>C} z`z__V;(BM&cxHCJp6goJGQbO@@k)r^^X=IE|Hw46Ym+}o1hQlzwf> zRl#}*Vg}9?ElF0ULF{U&9fK#H~AAI;1hR4)x+*+7Zs;1K1`EyIwa56XJM|{Jw-Pvz zu~ON#aofjeq;ewE@JdRIe=R8~zkSR&^_X+Y6iUG1_&I!rb#yy3wALA>>0D2cBvgwG zDu3k0iB>%cEt zu(-vg&f#ezu8B{O7#E`$J)CgrYR9al-@@|`jv0%r!$;H&`pSO3CdUVdCwRT;yl-MP zNU^uo6Creq;POGrv%L$Bgw&}9O{bARW7?qr@oXyYWok>ZBKG0E0}(zqpnY7+UNLf9 zykDcVqsHeifCO5i47D9NSh!zRxVi9`~5Z>NN z#l1T#ZL&yop@zX!Vg509P*$Fbs0I zPHh0ccDsfah?qp#Q?Mifso{{DxUowcBUZcDkT5ni+$Pou8$*(jU>&J_k2k}b{dvC<^gK8-3EUlm;I)RCB%pr)nkoVGy zN2MsqS+!n)%u&Rf87A_567{U1&p&S{j;Fa{L`H@?u(!M&=!!wfS$&c~=4W*U$jdjB zJEz<0dRPA#8N?mjm^-2rdl67m3b7Bklw=#QwV*9!z>aXg3|Ay$sp`UBt(nr)t7J;C z<@U%gqMF!A8wD~W{{B+2b&mRcE59i~jK%ts;}#3AJ$QR?*0PWk1<0roZM0AmxmOKN zVt@jyFc|9ra%g<_XVqL&xQIfOh-X5_sim0+^H2HH{GEb3rKnu0=Q%wC-(0OQ5j`8X zHFKST_)k7wZ7y~I9*Qa%0qe}F2vM+)gLUVvu{t9kbGXazNkT^^Q|FBUH)=3uT)J7O~1 zzI8JGZ=yCszNPs@v*kyXLr>O4EsT<-q`)D&z1(t%b>t+1EOb)dt0r(c2sK#-cHP#k zq(T;}5F;}Dt?FRBXA&G}C& zHy2YYJ^c~2m;eDn5mNP{&-8_48&O10Xue9Ahfk6tSK$k4dZ<%M83Goq0?ri&x}5S> zo)QjlrBRm;Zi=p>q^knmh`c~oIGsdrKBk|dy0YzfzF4g-Se|Y&Z+wi^d zZh(Vkmjk|CY-YKTNv^hztX_EMm}&d=4(eLuOn`(1fq0d2EZTmjrO zK+w^;kO8T?l2P&<`+qg_-BC?_OQWEaAc_tcKgo@kwO+b^W=5noDWlIz{Q`NJ5K291hUmzH+^7x|dAD 
zW-tHM(NQ((oUd9ZpEV&bnFHmcgF!%mlEdnUW-umgNjaZ;mMC3kjW(z2ONoi9s zt?qB&pjWQ=FRpAXl92p?_&uR7*q*xMCQdx&YNwft@LG&0k+Km(&2`QGY6u4;5_BGi z$BzgV(Sm=6PT;kZ5*I&LFut%SoPkCSV>5%=4ool5XjvQpir7H(xH`{_82Ay3;0Pf` z5qfS@Zmxyv%2o%zH-sIZJC3fIcDb-YCZJ>H?m#O3)l<9Aw(yIB4oBD-VNqy4xR@XE zJf4PxRpJeE0Z+^|@Ycx%W3;5{yIKGzyT2|H^uZBTXf%#;FUz$SEpmJqJEKh=8U996 z7r*E!x0>?%qliQF8dP*kWboF*zj|Ni%+#( zb-)0>yhxn-O^K`aH^MP+j&d1a_Ygb0wi=|`ILHY zNMH-+Pi7U1T=CvCdnJAOzj?&lfWXwoNhkMbH(xcL$~KN*mHY4moy_okp-?6UFze&( zW;xGlWB^e0W0E@>$V-u&+-I?q2)!PYCM&rP6v0q!Msa4$@;L-*u76qJ-EB~qjqogT_w=7%D2rc--B!YYfE_@4Rb z3I!17MB^;`Xc4LRW|_WTFujy?Lq)ztqOKr3L-gtLPvz++ zZ7_*>RrT%RPSH!Dy#oUW*$3Q9wk*q;MIJVvT3J0{%)9LTG^{0k=(Xvz4X!}>Bd?Eh zbvC&CXIrfx&ux*m*D^>6tz>R{CE=$I&F#kmQOSae-erGmgpJW*_#MirT$mD+xd!Dr zT?QDRJtW}GTUmcNZaYsmRp=r#xgRTN+FbhAs5Bxlx3KRD`hX__#(N2C1*88sWumjQ z(G6H5HLHZ5X>$0l?@oBmHVps6{@~1cAEf@rtN-g|051I5<^ym9I*I?&*5beW{Xky! z-pUJbJdCMFU)OOLV+6Jzjz`?% z&P@VZc}QHLfF%011rm&It;c;@tHV4s)JNZb;c}MDek_%Q-f9F21LV1QdA*q4B0|(`$^Lj9S^$mMX6B0kJDieGn|4f*Fpe+vyvJTN2 zczDff9>|V5DI{3@t;YVYRLaxWsQ8%4*4ili~i!%a2N|)*^rn z=S;=B`}d&nv00}0n^1AM>i#+q+had|5f+vD!PHP^x}Ni4qtH1Zs!&eXPt%u*66?&R z6+#0(*v&b4fNYQCGagJ}Elm@2FsLSw;$TuxndgSgB|aHqZWe5M-BS|sQ_uhqIS};& zOSa6TV`Y331i&V=9BHSZ%r*JJs{t#gOWzqFhYJ2s$~RL~6Nrtgz|t1kq>%wrA`{@) zJu@1HHOcZE*$TuBOD+2l7kln==FP=W87e7bgOovrK{SwM3=FAyOYCF-EUV;2O&qvw zFLMDOaxDDD=gVx1C;o>Uq3*)7d{>59KUY@7SnwqlP-g(_LGzVOGw>}H`;NKZWYIvv z?w@@!D^^g?6TJgeZ5lpsH|QiLBd109(qP{e?u>}V=#v4#5&kgs=H5ehR}cb2fxS(t zOiu&8$}J-xFp1v+s@pgp>d$q82oO2G>hAhBI$Oj7gTD5B^yIGNObX&!sH>^M>mz;W zM(m(|p(vf8EeT=7b|3OAL4nU&T3)QuK0{7cDI?d6YOYLEOiT7h*=P-WnRpCTLNVO1g;Q8a&^$1V!>T@V1PTPO z4FhM07L=Nm_AOn%x?3nOpdTF~^9SY`?8r_zwl(l~)tiMlU|5X792`U-xG24JWvjdz zY6*I?Hw%tTX}ETvRv^@-zOQ&EMnW>;B3MWPh-#fvafbUBA1EFWVd5pLVjYO6~z_BK`Ff73C6muO`o`kEK1+V7P1V z*VNmmH~4Pldk=463edyll;6*mQ04W*mq&iVuCJwKPK74zB&}*3sdnwootj~*3raQc zGvAY~*O3^FWIHB;VpLL{9zCuEQDW7`@1&mZBtB&Uq*t~VKBJ11TV8>K2z}m1^l1LF zIEGu-ut7THgjgWWs2w34jpUsI9QqE)d4R)au1{Rcuc5$}-G}DZvflzEi*sIR$A^ZU 
zLk2wOK1Zt$3=`nOC%%UL-O!+chyJ|6kYvvQfD}=4>T*`uL;!C;=X~+1vLD#P&sqrz zs+)ok1p^|w@i@(fEM^+V6y?*b@77fq;EONDW;p>)IaxH%sutqj2jy=w@UTlb0yY71 z10!LTv6?(Z$j?|2Z|wV#QYOd69hlt!ej*8Pij+G~e4HL!2;*BxGr#WI=446&)J^Gd zh|g?4X#$-uzNaZ!hOyPkk~~3v7Hsh!)(z#(uph1Fn;9us$ElK6n~1*=y43aqSbqY_ z$tw3|@Dn=~eW$QGO#4eR)0yqu&00!`Y9Dv-Dn3es*y`qLnm)l2xnk{_rv)=o8%ITe z^Yx8*M0_kz=o6-@2)+Mau;KtFc9JJ0Smhtz@`|oQC~+{ULhQtweJig6xsFx$f>Q6b z*MtL{xd4^GPuCv{Y2X(ZUy+33CmKD7{o4(p%dvy91bYiSP_@GI;3R>ANuuUsASYUH z@Dx>l99jsIVh1ef7tLFFxx9G;NzfABr4P`&A` zGPaaI^K8zaOq+$YlXygRKmhW$P0+=_c?aRm&Q2vH5uxh`D8F&R`&(3K7j6y=rtZ^O$#7E~X zaed$qd_zVJyr%p^wGvMY7V`&qGVuJU!v(4&VE^-H6jE|#axtzYUI0f;gIArzV*dZ$ zK06}*Ga`se;{87(YX5Jf$&NXrfg@mAwRuR%2gsM+LIL7@h!1=db^iYu_V0aHz=H!@ z8VxVtH<9huQv>!ue8{q3-HYFsJ=+o0TF1eOj#x%`Utsr8+M$ML9HUs$7Z1S+FGl=g)DQQ&!l04igqXy~$}DDLMbCct!4721=V7u#H7bVAlE<0>JS>Yvim$T8&gm zCR0Jt@E6D{`J;S+$FT1DX6ALBF}_1I6KcBYgd{YJlFB1=WO!Bd{d#c9egT~;RCf}- z715vvH8>D}J}T9bMBetX9@wWV>sx=v3#A9T6~_FG(zmfik|Br*rmk zgVfmyx|D@r9~@W6R>!%26*BjluEn^ELippNmkKzuOXZpJtTu7xkq!D$@lcGA#W?oe z8&^Ihua7%+t>thZ4&!pCc5D{=;T7bE^FwwbLp$v#k+o$JX_I&swEe_PZ2eHisEIN9 zV=co#YOnW?D%Sm9_=u{^>uIOr!#1PeqOme>vFZ=e*$L6kuYef4t1O6eTbTt;?Lz8Z z*+QA^%Rex30zlnAgEhC2$;UlbtNM>%4WT~20jn{*Rvm4EO|i<{Lx1qllV5sj13kCj z;<>8dS37_*saWdN$tW@s;)ec~9xTwK6Nze2YyrxcwxSh{{l=sXX=>lchRjNIdB7mKWEUOgh8+(TVDt^v*{3p! 
z#2iy&fa(BE-*s?VW9AuaeUQt!t*@xER2!fC_+sSHGCr29nC7t`gnI=jU&wx^SzVr< zvvkO+!V90Ls|k8N#|(+Jueb)hne?IezkGZ-Ufg-a8ujVlKgCvDoOFBrDEZ<3s?L(D~o9^2`Q*st@OCi=@xrFoo(b=JpL5tWa~OWMpAM5e1I z7c7)F1(-0QA|KZuh4Q)ux~Fy4?P^(!Uw>~3<1CsH9@W{+HL>(CA5HVj+IDM+3Gw0X z&#)Gf^?1P|AYHj%Y9l8E#O!m*TGVVxjYaHwU6tN_J+^Ht*SmnTxwWu{Z*LPNd7Ed` zwGl?gqsa#Rr9%Slrr%|wejE(^3kw60oTCWw~038DE zZq)30pxlovE%(_-&JF2_CssDJh)PROBG*^GmV?nPR%{FQyuBVa+gSxv(egelKh1bi z6ssxAp3F^F$<6}u7FNnM!+sVdhF>N3BBT5gyuAc>Z!ZTn^V<})I1e)OS!@T7_q!~i zP?0h#pWAe^#>yMBtPs8Av|{Vbc)Z5xPnV0|+$3*YZeOcdSb_5`AnPr0IremH>uPB~ zPO)tf6tCv@jj=*IN3Y{fs~X{^N(y}`BKCW2BYg#SY2{;hbqL;xy`4qvIM)wR;f6>( zFJsk^WIVU+9yzrA=1MW#JCO)HyI~O29mKoLY|$!nh>H+Sx$I2kc5(b^9cQPy98xOt zEnVfplw*doI%GvL{eE}Yd${!8L{#izeBz`sE!EBUvQ4W7@Mx!TxF*xDNyS?=tg{Lc%)ZJ`cfw@V?tfe#m6Uz=9kmAbieUdaA zuhEdUMLIS{h9N&P+1ttevSLCUPNm%09Ti0lPEFKg*swCP%K<;XhmjbC^bVPE3UFR3R zOAVJBrJ1ZZs0drV!Fw%CFopZnPw;+LREZ&L_{wW*w`0eP`^x?4?NkG2PsJum< zX=K!R+|EADrnR(r-9k^;=&4!Y_jE^yTfm#~#pi`l#%j1d>y~k+zO}}aW{OSY;GccY zJ$_7VKGE~i!V<*+cR6WPzFM9WN*ijXBW=G8ugYlJyF^zh@SLP8n4}aG+8)yoIx*6; zt3YIO159SNQ;doWOTk@K|1`dC(3>RN~E{cW3ZDq*aL;FT`&5n*PDjxVIjFVjvVU}@qN5r~-A;V23?BH4!qt9NC zyB3)kr+Y1LXEcpZp*~gRgsc{Rr$IGq(bh$4_E<7-O1u_WokXpFCw(Qwd@=;Gr(cu4 zHVp}zNV}RAcaKfuv9mEKw9l%uHr$N2sMWrOt#QyWZs|fO+b_K$emB~Hy`b87hqzPt z1IWsYqdUIut2|>)s~pJg!Nad+J|LNSbfP7x*FgEI^10ep`d6d|lEPM4X~#JVA^8X`$N1$wz%F0tw4yhI7H zA5Vf7U6Cy)H1WMGwr*?@P+lHUo`%WZ^>K|^%&;erGhhv_W}G79;0yQdV2}De9_m3x zp4!$q51l&c9;H|`k$vp0uSqSb&xjyrG9Q9I+2V|pCEZ1xv=urw7GoKysX(EO{NNFh zv8F!ugb3Kut)SIZn#OfA*PCjOgma?T*yd4?T2bQr{KtUOD`KExIMGhb$SSwJvqbhz z#5D(_&pTa4d;4BSw0c{`=WY8iAwh3@KI3&iJ(H{>*0Y@=lBZ=M~we(*=#5 zsz>L;p-*nJC>6Q)lzUfLesy80vM^T<>Mo_DsuB`gj(<}Zqz#JeP$K>OQgH#oiNXuc zUThF0{-hcvs)F-`DeqNsN5G)98= z#b?WyD?TM<#XD*s1{Xr(a$OZ3>#PNjWX_iaFgB8@3#ZI>3&sz)FJN z^oq+@rRfJddZt~ygKhNLv)!8{D1`_Hs-;gW4jL3h<)8TI1Da*cTPgA={as1EMvoRi z`wER<>_iMb*)2IfzbX?b*j!)!h;z;CsT$B7EP32?dTCZzVAq8sx`3vihYFRGZZSrQ zS^mwCKhdTwxb(j4TEe1O)F{VTiiNRAp0sgO#j@^i86Gaz3x=qX8aV6a*$t22H>*2t 
zR~+vLzT_-ew+1kj2-e5)QmdRhe44uo>Bv6}|p8rW371 zM^H{WnQ!c7U1!gog&d=8G0r!+j|*WLC1W0(U}`EM6UdHwhpqI6#hl%UYR{B=cFe1G z1-}TlXuTYt{mqZqCmt_JsQG`@u2%I8I>`XT{qi3&J{0U;9LZ|ZhRaLWnLg6aokl=3 zBL_c$mo0f0lBV8kewK*}0j`ViT_JWXXUkpwb-IG`VMaJtC+LsG-H0?Yhk!AN>~(bQw? zwikNmkQ7Z&JITh(%@V#n!W`o#ClfCFE*uHh18QX1Zh)PAUx_9lFNmLrt-k0yOf1!g zfA6lZ=w<>k%TFv;(yROm*jHsx_z@baG=)xA@Z5(Mp~SD>vXyc17@yQMzXwhz~<|TV_SOj=yGe@3 ziSd%kVf|hui}r8NSL9H=2F~hw95fY|y4^(QTuwT7o?8^>ar5Z;E@h1~TtBzX?RopiPTC7T zZ`|eG7-OWR_x=jZ8QzC}%sFQ(&n=ibVD$LWbd=*=h@&1cH5HM+BT)x84OPNrBEwE4 z!i`a`u7G~x4r22>A`m%|-8*MaFp;DGC@)bLF%h7t$%(*h5*L8Ch=R_a-60y~1tP&5 zvk{K|XD?hyBk9nmrh?oBIuJ>`)i_6O!2xb8COo?fA$k@MBJU^n|M=|B=(MwUF+?*& zRXS&#Rhww2h#+_Et`N-<0sVpZsi>JrJCv$a|NCSg*}(s6vTXc)$PCdLv{15mY+(rc=nP;s@CPw48v$SD9&X}F z`k?bv)P&KZ%;Sl6OhM#-o&luVNjpQvF?;DB!lhmC=lq9^7$wO62{i!3N*y~|qS?Cz zv;P9$|39DxK$ktO0MV8uvLj_C{Y%H2{I z;KY5DbM_)jp*$m%NkfgaT3ASy2llsMTT7o8IqhQkao#SXT@9u6@HBS~AAclGdrv#T z0w;L#Zneb{Uv7u?n|H5{X#%Qj6Id{o_R_wu`9=k0Xwfaa#~0~660@$bw@vN5;8y^v z6R6rTmJ!9yRCwQ~|H0#WNzMR|$AL2(yP_{r6MZGt5I)>7y~YNKzC1Tlezr!mNAPf^ zg}d;&yc-Ik=&+Yv&H;~IR|wpfr2SB#k|}cJoGDN|@bhXQ#>Fzrt4R)hZB{teo1PzO zH~%^bb0F?vgiOGu9Z#D$$oJQzHnsbz(m6Zm&*T5f+Hqc2&R~3qF9nATy1H%o+>FpH zUe8Z(4UuSvn~xSaw^Xr(#u!5{W5hDtxni_((zo$b^L6ZAW8%c}6tC<}pt&%1tbTo+ zk*oxtnr^RQaf7X@1MRLWO~2a(#<=Xs8XlbB!xfbC{XWXj5xT&fWeZE7FxYckqI5lT zaRfcm#P4&xi=Gg9`Cw>{k=oo4F`%0Ow(XGP20s+La&J=867Lk;9YsMKF}xg8Jz1W1 za0~bGy*(xEC%=e!CqZJLxYyhjYBYBgzeCw`F+WLnO^s78Fx)t3$UA4t{Aj6HZuEK-%{JA-ih$Vr7-`U)j_(&kius{{rhs!!Ng<8r*khZb+~=>zYs~E zJT>58D;QH8MlJQ9@;+_%BI&*~ zY$gZ2nHmzM(F$g>R*K$UKwrWj`Aiu@x6ox@wwd(K|IrMmzE&PAex__bR}l`_YmXrp zIdE$!zK=f4BiuP+fYor1YSq8J(E7ot^)`R*Z11r?D5mEvF2_yIxN3UjIxUp|4RO1= zRE45S-R%W*g%0Bzh0K3(+u$HBoq~cZBHQCMpv#P%Q`Uf8<|EI@XA7g*Y=;N$E8-rn zq$|o~L_Td7XsYL+Mio&{em!kgdd68Z6hyMmvrkRcmnvH*bhxA!)Asey_2uIX9=ADqegk zO_nMWEZeIvwO5I4pY~Fa)BD)QZGHUW#RJInAD$gxq#|wZW!tmN(_ox2#cMQc{?Bsl!gJ)+6UrW|K}XHYn}a*`2ubYfG_PEbC8 zM)zkF1iz(m@zrYA%t*b6@nGm$^m;>USo4^D~IBA)lXV{ec(Nf#pA-S25Vp5E*mAh 
zMxnAz9=LYcAc}{%$dp0l5#Dpfg=8=#KbsH0h`uGMeAE2!HP_D zxh?NeT&sS8Ixgq@pFm8O~xMq3Ay^+i(;io>3P&VHF5PDs)!chqf2jt-%61ESwDisI}>$6 z<9{-YOV U{Hk4cpD==|qNYOm{bzyy1rH7){Qv*} literal 0 HcmV?d00001 diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/doc.go b/test/vendor/knative.dev/serving/pkg/activator/net/doc.go new file mode 100644 index 0000000000..aab1c4287a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package net contains the Activator components that govern Activator's +// request throttling, routing, and reflect the cluster network state. +package net diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/helpers.go b/test/vendor/knative.dev/serving/pkg/activator/net/helpers.go new file mode 100644 index 0000000000..ada62d7f7a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/helpers.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "strconv" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/serving/pkg/apis/networking" +) + +// endpointsToDests takes an endpoints object and a port name and returns a list +// of l4 dests in the endpoints object which have that port +func endpointsToDests(endpoints *corev1.Endpoints, portName string) sets.String { + ret := sets.NewString() + + for _, es := range endpoints.Subsets { + for _, port := range es.Ports { + if port.Name == portName { + portStr := strconv.Itoa(int(port.Port)) + for _, addr := range es.Addresses { + // Prefer IP as we can avoid a DNS lookup this way. + ret.Insert(net.JoinHostPort(addr.IP, portStr)) + } + } + } + } + + return ret +} + +// getServicePort takes a service and a protocol and returns the port number of +// the port named for that protocol. If the port is not found then ok is false. +func getServicePort(protocol networking.ProtocolType, svc *corev1.Service) (port int, ok bool) { + wantName := networking.ServicePortName(protocol) + for _, p := range svc.Spec.Ports { + if p.Name == wantName { + port, ok = int(p.Port), true + return + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/helpers_test.go b/test/vendor/knative.dev/serving/pkg/activator/net/helpers_test.go new file mode 100644 index 0000000000..ac0cad2a7a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/helpers_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking" +) + +func TestEndpointsToDests(t *testing.T) { + for _, tc := range []struct { + name string + endpoints corev1.Endpoints + protocol networking.ProtocolType + expectDests []string + }{{ + name: "no endpoints", + endpoints: corev1.Endpoints{}, + expectDests: []string{}, + }, { + name: "single endpoint single address", + endpoints: corev1.Endpoints{ + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "128.0.0.1", + }}, + Ports: []corev1.EndpointPort{{ + Name: networking.ServicePortNameHTTP1, + Port: 1234, + }}, + }}, + }, + expectDests: []string{"128.0.0.1:1234"}, + }, { + name: "single endpoint multiple address", + endpoints: corev1.Endpoints{ + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "128.0.0.1", + }, { + IP: "128.0.0.2", + }}, + Ports: []corev1.EndpointPort{{ + Name: networking.ServicePortNameHTTP1, + Port: 1234, + }}, + }}, + }, + expectDests: []string{"128.0.0.1:1234", "128.0.0.2:1234"}, + }, { + name: "multiple endpoint filter port", + endpoints: corev1.Endpoints{ + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "128.0.0.1", + }}, + Ports: []corev1.EndpointPort{{ + Name: networking.ServicePortNameHTTP1, + Port: 1234, + }}, + }, { + Addresses: []corev1.EndpointAddress{{ + IP: "128.0.0.2", + }}, + Ports: []corev1.EndpointPort{{ + Name: "other-protocol", + Port: 1234, + }}, + }}, 
+ }, + expectDests: []string{"128.0.0.1:1234"}, + }} { + t.Run(tc.name, func(t *testing.T) { + if tc.protocol == "" { + tc.protocol = networking.ProtocolHTTP1 + } + dests := endpointsToDests(&tc.endpoints, networking.ServicePortName(tc.protocol)) + + if got, want := dests, sets.NewString(tc.expectDests...); !got.Equal(want) { + t.Errorf("Got unexpected dests (-want, +got): %s", cmp.Diff(want, got)) + } + }) + + } +} + +func TestGetServicePort(t *testing.T) { + for _, tc := range []struct { + name string + protocol networking.ProtocolType + ports []corev1.ServicePort + expect int + expectOK bool + }{{ + name: "Single port", + protocol: networking.ProtocolHTTP1, + ports: []corev1.ServicePort{{ + Name: "http", + Port: 100, + }}, + expect: 100, + expectOK: true, + }, { + name: "Missing port", + protocol: networking.ProtocolHTTP1, + ports: []corev1.ServicePort{{ + Name: "invalid", + Port: 100, + }}, + expect: 0, + expectOK: false, + }} { + t.Run(tc.name, func(t *testing.T) { + svc := corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: tc.ports, + }, + } + + port, ok := getServicePort(tc.protocol, &svc) + if ok != tc.expectOK { + t.Errorf("Wanted ok %v, got %v", tc.expectOK, ok) + } + if port != tc.expect { + t.Errorf("Wanted port %d, got port %d", tc.expect, port) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends.go b/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends.go new file mode 100644 index 0000000000..a8f515872e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends.go @@ -0,0 +1,482 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/network/prober" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/reconciler" +) + +// revisionDestsUpdate contains the state of healthy l4 dests for talking to a revision and is the +// primary output from the RevisionBackendsManager system. If a healthy ClusterIP is found then +// ClusterIPDest will be set to non empty string and Dests will be nil. Otherwise Dests will be set +// to a slice of healthy l4 dests for reaching the revision. 
+type revisionDestsUpdate struct { + Rev types.NamespacedName + ClusterIPDest string + Dests sets.String +} + +const ( + probeTimeout time.Duration = 300 * time.Millisecond + probeFrequency time.Duration = 200 * time.Millisecond +) + +// revisionWatcher watches the podIPs and ClusterIP of the service for a revision. It implements the logic +// to supply revisionDestsUpdate events on updateCh +type revisionWatcher struct { + stopCh <-chan struct{} + cancel context.CancelFunc + rev types.NamespacedName + protocol networking.ProtocolType + updateCh chan<- revisionDestsUpdate + done chan struct{} + + // Stores the list of pods that have been successfully probed. + healthyPods sets.String + // Stores whether the service ClusterIP has been seen as healthy + clusterIPHealthy bool + + transport http.RoundTripper + destsCh chan sets.String + serviceLister corev1listers.ServiceLister + logger *zap.SugaredLogger + + // podsAddressable will be set to false if we cannot + // probe a pod directly, but its cluster IP has beeen successfully probed. + podsAddressable bool +} + +func newRevisionWatcher(ctx context.Context, rev types.NamespacedName, protocol networking.ProtocolType, + updateCh chan<- revisionDestsUpdate, destsCh chan sets.String, + transport http.RoundTripper, serviceLister corev1listers.ServiceLister, + logger *zap.SugaredLogger) *revisionWatcher { + ctx, cancel := context.WithCancel(ctx) + return &revisionWatcher{ + stopCh: ctx.Done(), + cancel: cancel, + rev: rev, + protocol: protocol, + updateCh: updateCh, + done: make(chan struct{}), + healthyPods: sets.NewString(), + transport: transport, + destsCh: destsCh, + serviceLister: serviceLister, + podsAddressable: true, // By default we presume we can talk to pods directly. 
+ logger: logger, + } +} + +func (rw *revisionWatcher) getK8sPrivateService() (*corev1.Service, error) { + selector := labels.SelectorFromSet(map[string]string{ + serving.RevisionLabelKey: rw.rev.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePrivate), + }) + svcList, err := rw.serviceLister.Services(rw.rev.Namespace).List(selector) + if err != nil { + return nil, err + } + + switch len(svcList) { + case 0: + return nil, fmt.Errorf("found no private services for revision %q", rw.rev.String()) + case 1: + return svcList[0], nil + default: + return nil, fmt.Errorf("found multiple private services matching revision %v", rw.rev) + } +} + +func (rw *revisionWatcher) probe(ctx context.Context, dest string) (bool, error) { + httpDest := url.URL{ + Scheme: "http", + Host: dest, + } + // NOTE: changes below may require changes to testing/roundtripper.go to make unit tests passing. + return prober.Do(ctx, rw.transport, httpDest.String(), + prober.WithHeader(network.ProbeHeaderName, queue.Name), + prober.WithHeader(network.UserAgentKey, network.ActivatorUserAgent), + prober.ExpectsBody(queue.Name), + prober.ExpectsStatusCodes([]int{http.StatusOK})) + +} + +func (rw *revisionWatcher) getDest() (string, error) { + svc, err := rw.getK8sPrivateService() + if err != nil { + return "", err + } + if svc.Spec.ClusterIP == "" { + return "", fmt.Errorf("private service %s/%s clusterIP is nil, this should never happen", svc.ObjectMeta.Namespace, svc.ObjectMeta.Name) + } + + svcPort, ok := getServicePort(rw.protocol, svc) + if !ok { + return "", fmt.Errorf("unable to find port in service %s/%s", svc.Namespace, svc.Name) + } + return net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(svcPort)), nil +} + +func (rw *revisionWatcher) probeClusterIP(dest string) (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), probeTimeout) + defer cancel() + return rw.probe(ctx, dest) +} + +// probePodIPs will probe the given target Pod IPs and will return +// the 
ones that are successfully probed, whether the update was a no-op, or an error. +func (rw *revisionWatcher) probePodIPs(dests sets.String) (sets.String, bool, error) { + // Short circuit case where dests == healthyPods + if rw.healthyPods.Equal(dests) { + return rw.healthyPods, true /*no-op*/, nil + } + + toProbe := sets.NewString() + healthy := sets.NewString() + for dest := range dests { + if rw.healthyPods.Has(dest) { + healthy.Insert(dest) + } else { + toProbe.Insert(dest) + } + } + + // Short circuit case where the healthy list got effectively smaller. + if toProbe.Len() == 0 { + return healthy, false, nil + } + + // Context used for our probe requests + ctx, cancel := context.WithTimeout(context.Background(), probeTimeout) + defer cancel() + + var probeGroup errgroup.Group + healthyDests := make(chan string, toProbe.Len()) + + for dest := range toProbe { + dest := dest // Standard Go concurrency pattern. + probeGroup.Go(func() error { + ok, err := rw.probe(ctx, dest) + if ok { + healthyDests <- dest + } + return err + }) + } + + err := probeGroup.Wait() + close(healthyDests) + unchanged := len(healthyDests) == 0 + + for d := range healthyDests { + healthy.Insert(d) + } + return healthy, unchanged, err +} + +func (rw *revisionWatcher) sendUpdate(clusterIP string, dests sets.String) { + select { + case <-rw.stopCh: + return + default: + rw.updateCh <- revisionDestsUpdate{Rev: rw.rev, ClusterIPDest: clusterIP, Dests: dests} + } +} + +// checkDests performs probing and potentially sends a dests update. It is +// assumed this method is not called concurrently. +func (rw *revisionWatcher) checkDests(dests sets.String) { + if len(dests) == 0 { + // We must have scaled down. + rw.clusterIPHealthy = false + rw.healthyPods = sets.NewString() + rw.logger.Debug("ClusterIP is no longer healthy.") + // Send update that we are now inactive (both params invalid). 
+ rw.sendUpdate("", nil) + return + } + + // If we have discovered that this revision cannot be probed directly + // do not spend time trying. + if rw.podsAddressable { + // First check the pod IPs. If we can individually address + // the Pods we should go that route, since it permits us to do + // precise load balancing in the throttler. + hs, noop, err := rw.probePodIPs(dests) + if err != nil { + rw.logger.Errorw("Failed probing", zap.Error(err)) + // We dont want to return here as an error still affects health states. + } + + rw.logger.Debugf("Done probing, got %d healthy pods", len(hs)) + if !noop { + rw.healthyPods = hs + rw.sendUpdate("" /*clusterIP*/, hs) + return + } + // no-op, and we have successfully probed at least one pod. + if len(hs) > 0 { + return + } + } + + // If we failed to probe even a single pod, check the clusterIP. + // NB: We can't cache the IP address, since user might go rogue + // and delete the K8s service. We'll fix it, but the cluster IP will be different. + dest, err := rw.getDest() + if err != nil { + rw.logger.Errorw("Failed to determine service destination", zap.Error(err)) + return + } + + // If cluster IP is healthy and we haven't scaled down, short circuit. + if rw.clusterIPHealthy { + rw.logger.Debugf("ClusterIP %s already probed (backends: %d)", dest, len(dests)) + rw.sendUpdate(dest, dests) + return + } + + // If clusterIP is healthy send this update and we are done. + if ok, err := rw.probeClusterIP(dest); err != nil { + rw.logger.Errorw("Failed to probe clusterIP "+dest, zap.Error(err)) + } else if ok { + // We can reach here only iff pods are not successfully individually probed + // but ClusterIP conversely has been successfully probed. 
+ rw.podsAddressable = false + rw.logger.Debugf("ClusterIP is successfully probed: %s (backends: %d)", dest, len(dests)) + rw.clusterIPHealthy = true + rw.healthyPods = nil + rw.sendUpdate(dest, dests) + } +} + +func (rw *revisionWatcher) run(probeFrequency time.Duration) { + defer close(rw.done) + + var dests sets.String + timer := time.NewTicker(probeFrequency) + defer timer.Stop() + + var tickCh <-chan time.Time + for { + // If we have at least one pod and either there are pods that have not been + // successfully probed or clusterIP has not been probed (no pod addressability), + // then we want to probe on timer. + rw.logger.Debugf("Dests: %+v, healthy dests: %+v, clusterIP: %v", dests, rw.healthyPods, rw.clusterIPHealthy) + if len(dests) > 0 && !(rw.clusterIPHealthy || dests.Equal(rw.healthyPods)) { + rw.logger.Debug("Probing on timer") + tickCh = timer.C + } else { + rw.logger.Debug("Not Probing on timer") + tickCh = nil + } + + select { + case <-rw.stopCh: + return + case x := <-rw.destsCh: + dests = x + case <-tickCh: + } + + rw.checkDests(dests) + } +} + +// revisionBackendsManager listens to revision endpoints and keeps track of healthy +// l4 dests which can be used to reach a revision +type revisionBackendsManager struct { + ctx context.Context + revisionLister servinglisters.RevisionLister + serviceLister corev1listers.ServiceLister + + revisionWatchers map[types.NamespacedName]*revisionWatcher + revisionWatchersMux sync.RWMutex + + updateCh chan revisionDestsUpdate + transport http.RoundTripper + logger *zap.SugaredLogger + probeFrequency time.Duration +} + +// NewRevisionBackendsManager returns a new RevisionBackendsManager with default +// probe time out. +func newRevisionBackendsManager(ctx context.Context, tr http.RoundTripper) *revisionBackendsManager { + return newRevisionBackendsManagerWithProbeFrequency(ctx, tr, probeFrequency) +} + +// newRevisionBackendsManagerWithProbeFrequency creates a fully spec'd RevisionBackendsManager. 
+func newRevisionBackendsManagerWithProbeFrequency(ctx context.Context, tr http.RoundTripper, + probeFreq time.Duration) *revisionBackendsManager { + rbm := &revisionBackendsManager{ + ctx: ctx, + revisionLister: revisioninformer.Get(ctx).Lister(), + serviceLister: serviceinformer.Get(ctx).Lister(), + revisionWatchers: make(map[types.NamespacedName]*revisionWatcher), + updateCh: make(chan revisionDestsUpdate), + transport: tr, + logger: logging.FromContext(ctx), + probeFrequency: probeFrequency, + } + endpointsInformer := endpointsinformer.Get(ctx) + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: reconciler.ChainFilterFuncs( + reconciler.LabelExistsFilterFunc(serving.RevisionUID), + // We are only interested in the private services, since that is + // what is populated by the actual revision backends. + reconciler.LabelFilterFunc(networking.ServiceTypeKey, string(networking.ServiceTypePrivate), false), + ), + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: rbm.endpointsUpdated, + UpdateFunc: controller.PassNew(rbm.endpointsUpdated), + DeleteFunc: rbm.endpointsDeleted, + }, + }) + + go func() { + // updateCh can only be closed after revisionWatchers are done running + defer close(rbm.updateCh) + + // Wait for cancellation + <-rbm.ctx.Done() + + // Wait for all revisionWatchers to be done + rbm.revisionWatchersMux.Lock() + defer rbm.revisionWatchersMux.Unlock() + for _, rw := range rbm.revisionWatchers { + <-rw.done + } + }() + + return rbm +} + +// Returns channel where destination updates are sent to. 
+func (rbm *revisionBackendsManager) updates() <-chan revisionDestsUpdate { + return rbm.updateCh +} + +func (rbm *revisionBackendsManager) getRevisionProtocol(revID types.NamespacedName) (networking.ProtocolType, error) { + revision, err := rbm.revisionLister.Revisions(revID.Namespace).Get(revID.Name) + if err != nil { + return "", err + } + return revision.GetProtocol(), nil +} + +func (rbm *revisionBackendsManager) getOrCreateRevisionWatcher(rev types.NamespacedName) (*revisionWatcher, error) { + rbm.revisionWatchersMux.Lock() + defer rbm.revisionWatchersMux.Unlock() + + rwCh, ok := rbm.revisionWatchers[rev] + if !ok { + proto, err := rbm.getRevisionProtocol(rev) + if err != nil { + return nil, err + } + + destsCh := make(chan sets.String) + rw := newRevisionWatcher(rbm.ctx, rev, proto, rbm.updateCh, destsCh, rbm.transport, rbm.serviceLister, rbm.logger) + rbm.revisionWatchers[rev] = rw + go rw.run(rbm.probeFrequency) + return rw, nil + } + + return rwCh, nil +} + +// endpointsUpdated is a handler function to be used by the Endpoints informer. +// It updates the endpoints in the RevisionBackendsManager if the hosts changed +func (rbm *revisionBackendsManager) endpointsUpdated(newObj interface{}) { + // Ignore the updates when we've terminated. 
+ select { + case <-rbm.ctx.Done(): + return + default: + } + rbm.logger.Debugf("Endpoints updated: %#v", newObj) + endpoints := newObj.(*corev1.Endpoints) + revID := types.NamespacedName{endpoints.Namespace, endpoints.Labels[serving.RevisionLabelKey]} + + rw, err := rbm.getOrCreateRevisionWatcher(revID) + if err != nil { + rbm.logger.With(zap.Error(err)).Errorf("Failed to get revision watcher for revision %q", revID.String()) + return + } + dests := endpointsToDests(endpoints, networking.ServicePortName(rw.protocol)) + rbm.logger.Debugf("Updating Endpoints: %q (backends: %d)", revID.String(), len(dests)) + select { + case <-rbm.ctx.Done(): + return + case rw.destsCh <- dests: + } +} + +// deleteRevisionWatcher deletes the revision watcher for rev if it exists. It expects +// a write lock is held on revisionWatchersMux when calling. +func (rbm *revisionBackendsManager) deleteRevisionWatcher(rev types.NamespacedName) { + if rw, ok := rbm.revisionWatchers[rev]; ok { + rw.cancel() + delete(rbm.revisionWatchers, rev) + } +} + +func (rbm *revisionBackendsManager) endpointsDeleted(obj interface{}) { + // Ignore the updates when we've terminated. 
+ select { + case <-rbm.ctx.Done(): + return + default: + } + ep := obj.(*corev1.Endpoints) + revID := types.NamespacedName{ep.Namespace, ep.Labels[serving.RevisionLabelKey]} + + rbm.logger.Debugf("Deleting endpoint %q", revID.String()) + rbm.revisionWatchersMux.Lock() + defer rbm.revisionWatchersMux.Unlock() + rbm.deleteRevisionWatcher(revID) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends_test.go b/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends_test.go new file mode 100644 index 0000000000..3ef6ebce53 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/revision_backends_test.go @@ -0,0 +1,1104 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "errors" + "net/http" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakeendpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + fakeserviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + "knative.dev/pkg/controller" + "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + rtesting "knative.dev/pkg/reconciler/testing" + activatortest "knative.dev/serving/pkg/activator/testing" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + "knative.dev/serving/pkg/queue" + + . "knative.dev/pkg/logging/testing" +) + +const ( + testNamespace = "test-namespace" + testRevision = "test-revision" +) + +// revisionCC1 - creates a revision with concurrency == 1. 
+func revisionCC1(revID types.NamespacedName, protocol networking.ProtocolType) *v1alpha1.Revision { + return revision(revID, protocol, 1) +} + +func revision(revID types.NamespacedName, protocol networking.ProtocolType, cc int64) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: revID.Namespace, + Name: revID.Name, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(cc), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Ports: []corev1.ContainerPort{{ + Name: string(protocol), + }}, + }}, + }, + }, + }, + } +} + +func privateSKSService(revID types.NamespacedName, clusterIP string, ports []corev1.ServicePort) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: revID.Namespace, + Name: revID.Name, + Labels: map[string]string{ + serving.RevisionLabelKey: revID.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePrivate), + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: clusterIP, + Ports: ports, + }, + } +} + +func waitForRevisionBackedMananger(t *testing.T, rbm *revisionBackendsManager) { + timeout := time.After(200 * time.Millisecond) + for { + select { + // rbm.updates() gets closed after all revisionWatchers have finished + case _, ok := <-rbm.updates(): + if !ok { + return + } + case <-timeout: + t.Error("Timed out waiting for revisionBackendManager to finish") + return + } + } +} + +func TestRevisionWatcher(t *testing.T) { + logger := TestLogger(t) + for _, tc := range []struct { + name string + dests []string + protocol networking.ProtocolType + clusterPort corev1.ServicePort + clusterIP string + expectUpdates []revisionDestsUpdate + probeHostResponses map[string][]activatortest.FakeResponse + initialClusterIPState bool + noPodAddressability bool // This keeps the test defs shorter. 
+ }{{ + name: "single healthy podIP", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "128.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "single http2 podIP", + dests: []string{"128.0.0.1:1234"}, + protocol: networking.ProtocolH2C, + clusterPort: corev1.ServicePort{ + Name: "http2", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "single http2 clusterIP", + dests: []string{"128.0.0.1:1234"}, + protocol: networking.ProtocolH2C, + clusterPort: corev1.ServicePort{ + Name: "http2", + Port: 1234, + }, + clusterIP: "129.0.0.1", + noPodAddressability: true, + expectUpdates: []revisionDestsUpdate{{ClusterIPDest: "129.0.0.1:1234", Dests: sets.NewString("128.0.0.1:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + }, + }, { + name: "no pods", + dests: []string{}, + clusterIP: "129.0.0.1", + }, { + name: "no pods, was happy", + dests: []string{}, + clusterIP: "129.0.0.1", + initialClusterIPState: true, + }, { + name: "single unavailable podIP", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + }}, + "128.0.0.1:1234": {{ + Code: 
http.StatusServiceUnavailable, + }}, + }, + }, { + name: "single error podIP", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + }}, + "128.0.0.1:1234": {{ + Err: errors.New("Fake error"), + }}, + }, + }, { + name: "podIP slow ready", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "multiple healthy podIP", + dests: []string{"128.0.0.1:1234", "128.0.0.2:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "128.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.2:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "one healthy one unhealthy podIP", + dests: []string{"128.0.0.1:1234", "128.0.0.2:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{Dests: sets.NewString("128.0.0.2:1234")}}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.2:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "one healthy one unhealthy podIP then both 
healthy", + dests: []string{"128.0.0.1:1234", "128.0.0.2:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 4321, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{ + {Dests: sets.NewString("128.0.0.2:1234")}, + {Dests: sets.NewString("128.0.0.2:1234", "128.0.0.1:1234")}, + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.2:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + }, { + name: "clusterIP slow ready, no pod addressability", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1234, + }, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{ + ClusterIPDest: "129.0.0.1:1234", + Dests: sets.NewString("128.0.0.1:1234"), + }}, + noPodAddressability: true, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.1:1234": {{ + Err: errors.New("podIP transport error"), + }, { + Err: errors.New("podIP transport error"), + }}, + }, + }, { + name: "clusterIP ready, no pod addressability", + dests: []string{"128.0.0.1:1234"}, + clusterPort: corev1.ServicePort{ + Name: "http", + Port: 1235, + }, + noPodAddressability: true, + clusterIP: "129.0.0.1", + expectUpdates: []revisionDestsUpdate{{ + ClusterIPDest: "129.0.0.1:1235", + Dests: sets.NewString("128.0.0.1:1234"), + }}, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.1:1234": {{ + Err: errors.New("podIP transport error"), + }}, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: testRevision, + ProbeHostResponses: 
tc.probeHostResponses, + } + rt := network.RoundTripperFunc(fakeRT.RT) + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + updateCh := make(chan revisionDestsUpdate, len(tc.expectUpdates)+1) + defer close(updateCh) + + // This gets closed up by revisionWatcher + destsCh := make(chan sets.String) + + // Default for protocol is http1 + if tc.protocol == "" { + tc.protocol = networking.ProtocolHTTP1 + } + + fake := fakekubeclient.Get(ctx) + informer := fakeserviceinformer.Get(ctx) + + revID := types.NamespacedName{Namespace: testNamespace, Name: testRevision} + if tc.clusterIP != "" { + svc := privateSKSService(revID, tc.clusterIP, []corev1.ServicePort{tc.clusterPort}) + fake.CoreV1().Services(svc.Namespace).Create(svc) + informer.Informer().GetIndexer().Add(svc) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), informer.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rw := newRevisionWatcher( + ctx, + revID, + tc.protocol, + updateCh, + destsCh, + rt, + informer.Lister(), + logger, + ) + rw.clusterIPHealthy = tc.initialClusterIPState + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + rw.run(100 * time.Millisecond) + }() + + destsCh <- sets.NewString(tc.dests...) + + updates := []revisionDestsUpdate{} + for i := 0; i < len(tc.expectUpdates); i++ { + select { + case update := <-updateCh: + updates = append(updates, update) + case <-time.After(200 * time.Millisecond): + t.Error("Timed out waiting for update event") + } + } + if got, want := rw.podsAddressable, !tc.noPodAddressability; got != want { + t.Errorf("Revision pod addressability = %v, want: %v", got, want) + } + + // Shutdown run loop. 
+ cancel() + + wg.Wait() + assertChClosed(t, rw.done) + + // Autofill out Rev in expectUpdates + for i := range tc.expectUpdates { + tc.expectUpdates[i].Rev = revID + } + + if got, want := updates, tc.expectUpdates; !cmp.Equal(got, want, cmpopts.EquateEmpty()) { + t.Errorf("revisionDests updates = %v, want: %v, diff (-want, +got):\n %s", got, want, cmp.Diff(want, got)) + } + }) + } +} + +func assertChClosed(t *testing.T, ch chan struct{}) { + defer func() { + if r := recover(); r == nil { + t.Errorf("the channel was not closed") + } + }() + select { + case ch <- struct{}{}: + // Panics if the channel is closed + default: + // Prevents from blocking forever if the channel is not closed + } +} + +func epSubset(port int32, portName string, ips []string) *corev1.EndpointSubset { + ss := &corev1.EndpointSubset{ + Ports: []corev1.EndpointPort{{ + Name: portName, + Port: port, + }}, + } + for _, ip := range ips { + ss.Addresses = append(ss.Addresses, corev1.EndpointAddress{IP: ip}) + } + return ss +} + +func ep(revL string, port int32, portName string, ips ...string) *corev1.Endpoints { + return &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: revL + "-ep", + Labels: map[string]string{ + serving.RevisionUID: time.Now().Format("150415.000"), + networking.ServiceTypeKey: string(networking.ServiceTypePrivate), + serving.RevisionLabelKey: revL, + }, + }, + Subsets: []corev1.EndpointSubset{*epSubset(port, portName, ips)}, + } +} + +func TestRevisionBackendManagerAddEndpoint(t *testing.T) { + // Make sure we wait out all the jitter in the system. 
+ for _, tc := range []struct { + name string + endpointsArr []*corev1.Endpoints + revisions []*v1alpha1.Revision + services []*corev1.Service + probeHostResponses map[string][]activatortest.FakeResponse + expectDests map[types.NamespacedName]revisionDestsUpdate + updateCnt int + }{{ + name: "add slow healthy", + endpointsArr: []*corev1.Endpoints{ep(testRevision, 1234, "http", "128.0.0.1")}, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, testRevision}, "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + Body: queue.Name, + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + expectDests: map[types.NamespacedName]revisionDestsUpdate{ + {Namespace: testNamespace, Name: testRevision}: { + Dests: sets.NewString("128.0.0.1:1234"), + }, + }, + updateCnt: 1, + }, { + name: "add slow ready http2", + endpointsArr: []*corev1.Endpoints{ep(testRevision, 1234, "http2", "128.0.0.1")}, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolH2C), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, testRevision}, "129.0.0.1", + []corev1.ServicePort{{Name: "http2", Port: 1234}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + Body: queue.Name, + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + expectDests: map[types.NamespacedName]revisionDestsUpdate{ + {Namespace: testNamespace, Name: testRevision}: { + Dests: 
sets.NewString("128.0.0.1:1234"), + }, + }, + updateCnt: 1, + }, { + name: "multiple revisions", + endpointsArr: []*corev1.Endpoints{ + ep("test-revision1", 1234, "http", "128.0.0.1"), + ep("test-revision2", 1235, "http", "128.1.0.2"), + }, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, "test-revision1"}, networking.ProtocolHTTP1), + revisionCC1(types.NamespacedName{testNamespace, "test-revision2"}, networking.ProtocolHTTP1), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, "test-revision1"}, "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 2345}}), + privateSKSService(types.NamespacedName{testNamespace, "test-revision2"}, "129.0.0.2", + []corev1.ServicePort{{Name: "http", Port: 2345}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:2345": {{Err: errors.New("clusterIP transport error")}}, + "129.0.0.2:2345": {{Err: errors.New("clusterIP transport error")}}, + }, + expectDests: map[types.NamespacedName]revisionDestsUpdate{ + {Namespace: testNamespace, Name: "test-revision1"}: { + Dests: sets.NewString("128.0.0.1:1234"), + }, + {Namespace: testNamespace, Name: "test-revision2"}: { + Dests: sets.NewString("128.1.0.2:1235"), + }, + }, + updateCnt: 2, + }, { + name: "slow podIP", + endpointsArr: []*corev1.Endpoints{ep(testRevision, 1234, "http", "128.0.0.1")}, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, testRevision}, "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + expectDests: 
map[types.NamespacedName]revisionDestsUpdate{ + {Namespace: testNamespace, Name: testRevision}: { + Dests: sets.NewString("128.0.0.1:1234"), + }, + }, + updateCnt: 1, + }, { + name: "no pod addressability", + endpointsArr: []*corev1.Endpoints{ep(testRevision, 1234, "http", "128.0.0.1")}, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, testRevision}, "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + "128.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + }}, + }, + expectDests: map[types.NamespacedName]revisionDestsUpdate{ + {Namespace: testNamespace, Name: testRevision}: { + ClusterIPDest: "129.0.0.1:1234", + Dests: sets.NewString("128.0.0.1:1234"), + }, + }, + updateCnt: 1, + }, { + name: "unhealthy", + endpointsArr: []*corev1.Endpoints{ep(testRevision, 1234, "http", "128.0.0.1")}, + revisions: []*v1alpha1.Revision{ + revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + }, + services: []*corev1.Service{ + privateSKSService(types.NamespacedName{testNamespace, testRevision}, "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}), + }, + probeHostResponses: map[string][]activatortest.FakeResponse{ + "129.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }}, + "128.0.0.1:1234": {{ + Code: http.StatusServiceUnavailable, + Body: queue.Name, + }}, + }, + expectDests: map[types.NamespacedName]revisionDestsUpdate{}, + }} { + t.Run(tc.name, func(t *testing.T) { + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: testRevision, + ProbeHostResponses: tc.probeHostResponses, + } + rt := network.RoundTripperFunc(fakeRT.RT) + + ctx, cancel, _ := 
rtesting.SetupFakeContextWithCancel(t) + + endpointsInformer := fakeendpointsinformer.Get(ctx) + serviceInformer := fakeserviceinformer.Get(ctx) + revisions := fakerevisioninformer.Get(ctx) + + // Add the revision we're testing. + for _, rev := range tc.revisions { + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + revisions.Informer().GetIndexer().Add(rev) + } + + for _, svc := range tc.services { + fakekubeclient.Get(ctx).CoreV1().Services(testNamespace).Create(svc) + serviceInformer.Informer().GetIndexer().Add(svc) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), endpointsInformer.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rbm := newRevisionBackendsManagerWithProbeFrequency(ctx, rt, 50*time.Millisecond) + + for _, ep := range tc.endpointsArr { + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(ep) + endpointsInformer.Informer().GetIndexer().Add(ep) + } + + revDests := make(map[types.NamespacedName]revisionDestsUpdate) + // Wait for updateCb to be called + for i := 0; i < tc.updateCnt; i++ { + select { + case update := <-rbm.updates(): + revDests[update.Rev] = update + case <-time.After(300 * time.Millisecond): + t.Errorf("Timed out waiting for update event") + } + } + + // Update expectDests so we dont have to write out Rev for each test case + for rev, destUpdate := range tc.expectDests { + destUpdate.Rev = rev + tc.expectDests[rev] = destUpdate + } + + if got, want := revDests, tc.expectDests; !cmp.Equal(got, want) { + t.Errorf("RevisionDests = %v, want: %v, diff(-want,+got):%s\n", got, want, cmp.Diff(want, got)) + } + + cancel() + waitForRevisionBackedMananger(t, rbm) + }) + } +} + +func TestCheckDests(t *testing.T) { + // This test covers some edge cases in `checkDests` which are next to impossible to + // test via tests above. 
+ + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + svc := privateSKSService( + types.NamespacedName{testNamespace, testRevision}, + "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}, + ) + fakekubeclient.Get(ctx).CoreV1().Services(testNamespace).Create(svc) + si := fakeserviceinformer.Get(ctx) + si.Informer().GetIndexer().Add(svc) + + waitInformers, err := controller.RunInformers(ctx.Done(), si.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + // Make it buffered,so that we can make the test linear. + uCh := make(chan revisionDestsUpdate, 1) + dCh := make(chan struct{}) + rw := &revisionWatcher{ + clusterIPHealthy: true, + podsAddressable: false, + rev: types.NamespacedName{testNamespace, testRevision}, + updateCh: uCh, + serviceLister: si.Lister(), + logger: TestLogger(t), + stopCh: dCh, + } + rw.checkDests(sets.NewString("10.1.1.5")) + select { + case <-uCh: + // Success. + default: + t.Error("Expected update but it never went out.") + } + + close(dCh) + rw.checkDests(sets.NewString("10.1.1.5")) + select { + case <-uCh: + t.Error("Expected no update but got one") + default: + // Success. + } +} + +func TestCheckDestsSwinging(t *testing.T) { + // This test permits us to test the case when endpoints actually change + // underneath (e.g. pod crash/restart). 
+ ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + svc := privateSKSService( + types.NamespacedName{testNamespace, testRevision}, + "10.5.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}, + ) + + fakekubeclient.Get(ctx).CoreV1().Services(testNamespace).Create(svc) + si := fakeserviceinformer.Get(ctx) + si.Informer().GetIndexer().Add(svc) + + waitInformers, err := controller.RunInformers(ctx.Done(), si.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: testRevision, + ProbeHostResponses: map[string][]activatortest.FakeResponse{ + "10.5.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }}, + "10.0.0.1:1234": {{ + Err: errors.New("podIP transport error"), + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + "10.0.0.2:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }, { + Err: errors.New("podIP transport error"), + }, { + Code: http.StatusOK, + Body: queue.Name, + }}, + "10.0.0.3:1234": {{ + Code: http.StatusOK, + Body: queue.Name, + }}, + }, + } + + // Make it buffered,so that we can make the test linear. + uCh := make(chan revisionDestsUpdate, 1) + dCh := make(chan struct{}) + rw := &revisionWatcher{ + rev: types.NamespacedName{testNamespace, testRevision}, + updateCh: uCh, + serviceLister: si.Lister(), + logger: TestLogger(t), + stopCh: dCh, + podsAddressable: true, + transport: network.RoundTripperFunc(fakeRT.RT), + } + // First not ready, second good, clusterIP: not ready. 
+ rw.checkDests(sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")) + want := revisionDestsUpdate{ + Rev: types.NamespacedName{testNamespace, testRevision}, + ClusterIPDest: "", + Dests: sets.NewString("10.0.0.2:1234"), + } + + select { + case got := <-uCh: + if !cmp.Equal(got, want) { + t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) + } + default: + t.Error("Expected update but it never went out.") + } + + // Second gone, first becomes ready, clusterIP still not ready. + rw.checkDests(sets.NewString("10.0.0.1:1234")) + select { + case got := <-uCh: + want.Dests = sets.NewString("10.0.0.1:1234") + if !cmp.Equal(got, want) { + t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) + } + default: + t.Error("Expected update but it never went out.") + } + + // Second is back, but not healthy yet. + rw.checkDests(sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")) + select { + case got := <-uCh: + // No update should be sent out, since there's only healthy pod, same as above. + t.Errorf("Got = %#v, expected no update", got) + default: + } + + // All pods are happy now. + rw.checkDests(sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")) + select { + case got := <-uCh: + want.Dests = sets.NewString("10.0.0.2:1234", "10.0.0.1:1234") + if !cmp.Equal(got, want) { + t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) + } + default: + t.Error("Expected update but it never went out.") + } + + // Make sure we do not send out redundant updates. + rw.checkDests(sets.NewString("10.0.0.1:1234", "10.0.0.2:1234")) + select { + case got := <-uCh: + t.Errorf("Expected no update, but got %#v", got) + default: + // Success. + } + + // Swing to a different pods. 
+ rw.checkDests(sets.NewString("10.0.0.3:1234", "10.0.0.2:1234")) + select { + case got := <-uCh: + want.Dests = sets.NewString("10.0.0.2:1234", "10.0.0.3:1234") + if !cmp.Equal(got, want) { + t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) + } + default: + t.Error("Expected update but it never went out.") + } + + // Scale down by 1. + rw.checkDests(sets.NewString("10.0.0.2:1234")) + select { + case got := <-uCh: + want.Dests = sets.NewString("10.0.0.2:1234") + if !cmp.Equal(got, want) { + t.Errorf("Update = %#v, want: %#v, diff: %s", got, want, cmp.Diff(want, got)) + } + default: + t.Error("Expected update but it never went out.") + } +} + +func TestRevisionDeleted(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + svc := privateSKSService( + types.NamespacedName{testNamespace, testRevision}, + "129.0.0.1", + []corev1.ServicePort{{Name: "http", Port: 1234}}, + ) + fakekubeclient.Get(ctx).CoreV1().Services(testNamespace).Create(svc) + si := fakeserviceinformer.Get(ctx) + si.Informer().GetIndexer().Add(svc) + + ei := fakeendpointsinformer.Get(ctx) + ep := ep(testRevision, 1234, "http", "128.0.0.1") + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(ep) + waitInformers, err := controller.RunInformers(ctx.Done(), ei.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rev := revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + ri := fakerevisioninformer.Get(ctx) + ri.Informer().GetIndexer().Add(rev) + + fakeRT := activatortest.FakeRoundTripper{} + rt := network.RoundTripperFunc(fakeRT.RT) + + rbm := newRevisionBackendsManager(ctx, rt) + // Make some movements. 
+ ei.Informer().GetIndexer().Add(ep) + select { + case <-rbm.updates(): + case <-time.After(time.Second * 2): + t.Errorf("Timedout waiting for initial response") + } + // Now delete the endpoints. + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Delete(ep.Name, &metav1.DeleteOptions{}) + select { + case r := <-rbm.updates(): + t.Errorf("Unexpected update: %#v", r) + case <-time.After(time.Millisecond * 200): + // Wait to make sure the callbacks are executed. + } + + cancel() + waitForRevisionBackedMananger(t, rbm) +} + +func TestServiceDoesNotExist(t *testing.T) { + // Tests when the service is not available. + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + ei := fakeendpointsinformer.Get(ctx) + eps := ep(testRevision, 1234, "http", "128.0.0.1") + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(eps) + waitInformers, err := controller.RunInformers(ctx.Done(), ei.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rev := revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + ri := fakerevisioninformer.Get(ctx) + ri.Informer().GetIndexer().Add(rev) + + // This will make sure we go to the cluster IP probing. + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: testRevision, + ProbeHostResponses: map[string][]activatortest.FakeResponse{ + // To ensure that if we fail, when we get into the second iteration + // of probing if the test is not yet complete, we store 2 items here. + "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }}, + }, + } + rt := network.RoundTripperFunc(fakeRT.RT) + + rbm := newRevisionBackendsManager(ctx, rt) + // Make some movements to generate a checkDests call. 
+ ei.Informer().GetIndexer().Add(eps) + select { + case x := <-rbm.updates(): + // We can't probe endpoints (see RT above) and we can't get to probe + // cluster IP. But if the service is accessible then we will and probing will + // succeed since RT has no rules for that. + t.Errorf("Unexpected update, should have had none: %v", x) + case <-time.After(200 * time.Millisecond): + } + + cancel() + waitForRevisionBackedMananger(t, rbm) +} + +func TestServiceMoreThanOne(t *testing.T) { + // Tests when the service is not available. + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + ei := fakeendpointsinformer.Get(ctx) + eps := ep(testRevision, 1234, "http", "128.0.0.1") + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(eps) + waitInformers, err := controller.RunInformers(ctx.Done(), ei.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rev := revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + ri := fakerevisioninformer.Get(ctx) + ri.Informer().GetIndexer().Add(rev) + + // Now let's create two! + for _, num := range []string{"11", "12"} { + svc := privateSKSService( + types.NamespacedName{testNamespace, testRevision}, + "129.0.0."+num, + []corev1.ServicePort{{Name: "http", Port: 1234}}, + ) + // Modify the name so both can be created. + svc.Name = svc.Name + num + fakekubeclient.Get(ctx).CoreV1().Services(testNamespace).Create(svc) + si := fakeserviceinformer.Get(ctx) + si.Informer().GetIndexer().Add(svc) + } + + // Make sure fake probe failures ensue. + fakeRT := activatortest.FakeRoundTripper{ + ExpectHost: testRevision, + ProbeHostResponses: map[string][]activatortest.FakeResponse{ + // To ensure that if we fail, when we get into the second iteration + // of probing if the test is not yet complete, we store 2 items here. 
+ "128.0.0.1:1234": {{ + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }, { + Err: errors.New("clusterIP transport error"), + }}, + }, + } + + rt := network.RoundTripperFunc(fakeRT.RT) + rbm := newRevisionBackendsManager(ctx, rt) + ei.Informer().GetIndexer().Add(eps) + select { + case x := <-rbm.updates(): + // We can't probe endpoints (see RT above) and we can't get to probe + // cluster IP. But if the service is accessible then we will and probing will + // succeed since RT has no rules for that. + t.Errorf("Unexpected update, should have had none: %v", x) + case <-time.After(200 * time.Millisecond): + } + + cancel() + waitForRevisionBackedMananger(t, rbm) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/throttler.go b/test/vendor/knative.dev/serving/pkg/activator/net/throttler.go new file mode 100644 index 0000000000..76c0eaf963 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/throttler.go @@ -0,0 +1,677 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "context" + "errors" + "math" + "math/rand" + "sort" + "sync" + "sync/atomic" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + "knative.dev/pkg/network" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/resources" +) + +type podTracker struct { + dest string + b breaker +} + +func (p *podTracker) Capacity() int { + if p.b == nil { + return 1 + } + return p.b.Capacity() +} + +func (p *podTracker) UpdateConcurrency(c int) error { + if p.b == nil { + return nil + } + return p.b.UpdateConcurrency(c) +} + +func (p *podTracker) Reserve(ctx context.Context) (func(), bool) { + if p.b == nil { + return noop, true + } + return p.b.Reserve(ctx) +} + +type breaker interface { + Capacity() int + Maybe(ctx context.Context, thunk func()) error + UpdateConcurrency(int) error + Reserve(ctx context.Context) (func(), bool) +} + +var ErrActivatorOverload = errors.New("activator overload") + +type revisionThrottler struct { + revID types.NamespacedName + containerConcurrency int + + // Holds the current number of backends. This is used for when we get an activatorCount update and + // therefore need to recalculate capacity + backendCount int + + // This is a breaker for the revision as a whole. 
try calls first pass through + // this breaker and are either called with clusterIPDest or go through selecting + // a podIPTracker and are then called. + breaker breaker + + // This will be non empty when we're able to use pod addressing. + podTrackers []*podTracker + + // Effective trackers that are assigned to this Activator. + // This is a subset of podIPTrackers. + assignedTrackers []*podTracker + + // If we dont have a healthy clusterIPTracker this is set to nil, otherwise + // it is the l4dest for this revision's private clusterIP. + clusterIPTracker *podTracker + + // mux guards "throttle state" which is the state we use during the request path. This + // is trackers, clusterIPDest. + mux sync.RWMutex + + // used to atomically calculate and set capacity + capacityMux sync.Mutex + + logger *zap.SugaredLogger +} + +func newRevisionThrottler(revID types.NamespacedName, + containerConcurrency int, + breakerParams queue.BreakerParams, + logger *zap.SugaredLogger) *revisionThrottler { + logger = logger.With(zap.String(logkey.Key, revID.String())) + var revBreaker breaker + if containerConcurrency == 0 { + revBreaker = newInfiniteBreaker(logger) + } else { + revBreaker = queue.NewBreaker(breakerParams) + } + return &revisionThrottler{ + revID: revID, + containerConcurrency: containerConcurrency, + breaker: revBreaker, + logger: logger, + } +} + +func noop() {} + +// pickPod picks the first tracker that has open capacity if container concurrency +// if limited, random pod otherwise. +func pickPod(ctx context.Context, tgs []*podTracker, cc int) (func(), *podTracker) { + // Infinite capacity, pick random. We have to do this + // otherwise _all_ the requests will go to the first pod + // since it has unlimited capacity. + if cc == 0 { + return noop, tgs[rand.Intn(len(tgs))] + } + for _, t := range tgs { + if cb, ok := t.Reserve(ctx); ok { + return cb, t + } + } + // NB: as currently written this can never happen. 
+ return noop, nil +} + +// Returns a dest that at the moment of choosing had an open slot +// for request. +func (rt *revisionThrottler) acquireDest(ctx context.Context) (func(), *podTracker) { + rt.mux.RLock() + defer rt.mux.RUnlock() + + if rt.clusterIPTracker != nil { + return noop, rt.clusterIPTracker + } + return pickPod(ctx, rt.assignedTrackers, rt.containerConcurrency) +} + +func (rt *revisionThrottler) try(ctx context.Context, function func(string) error) error { + var ret error + + if err := rt.breaker.Maybe(ctx, func() { + cb, tracker := rt.acquireDest(ctx) + if tracker == nil { + ret = errors.New("made it through breaker but we have no clusterIP or podIPs. This should" + + " never happen" + rt.revID.String()) + return + } + defer cb() + // We already reserved a guaranteed spot. So just execute the passed functor. + ret = function(tracker.dest) + }); err != nil { + return err + } + return ret +} + +func (rt *revisionThrottler) calculateCapacity(size, activatorCount, maxConcurrency int) int { + targetCapacity := rt.containerConcurrency * size + + if size > 0 && (rt.containerConcurrency == 0 || targetCapacity > maxConcurrency) { + // If cc==0, we need to pick a number, but it does not matter, since + // infinite breaker will dole out as many tokens as it can. + targetCapacity = maxConcurrency + } else if targetCapacity > 0 { + targetCapacity = minOneOrValue(targetCapacity / minOneOrValue(activatorCount)) + } + + return targetCapacity +} + +// This makes sure we reset the capacity to the CC, since the pod +// might be reassiged to be exclusively used. +func (rt *revisionThrottler) resetTrackers() { + if rt.containerConcurrency <= 0 { + return + } + for _, t := range rt.podTrackers { + // Reset to default. 
+ t.UpdateConcurrency(rt.containerConcurrency) + } +} + +func (rt *revisionThrottler) updateCapacity(throttler *Throttler, backendCount int) { + ac := throttler.activatorCount() + + // We have to make assignments on each updateCapacity, since if number + // of activators changes, then we need to rebalance the assignedTrackers. + numTrackers := func() int { + rt.mux.Lock() + defer rt.mux.Unlock() + // We're using cluster IP. + if rt.clusterIPTracker != nil { + return 0 + } + // Infifnite capacity, assign all. + if rt.containerConcurrency == 0 { + rt.assignedTrackers = rt.podTrackers + } else { + rt.resetTrackers() + rt.assignedTrackers = assignSlice(rt.podTrackers, throttler.index(), ac, rt.containerConcurrency) + } + rt.logger.Debugf("Trackers %d/%d %v", throttler.index(), ac, rt.assignedTrackers) + return len(rt.assignedTrackers) + }() + + capacity := 0 + if numTrackers > 0 { + // Capacity is computed based off of number of trackers, + // when using pod direct routing. + capacity = rt.calculateCapacity(len(rt.podTrackers), ac, throttler.breakerParams.MaxConcurrency) + } else { + // Capacity is computed off of number of ready backends, + // when we are using clusterIP routing. + capacity = rt.calculateCapacity(backendCount, ac, throttler.breakerParams.MaxConcurrency) + } + rt.logger.Infof("Set capacity to %d (backends: %d, index: %d/%d)", + capacity, backendCount, throttler.index(), ac) + + // TODO(vagababov): analyze to see if we need this mutex at all? 
+ rt.capacityMux.Lock() + defer rt.capacityMux.Unlock() + + rt.backendCount = backendCount + rt.breaker.UpdateConcurrency(capacity) +} + +func (rt *revisionThrottler) updateThrottlerState( + throttler *Throttler, backendCount int, + trackers []*podTracker, clusterIPDest *podTracker) { + rt.logger.Infof("Updating Revision Throttler with: clusterIP = %v, trackers = %d, backends = %d activator pos %d/%d", + clusterIPDest, len(trackers), backendCount, throttler.index(), throttler.activatorCount()) + + // Update trackers / clusterIP before capacity. Otherwise we can race updating our breaker when + // we increase capacity, causing a request to fall through before a tracker is added, causing an + // incorrect LB decision. + if func() bool { + rt.mux.Lock() + defer rt.mux.Unlock() + rt.podTrackers = trackers + rt.clusterIPTracker = clusterIPDest + return clusterIPDest != nil || len(trackers) > 0 + }() { + // If we have an address to target, then pass through an accurate + // accounting of the number of backends. + rt.updateCapacity(throttler, backendCount) + } else { + // If we do not have an address to target, then we should treat it + // as though we have zero backends. + rt.updateCapacity(throttler, 0) + } +} + +// pickIndices picks the indices for the slicing. +func pickIndices(numTrackers, selfIndex, numActivators int) (beginIndex, endIndex, remnants int) { + if numActivators > numTrackers { + // 1. We have fewer pods than than activators. Assign the pod in round robin fashion. + // NB: when we implement subsetting this will be less of a problem. + // e.g. lt=3, #ac = 5; for selfIdx = 3 => 3 % 3 = 0, or for si = 5 => 5%3 = 2 + beginIndex = selfIndex % numTrackers + endIndex = beginIndex + 1 + return + } + + // 2. distribute equally and share the remnants + // among all the activatos, but with reduced capacity, if finite. 
+ sliceSize := numTrackers / numActivators + remnants = numTrackers % numActivators + beginIndex = selfIndex * sliceSize + endIndex = beginIndex + sliceSize + return +} + +// assignSlice picks a subset of the individual pods to send requests to +// for this Activator instance. This only matters in case of direct +// to pod IP routing, and is irrelevant, when ClusterIP is used. +func assignSlice(trackers []*podTracker, selfIndex, numActivators, cc int) []*podTracker { + // When we're unassigned, doesn't matter what we return. + lt := len(trackers) + if selfIndex == -1 || lt <= 1 { + return trackers + } + // Sort, so we get more or less stable results. + sort.Slice(trackers, func(i, j int) bool { + return trackers[i].dest < trackers[j].dest + }) + bi, ei, remnants := pickIndices(lt, selfIndex, numActivators) + x := append(trackers[:0:0], trackers[bi:ei]...) + if remnants > 0 { + tail := trackers[len(trackers)-remnants:] + // We shuffle the tail, to ensure that pods in the tail get better + // load distribution, since we sort the pods above, this puts more requests + // on the very first tail pod, than on the others. + rand.Shuffle(remnants, func(i, j int) { + tail[i], tail[j] = tail[j], tail[i] + }) + // We need minOneOrValue in order for cc==0 to work. + dcc := minOneOrValue(int(math.Ceil(float64(cc) / float64(numActivators)))) + // This is basically: x = append(x, trackers[len(trackers)-remnants:]...) + // But we need to update the capacity. 
+		for _, t := range tail {
+			t.UpdateConcurrency(dcc)
+			x = append(x, t)
+		}
+	}
+	return x
+}
+
+// This function will never be called in parallel but try can be called in parallel to this so we need
+// to lock on updating concurrency / trackers
+func (rt *revisionThrottler) handleUpdate(throttler *Throttler, update revisionDestsUpdate) {
+	rt.logger.Debugf("Handling update w/ ClusterIP=%q, %d ready and dests: %v",
+		update.ClusterIPDest, len(update.Dests), update.Dests)
+
+	// ClusterIP is not yet ready, so we want to send requests directly to the pods.
+	// NB: this will not be called in parallel, thus we can build a new podIPTrackers
+	// array before taking out a lock.
+	if update.ClusterIPDest == "" {
+		// Create a map for fast lookup of existing trackers.
+		trackersMap := make(map[string]*podTracker, len(rt.podTrackers))
+		for _, tracker := range rt.podTrackers {
+			trackersMap[tracker.dest] = tracker
+		}
+
+		trackers := make([]*podTracker, 0, len(update.Dests))
+
+		// Loop over dests, reuse existing tracker if we have one, otherwise create
+		// a new one.
+		for newDest := range update.Dests {
+			tracker, ok := trackersMap[newDest]
+			if !ok {
+				if rt.containerConcurrency == 0 {
+					tracker = &podTracker{dest: newDest}
+				} else {
+					tracker = &podTracker{
+						dest: newDest,
+						b: queue.NewBreaker(queue.BreakerParams{
+							QueueDepth:      throttler.breakerParams.QueueDepth,
+							MaxConcurrency:  rt.containerConcurrency,
+							InitialCapacity: rt.containerConcurrency, // Presume full unused capacity.
+						}),
+					}
+				}
+			}
+			trackers = append(trackers, tracker)
+		}
+
+		rt.updateThrottlerState(throttler, len(update.Dests), trackers, nil /*clusterIP*/)
+		return
+	}
+
+	rt.updateThrottlerState(throttler, len(update.Dests), nil /*trackers*/, &podTracker{
+		dest: update.ClusterIPDest,
+	})
+}
+
+// Throttler load balances requests to revisions based on capacity. When `Run` is called it listens for
+// updates to revision backends and decides when and where to forward a request.
+type Throttler struct { + revisionThrottlers map[types.NamespacedName]*revisionThrottler + revisionThrottlersMutex sync.RWMutex + breakerParams queue.BreakerParams + revisionLister servinglisters.RevisionLister + numActivators int32 // Total number of activators. + activatorIndex int32 // The assigned index of this activator, -1 is Activator is not expected to receive traffic. + ipAddress string // The IP address of this activator. + logger *zap.SugaredLogger +} + +// NewThrottler creates a new Throttler +func NewThrottler(ctx context.Context, + breakerParams queue.BreakerParams, + ipAddr string) *Throttler { + revisionInformer := revisioninformer.Get(ctx) + t := &Throttler{ + revisionThrottlers: make(map[types.NamespacedName]*revisionThrottler), + breakerParams: breakerParams, + revisionLister: revisionInformer.Lister(), + ipAddress: ipAddr, + activatorIndex: -1, // Unset yet. + logger: logging.FromContext(ctx), + } + + // Watch revisions to create throttler with backlog immediately and delete + // throttlers on revision delete + revisionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: t.revisionUpdated, + UpdateFunc: controller.PassNew(t.revisionUpdated), + DeleteFunc: t.revisionDeleted, + }) + + // Watch activator endpoint to maintain activator count + endpointsInformer := endpointsinformer.Get(ctx) + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: reconciler.ChainFilterFuncs( + reconciler.NameFilterFunc(networking.ActivatorServiceName), + reconciler.NamespaceFilterFunc(system.Namespace()), + ), + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: t.activatorEndpointsUpdated, + UpdateFunc: controller.PassNew(t.activatorEndpointsUpdated), + }, + }) + + return t +} + +// Run starts the throttler and blocks until the context is done. 
+func (t *Throttler) Run(ctx context.Context) { + rbm := newRevisionBackendsManager(ctx, network.AutoTransport) + // Update channel is closed when ctx is done. + t.run(rbm.updates()) +} + +func (t *Throttler) run(updateCh <-chan revisionDestsUpdate) { + for update := range updateCh { + t.handleUpdate(update) + } + t.logger.Info("The Throttler has stopped.") +} + +// Try waits for capacity and then executes function, passing in a l4 dest to send a request +func (t *Throttler) Try(ctx context.Context, revID types.NamespacedName, function func(string) error) error { + rt, err := t.getOrCreateRevisionThrottler(revID) + if err != nil { + return err + } + return rt.try(ctx, function) +} + +func (t *Throttler) getOrCreateRevisionThrottler(revID types.NamespacedName) (*revisionThrottler, error) { + // First, see if we can succeed with just an RLock. This is in the request path so optimizing + // for this case is important + t.revisionThrottlersMutex.RLock() + revThrottler, ok := t.revisionThrottlers[revID] + t.revisionThrottlersMutex.RUnlock() + if ok { + return revThrottler, nil + } + + // Redo with a write lock since we failed the first time and may need to create + t.revisionThrottlersMutex.Lock() + defer t.revisionThrottlersMutex.Unlock() + revThrottler, ok = t.revisionThrottlers[revID] + if !ok { + rev, err := t.revisionLister.Revisions(revID.Namespace).Get(revID.Name) + if err != nil { + return nil, err + } + revThrottler = newRevisionThrottler(revID, int(rev.Spec.GetContainerConcurrency()), t.breakerParams, t.logger) + t.revisionThrottlers[revID] = revThrottler + } + return revThrottler, nil +} + +// revisionUpdated is used to ensure we have a backlog set up for a revision as soon as it is created +// rather than erroring with revision not found until a networking probe succeeds +func (t *Throttler) revisionUpdated(obj interface{}) { + rev := obj.(*v1alpha1.Revision) + revID := types.NamespacedName{rev.Namespace, rev.Name} + t.logger.Debugf("Revision update %q", 
revID.String())
+
+	if _, err := t.getOrCreateRevisionThrottler(revID); err != nil {
+		t.logger.Errorw("Failed to get revision throttler for revision "+revID.String(), zap.Error(err))
+	}
+}
+
+// revisionDeleted is to clean up revision throttlers after a revision is deleted to prevent unbounded
+// memory growth
+func (t *Throttler) revisionDeleted(obj interface{}) {
+	rev := obj.(*v1alpha1.Revision)
+	revID := types.NamespacedName{rev.Namespace, rev.Name}
+	t.logger.Debugf("Revision delete %q", revID.String())
+
+	t.revisionThrottlersMutex.Lock()
+	defer t.revisionThrottlersMutex.Unlock()
+	delete(t.revisionThrottlers, revID)
+}
+
+func (t *Throttler) handleUpdate(update revisionDestsUpdate) {
+	if rt, err := t.getOrCreateRevisionThrottler(update.Rev); err != nil {
+		if k8serrors.IsNotFound(err) {
+			t.logger.Debugf("Revision %q is not found. Probably it was removed", update.Rev.String())
+		} else {
+			t.logger.With(zap.Error(err)).Errorf(
+				"Failed to get revision throttler for revision %q", update.Rev)
+		}
+	} else {
+		rt.handleUpdate(t, update)
+	}
+}
+
+// inferIndex returns the index of this activator slice.
+// If inferIndex returns -1, it means that this activator will not receive
+// any traffic just yet so, do not participate in slicing, this happens after
+// startup, but before this activator is threaded into the endpoints
+// (which is up to 10s after reporting healthy).
+// For now we are just sorting the IP addresses of all activators
+// and finding our index in that list.
+func inferIndex(eps []string, ipAddress string) int {
+	// `eps` will contain port, so binary search of the insertion point would be fine.
+	idx := sort.SearchStrings(eps, ipAddress)
+
+	// Check if this activator is part of the endpoints slice?
+ if idx == len(eps) || eps[idx] != ipAddress { + idx = -1 + } + return idx +} + +func (t *Throttler) updateAllThrottlerCapacity() { + t.revisionThrottlersMutex.RLock() + defer t.revisionThrottlersMutex.RUnlock() + + for _, rt := range t.revisionThrottlers { + rt.updateCapacity(t, rt.backendCount) + } +} + +func (t *Throttler) activatorEndpointsUpdated(newObj interface{}) { + endpoints := newObj.(*corev1.Endpoints) + + // We want to pass sorted list, so that we get _some_ stability in the results. + eps := endpointsToDests(endpoints, networking.ServicePortNameHTTP1).List() + t.logger.Debugf("All Activator IPS: %v, my IP: %s", eps, t.ipAddress) + idx := inferIndex(eps, t.ipAddress) + activatorCount := resources.ReadyAddressCount(endpoints) + t.logger.Infof("Got %d ready activator endpoints, our position is: %d", activatorCount, idx) + atomic.StoreInt32(&t.numActivators, int32(activatorCount)) + atomic.StoreInt32(&t.activatorIndex, int32(idx)) + t.updateAllThrottlerCapacity() +} + +func (t *Throttler) index() int { + return int(atomic.LoadInt32(&t.activatorIndex)) +} + +func (t *Throttler) activatorCount() int { + return int(atomic.LoadInt32(&t.numActivators)) +} + +// minOneOrValue function returns num if its greater than 1 +// else the function returns 1 +func minOneOrValue(num int) int { + if num > 1 { + return num + } + return 1 +} + +// infiniteBreaker is basically a short circuit. +// infiniteBreaker provides us capability to send unlimited number +// of requests to the downstream system. +// This is to be used only when the container concurrency is unset +// (i.e. infinity). +// The infiniteBreaker will, though, block the requests when +// downstream capacity is 0. +type infiniteBreaker struct { + // mu guards `broadcast` channel. + mu sync.RWMutex + + // broadcast channel is used notify the waiting requests that + // downstream capacity showed up. + // When the downstream capacity switches from 0 to 1, the channel is closed. 
+	// When the downstream capacity disappears, a new channel is created.
+	// Reads/Writes to the `broadcast` must be guarded by `mu`.
+	broadcast chan struct{}
+
+	// concurrency in the infinite breaker takes only two values
+	// 0 (no downstream capacity) and 1 (infinite downstream capacity).
+	// `Maybe` checks this value to determine whether to proxy the request
+	// immediately or wait for capacity to appear.
+	// `concurrency` should only be manipulated by `sync/atomic` methods.
+	concurrency int32
+
+	logger *zap.SugaredLogger
+}
+
+// newInfiniteBreaker creates an infiniteBreaker
+func newInfiniteBreaker(logger *zap.SugaredLogger) *infiniteBreaker {
+	return &infiniteBreaker{
+		broadcast: make(chan struct{}),
+		logger:    logger,
+	}
+}
+
+// Capacity returns the current capacity of the breaker
+func (ib *infiniteBreaker) Capacity() int {
+	return int(atomic.LoadInt32(&ib.concurrency))
+}
+
+func zeroOrOne(x int) int32 {
+	if x == 0 {
+		return 0
+	}
+	return 1
+}
+
+// UpdateConcurrency sets the concurrency of the breaker
+func (ib *infiniteBreaker) UpdateConcurrency(cc int) error {
+	rcc := zeroOrOne(cc)
+	// We lock here to make sure two scale up events don't
+	// stomp on each other's feet.
+	ib.mu.Lock()
+	defer ib.mu.Unlock()
+	old := atomic.SwapInt32(&ib.concurrency, rcc)
+
+	// Scale up/down event.
+	if old != rcc {
+		if rcc == 0 {
+			// Scaled to 0.
+			ib.broadcast = make(chan struct{})
+		} else {
+			close(ib.broadcast)
+		}
+	}
+	return nil
+}
+
+// Maybe executes thunk when capacity is available
+func (ib *infiniteBreaker) Maybe(ctx context.Context, thunk func()) error {
+	has := ib.Capacity()
+	// We're scaled to serve.
+	if has > 0 {
+		thunk()
+		return nil
+	}
+
+	// Make sure we lock to get the channel, to avoid
+	// race between Maybe and UpdateConcurrency.
+	var ch chan struct{}
+	ib.mu.RLock()
+	ch = ib.broadcast
+	ib.mu.RUnlock()
+	select {
+	case <-ch:
+		// Scaled up.
+ thunk() + return nil + case <-ctx.Done(): + ib.logger.Infof("Context is closed: %v", ctx.Err()) + return ctx.Err() + } +} + +func (ib *infiniteBreaker) Reserve(context.Context) (func(), bool) { return noop, true } diff --git a/test/vendor/knative.dev/serving/pkg/activator/net/throttler_test.go b/test/vendor/knative.dev/serving/pkg/activator/net/throttler_test.go new file mode 100644 index 0000000000..4013d0a8e0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/net/throttler_test.go @@ -0,0 +1,932 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "errors" + "math" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakeendpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + "knative.dev/pkg/controller" + . 
"knative.dev/pkg/logging/testing" + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + "knative.dev/serving/pkg/queue" +) + +const defaultMaxConcurrency = 1000 + +var defaultParams = queue.BreakerParams{ + QueueDepth: 1, + MaxConcurrency: defaultMaxConcurrency, + InitialCapacity: 0, +} + +type tryResult struct { + dest string + err error +} + +func newTestThrottler(ctx context.Context, numA int32) *Throttler { + throttler := NewThrottler(ctx, defaultParams, "10.10.10.10:8012") + atomic.StoreInt32(&throttler.numActivators, numA) + atomic.StoreInt32(&throttler.activatorIndex, 0) + return throttler +} + +func TestThrottlerUpdateCapacity(t *testing.T) { + logger := TestLogger(t) + throttler := &Throttler{ + revisionThrottlers: make(map[types.NamespacedName]*revisionThrottler), + breakerParams: defaultParams, + numActivators: 1, + logger: logger, + } + rt := &revisionThrottler{ + logger: logger, + breaker: queue.NewBreaker(defaultParams), + containerConcurrency: 10, + } + + rt.updateCapacity(throttler, 1) + if got, want := rt.breaker.Capacity(), 10; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + rt.updateCapacity(throttler, 10) + if got, want := rt.breaker.Capacity(), 100; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + rt.updateCapacity(throttler, defaultMaxConcurrency) // So in theory should be 10x. 
+ if got, want := rt.breaker.Capacity(), defaultMaxConcurrency; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + throttler.numActivators = 10 + rt.updateCapacity(throttler, 10) + if got, want := rt.breaker.Capacity(), 10; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + throttler.numActivators = 200 + rt.updateCapacity(throttler, 10) + if got, want := rt.breaker.Capacity(), 1; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + rt.updateCapacity(throttler, 0) + if got, want := rt.breaker.Capacity(), 0; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + rt.containerConcurrency = 0 + rt.updateCapacity(throttler, 1) + if got, want := rt.breaker.Capacity(), defaultMaxConcurrency; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + rt.updateCapacity(throttler, 10) + if got, want := rt.breaker.Capacity(), defaultMaxConcurrency; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + throttler.numActivators = 200 + rt.updateCapacity(throttler, 1) + if got, want := rt.breaker.Capacity(), defaultMaxConcurrency; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + rt.updateCapacity(throttler, 0) + if got, want := rt.breaker.Capacity(), 0; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // Now test with podIP trackers in tow. + // Simple case. + throttler.numActivators = 1 + throttler.activatorIndex = 0 + rt.podTrackers = makeTrackers(1, 10) + rt.containerConcurrency = 10 + rt.updateCapacity(throttler, 0 /* doesn't matter here*/) + if got, want := rt.breaker.Capacity(), 10; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // 2 backends. + rt.podTrackers = makeTrackers(2, 10) + rt.updateCapacity(throttler, -1 /* doesn't matter here*/) + if got, want := rt.breaker.Capacity(), 20; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // 2 activators. 
+ throttler.numActivators = 2 + rt.updateCapacity(throttler, -1 /* doesn't matter here*/) + if got, want := rt.breaker.Capacity(), 10; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // 3 pods, index 0. + rt.podTrackers = makeTrackers(3, 10) + rt.updateCapacity(throttler, -1 /* doesn't matter here*/) + if got, want := rt.breaker.Capacity(), 15; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // 3 pods, index 1. + throttler.activatorIndex = 1 + rt.updateCapacity(throttler, -1 /* doesn't matter here*/) + if got, want := rt.breaker.Capacity(), 15; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + + // Inifinite capacity. + throttler.activatorIndex = 1 + rt.containerConcurrency = 0 + rt.podTrackers = makeTrackers(3, 0) + rt.updateCapacity(throttler, 1) + if got, want := rt.breaker.Capacity(), defaultMaxConcurrency; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + if got, want := len(rt.assignedTrackers), len(rt.podTrackers); got != want { + t.Errorf("Assigned tracker count = %d, want: %d, diff:\n%s", got, want, + cmp.Diff(rt.assignedTrackers, rt.podTrackers)) + } +} + +func makeTrackers(num, cc int) []*podTracker { + x := make([]*podTracker, num) + for i := 0; i < num; i++ { + x[i] = &podTracker{dest: strconv.Itoa(i)} + if cc > 0 { + x[i].b = queue.NewBreaker(queue.BreakerParams{ + QueueDepth: 1, + MaxConcurrency: cc, + InitialCapacity: cc, + }) + } + } + return x +} + +func TestThrottlerErrorNoRevision(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + servfake := fakeservingclient.Get(ctx) + revisions := fakerevisioninformer.Get(ctx) + waitInformers, err := controller.RunInformers(ctx.Done(), revisions.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + // Add the revision we're testing. 
+ revID := types.NamespacedName{testNamespace, testRevision} + revision := revisionCC1(revID, networking.ProtocolHTTP1) + servfake.ServingV1alpha1().Revisions(revision.Namespace).Create(revision) + revisions.Informer().GetIndexer().Add(revision) + + throttler := newTestThrottler(ctx, 1) + throttler.handleUpdate(revisionDestsUpdate{ + Rev: revID, + Dests: sets.NewString("128.0.0.1:1234"), + }) + + // Make sure it now works. + if err := throttler.Try(context.Background(), revID, func(string) error { return nil }); err != nil { + t.Fatalf("Try() = %v, want no error", err) + } + + // Make sure errors are propagated correctly. + innerError := errors.New("inner") + if err := throttler.Try(context.Background(), revID, func(string) error { return innerError }); err != innerError { + t.Fatalf("Try() = %v, want %v", err, innerError) + } + + servfake.ServingV1alpha1().Revisions(revision.Namespace).Delete(revision.Name, nil) + revisions.Informer().GetIndexer().Delete(revID) + + // Eventually it should now fail. + var lastError error + wait.PollInfinite(10*time.Millisecond, func() (bool, error) { + lastError = throttler.Try(context.Background(), revID, func(string) error { return nil }) + return lastError != nil, nil + }) + if lastError == nil || lastError.Error() != `revision.serving.knative.dev "test-revision" not found` { + t.Fatalf("Try() = %v, wanted a not found error", lastError) + } +} + +func TestThrottlerErrorOneTimesOut(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + servfake := fakeservingclient.Get(ctx) + revisions := fakerevisioninformer.Get(ctx) + waitInformers, err := controller.RunInformers(ctx.Done(), revisions.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + // Add the revision we're testing. 
+ revID := types.NamespacedName{testNamespace, testRevision} + revision := revisionCC1(revID, networking.ProtocolHTTP1) + servfake.ServingV1alpha1().Revisions(revision.Namespace).Create(revision) + revisions.Informer().GetIndexer().Add(revision) + + throttler := newTestThrottler(ctx, 1) + throttler.handleUpdate(revisionDestsUpdate{ + Rev: revID, + ClusterIPDest: "129.0.0.1:1234", + Dests: sets.NewString("128.0.0.1:1234"), + }) + + // Send 2 requests, one should time out. + var mux sync.Mutex + mux.Lock() // Lock the mutex so all requests are blocked in the Try function. + + reqCtx, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel2() + resultChan := tryThrottler(throttler, reqCtx, 2 /*requests*/, func(string) error { + mux.Lock() + return nil + }) + + // The first result will be a timeout because of the locking logic. + if result := <-resultChan; result.err != context.DeadlineExceeded { + t.Fatalf("err = %v, want %v", err, context.DeadlineExceeded) + } + + // Allow the successful request to pass through. 
+ mux.Unlock() + if result := <-resultChan; result.err != nil { + t.Fatalf("err = %v, want no error", err) + } +} + +func TestThrottlerSuccesses(t *testing.T) { + for _, tc := range []struct { + name string + revision *v1alpha1.Revision + initUpdates []revisionDestsUpdate + requests int + wantDests sets.String + }{{ + name: "single healthy podIP", + revision: revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + initUpdates: []revisionDestsUpdate{{ + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234"), + }, { + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234"), + }}, + requests: 1, + wantDests: sets.NewString("128.0.0.1:1234"), + }, { + name: "single healthy podIP, infinite cc", + revision: revision(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1, 0), + // Double updates exercise additional paths. + initUpdates: []revisionDestsUpdate{{ + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.2:1234"), + }, { + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234"), + }}, + requests: 1, + wantDests: sets.NewString("128.0.0.1:1234"), + }, { + name: "single healthy clusterIP", + revision: revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + initUpdates: []revisionDestsUpdate{{ + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + }, { + Rev: types.NamespacedName{testNamespace, testRevision}, + ClusterIPDest: "129.0.0.1:1234", + Dests: sets.NewString("128.0.0.1:1234"), + }}, + requests: 1, + wantDests: sets.NewString("129.0.0.1:1234"), + }, { + name: "spread podIP load", + revision: revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + initUpdates: []revisionDestsUpdate{{ + // Double update here 
excercises some additional paths. + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.3:1234"), + }, { + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + }}, + requests: 2, + wantDests: sets.NewString("128.0.0.2:1234", "128.0.0.1:1234"), + }, { + name: "clumping test", + revision: revision(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1, 3), + initUpdates: []revisionDestsUpdate{{ + Rev: types.NamespacedName{testNamespace, testRevision}, + Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + }}, + requests: 3, + wantDests: sets.NewString("128.0.0.1:1234"), + }, { + name: "multiple ClusterIP requests", + revision: revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1), + initUpdates: []revisionDestsUpdate{{ + Rev: types.NamespacedName{testNamespace, testRevision}, + ClusterIPDest: "129.0.0.1:1234", + Dests: sets.NewString("128.0.0.1:1234", "128.0.0.2:1234"), + }}, + requests: 2, + wantDests: sets.NewString("129.0.0.1:1234"), + }} { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + servfake := fakeservingclient.Get(ctx) + revisions := fakerevisioninformer.Get(ctx) + + waitInformers, err := controller.RunInformers(ctx.Done(), revisions.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + // Add the revision were testing. 
+ servfake.ServingV1alpha1().Revisions(tc.revision.Namespace).Create(tc.revision) + revisions.Informer().GetIndexer().Add(tc.revision) + + throttler := newTestThrottler(ctx, 1) + for _, update := range tc.initUpdates { + throttler.handleUpdate(update) + } + + tryContext, cancel2 := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel2() + + results := tryThrottler(throttler, tryContext, tc.requests, func(string) error { + // Simulate proxying. + time.Sleep(50 * time.Millisecond) + return nil + }) + gotDests := sets.NewString() + for i := 0; i < tc.requests; i++ { + result := <-results + gotDests.Insert(result.dest) + } + + if got, want := gotDests, tc.wantDests; !got.Equal(want) { + t.Errorf("Dests = %v, want: %v, diff: %s", got, want, cmp.Diff(want, got)) + } + }) + } +} + +func trackerDestSet(ts []*podTracker) sets.String { + ret := sets.NewString() + for _, t := range ts { + ret.Insert(t.dest) + } + return ret +} + +func TestPodAssignmentFinite(t *testing.T) { + // An e2e verification test of pod assignment and capacity + // computations. + logger := TestLogger(t) + revName := types.NamespacedName{testNamespace, testRevision} + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + + throttler := newTestThrottler(ctx, 4 /*num activators*/) + rt := newRevisionThrottler(revName, 42 /*cc*/, defaultParams, logger) + throttler.revisionThrottlers[revName] = rt + + update := revisionDestsUpdate{ + Rev: revName, + ClusterIPDest: "", + Dests: sets.NewString("ip4", "ip3", "ip5", "ip2", "ip1", "ip0"), + } + // This should synchronously update throughout the system. + // And now we can inspect `rt`. 
+ throttler.handleUpdate(update) + if got, want := len(rt.podTrackers), len(update.Dests); got != want { + t.Errorf("NumTrackers = %d, want: %d", got, want) + } + if got, want := trackerDestSet(rt.assignedTrackers), sets.NewString("ip0", "ip4", "ip5"); !got.Equal(want) { + t.Errorf("Assigned trackers = %v, want: %v, diff: %s", got, want, cmp.Diff(want, got)) + } + if got, want := rt.breaker.Capacity(), 6*42/4; got != want { + t.Errorf("TotalCapacity = %d, want: %d", got, want) + } + if got, want := rt.assignedTrackers[0].Capacity(), 42; got != want { + t.Errorf("Exclusive tracker capacity: %d, want: %d", got, want) + } + if got, want := rt.assignedTrackers[1].Capacity(), int(math.Ceil(42./4.)); got != want { + t.Errorf("Shared tracker capacity: %d, want: %d", got, want) + } + if got, want := rt.assignedTrackers[2].Capacity(), int(math.Ceil(42./4.)); got != want { + t.Errorf("Shared tracker capacity: %d, want: %d", got, want) + } + + // Now scale to zero. + update.Dests = nil + throttler.handleUpdate(update) + if got, want := len(rt.podTrackers), 0; got != want { + t.Errorf("NumTrackers = %d, want: %d", got, want) + } + if got, want := len(rt.assignedTrackers), 0; got != want { + t.Errorf("NumAssignedTrackers = %d, want: %d", got, want) + } + if got, want := rt.breaker.Capacity(), 0; got != want { + t.Errorf("TotalCapacity = %d, want: %d", got, want) + } +} + +func TestPodAssignmentInfinite(t *testing.T) { + logger := TestLogger(t) + revName := types.NamespacedName{testNamespace, testRevision} + + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + + throttler := newTestThrottler(ctx, 2) + rt := newRevisionThrottler(revName, 0 /*cc*/, defaultParams, logger) + throttler.revisionThrottlers[revName] = rt + + update := revisionDestsUpdate{ + Rev: revName, + ClusterIPDest: "", + Dests: sets.NewString("ip3", "ip2", "ip1"), + } + // This should synchronously update throughout the system. + // And now we can inspect `rt`. 
+ throttler.handleUpdate(update) + if got, want := len(rt.podTrackers), 3; got != want { + t.Errorf("NumTrackers = %d, want: %d", got, want) + } + if got, want := len(rt.assignedTrackers), 3; got != want { + t.Errorf("NumAssigned trackers = %d, want: %d", got, want) + } + if got, want := rt.breaker.Capacity(), 1; got != want { + t.Errorf("TotalCapacity = %d, want: %d", got, want) + } + if got, want := rt.assignedTrackers[0].Capacity(), 1; got != want { + t.Errorf("Exclusive tracker capacity: %d, want: %d", got, want) + } + + // Now scale to zero. + update.Dests = nil + throttler.handleUpdate(update) + if got, want := len(rt.podTrackers), 0; got != want { + t.Errorf("NumTrackers = %d, want: %d", got, want) + } + if got, want := len(rt.assignedTrackers), 0; got != want { + t.Errorf("NumAssignedTrackers = %d, want: %d", got, want) + } + if got, want := rt.breaker.Capacity(), 0; got != want { + t.Errorf("TotalCapacity = %d, want: %d", got, want) + } +} + +func TestMultipleActivators(t *testing.T) { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + + fake := fakekubeclient.Get(ctx) + endpoints := fakeendpointsinformer.Get(ctx) + servfake := fakeservingclient.Get(ctx) + revisions := revisioninformer.Get(ctx) + + waitInformers, err := controller.RunInformers(ctx.Done(), endpoints.Informer(), revisions.Informer()) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + rev := revisionCC1(types.NamespacedName{testNamespace, testRevision}, networking.ProtocolHTTP1) + // Add the revision we're testing. 
+ servfake.ServingV1alpha1().Revisions(rev.Namespace).Create(rev) + revisions.Informer().GetIndexer().Add(rev) + + throttler := NewThrottler(ctx, defaultParams, "130.0.0.2:8012") + + revID := types.NamespacedName{testNamespace, testRevision} + possibleDests := sets.NewString("128.0.0.1:1234", "128.0.0.2:1234", "128.0.0.23:1234") + throttler.handleUpdate(revisionDestsUpdate{ + Rev: revID, + Dests: possibleDests, + }) + + // Add activator endpoint with 2 activators. + activatorEp := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: networking.ActivatorServiceName, + Namespace: system.Namespace(), + }, + Subsets: []corev1.EndpointSubset{*epSubset(8012, "http", []string{"130.0.0.1", "130.0.0.2"})}, + } + fake.CoreV1().Endpoints(system.Namespace()).Create(activatorEp) + endpoints.Informer().GetIndexer().Add(activatorEp) + + // Make sure our informer event has fired. + if err := wait.PollImmediate(10*time.Millisecond, 1*time.Second, func() (bool, error) { + return atomic.LoadInt32(&throttler.activatorIndex) != -1, nil + }); err != nil { + t.Fatal("Timed out waiting for the Activator Endpoints to fire") + } + + // Test with 2 activators, 3 endpoints we can send 1 request and the second times out. + var mux sync.Mutex + mux.Lock() // Lock the mutex so all requests are blocked in the Try function. + + reqCtx, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel2() + resultChan := tryThrottler(throttler, reqCtx, 2 /*requests*/, func(string) error { + mux.Lock() + return nil + }) + + // The first result will be a timeout because of the locking logic. + if result := <-resultChan; result.err != context.DeadlineExceeded { + t.Fatalf("err = %v, want %v", err, context.DeadlineExceeded) + } + + // Allow the successful request to pass through. 
+ mux.Unlock() + if result := <-resultChan; !possibleDests.Has(result.dest) { + t.Fatalf("Request went to an unknown destination: %s, possibles: %v", result.dest, possibleDests) + } +} + +func TestInfiniteBreakerCreation(t *testing.T) { + // This test verifies that we use infiniteBreaker when CC==0. + tttl := newRevisionThrottler(types.NamespacedName{"a", "b"}, 0, /*cc*/ + queue.BreakerParams{}, TestLogger(t)) + if _, ok := tttl.breaker.(*infiniteBreaker); !ok { + t.Errorf("The type of revisionBreker = %T, want %T", tttl, (*infiniteBreaker)(nil)) + } +} + +func tryThrottler(throttler *Throttler, ctx context.Context, requests int, try func(string) error) chan tryResult { + resultChan := make(chan tryResult) + + for i := 0; i < requests; i++ { + go func() { + var result tryResult + if err := throttler.Try(ctx, types.NamespacedName{Namespace: testNamespace, Name: testRevision}, func(dest string) error { + result = tryResult{dest: dest} + return try(dest) + }); err != nil { + result = tryResult{err: err} + } + resultChan <- result + }() + } + + return resultChan +} + +func TestInfiniteBreaker(t *testing.T) { + b := &infiniteBreaker{ + broadcast: make(chan struct{}), + logger: TestLogger(t), + } + + // Verify initial condition. + if got, want := b.Capacity(), 0; got != want { + t.Errorf("Cap=%d, want: %d", got, want) + } + if _, ok := b.Reserve(context.Background()); ok != true { + t.Error("Reserve failed, must always succeed") + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + if err := b.Maybe(ctx, nil); err == nil { + t.Error("Should have failed, but didn't") + } + + b.UpdateConcurrency(1) + if got, want := b.Capacity(), 1; got != want { + t.Errorf("Cap=%d, want: %d", got, want) + } + + // Verify we call the thunk when we have achieved capacity. + // Twice. 
+ for i := 0; i < 2; i++ { + ctx, cancel = context.WithCancel(context.Background()) + cancel() + res := false + if err := b.Maybe(ctx, func() { res = true }); err != nil { + t.Error("Should have succeeded, but didn't") + } + if !res { + t.Error("thunk was not invoked") + } + } + + // Scale to zero + b.UpdateConcurrency(0) + + // Repeat initial test. + ctx, cancel = context.WithCancel(context.Background()) + cancel() + if err := b.Maybe(ctx, nil); err == nil { + t.Error("Should have failed, but didn't") + } + if got, want := b.Capacity(), 0; got != want { + t.Errorf("Cap=%d, want: %d", got, want) + } + + // And now do the async test. + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + + // Unlock the channel after a short delay. + go func() { + time.Sleep(10 * time.Millisecond) + b.UpdateConcurrency(1) + }() + res := false + if err := b.Maybe(ctx, func() { res = true }); err != nil { + t.Error("Should have succeeded, but didn't") + } + if !res { + t.Error("thunk was not invoked") + } +} + +func TestInferIndex(t *testing.T) { + const myIP = "10.10.10.3:1234" + tests := []struct { + label string + ips []string + want int + }{{ + "empty", + []string{}, + -1, + }, { + "missing", + []string{"11.11.11.11:1234", "11.11.11.12:1234"}, + -1, + }, { + "first", + []string{"10.10.10.3:1234,11.11.11.11:1234"}, + -1, + }, { + "middle", + []string{"10.10.10.1:1212", "10.10.10.2:1234", "10.10.10.3:1234", "11.11.11.11:1234"}, + 2, + }, { + "last", + []string{"10.10.10.1:1234", "10.10.10.2:1234", "10.10.10.3:1234"}, + 2, + }} + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + if got, want := inferIndex(test.ips, myIP), test.want; got != want { + t.Errorf("Index = %d, wand: %d", got, want) + } + }) + } +} + +func TestPickIndices(t *testing.T) { + tests := []struct { + l string + pods int + acts int + idx int + wantB, wantE, wantR int + }{{ + l: "1 pod, 1 activator", + pods: 1, + acts: 1, + idx: 0, + wantB: 0, + wantE: 1, + }, { + l: "1 
pod, 2 activators, this is 0", + pods: 1, + acts: 2, + idx: 0, + wantB: 0, + wantE: 1, + }, { + l: "1 pod, 2 activators, this is 1", + pods: 1, + acts: 2, + idx: 1, + wantB: 0, + wantE: 1, + }, { + l: "2 pods, 3 activators, this is 1", + pods: 2, + acts: 3, + idx: 1, + wantB: 1, + wantE: 2, + }, { + l: "2 pods, 3 activators, this is 2", + pods: 2, + acts: 3, + idx: 2, + wantB: 0, + wantE: 1, + }, { + l: "3 pods, 3 activators, this is 2", + pods: 3, + acts: 3, + idx: 2, + wantB: 2, + wantE: 3, + }, { + l: "10 pods, 3 activators this is 0", + pods: 10, + acts: 3, + idx: 0, + wantB: 0, + wantE: 3, + wantR: 1, + }, { + l: "10 pods, 3 activators this is 1", + pods: 10, + acts: 3, + idx: 1, + wantB: 3, + wantE: 6, + wantR: 1, + }, { + l: "10 pods, 3 activators this is 2", + pods: 10, + acts: 3, + idx: 2, + wantB: 6, + wantE: 9, + wantR: 1, + }, { + l: "150 pods, 5 activators this is 0", + pods: 150, + acts: 5, + idx: 0, + wantB: 0, + wantE: 30, + }, { + l: "150 pods, 5 activators this is 1", + pods: 150, + acts: 5, + idx: 1, + wantB: 30, + wantE: 60, + }, { + l: "150 pods, 5 activators this is 4", + pods: 150, + acts: 5, + idx: 4, + wantB: 120, + wantE: 150, + }} + for _, test := range tests { + t.Run(test.l, func(tt *testing.T) { + bi, ei, rem := pickIndices(test.pods, test.idx, test.acts) + if got, want := bi, test.wantB; got != want { + t.Errorf("BeginIndex = %d, want: %d", got, want) + } + if got, want := ei, test.wantE; got != want { + t.Errorf("EndIndex = %d, want: %d", got, want) + } + if got, want := rem, test.wantR; got != want { + t.Errorf("Remanants = %d, want: %d", got, want) + } + }) + } +} + +func TestAssignSlice(t *testing.T) { + opts := []cmp.Option{ + cmpopts.IgnoreUnexported(queue.Breaker{}), + cmp.AllowUnexported(podTracker{}), + } + trackers := []*podTracker{{ + dest: "2", + }, { + dest: "1", + }, { + dest: "3", + }} + t.Run("notrackers", func(t *testing.T) { + got := assignSlice([]*podTracker{}, 0, 1, 0) + if !cmp.Equal(got, []*podTracker{}, opts...) 
{ + t.Errorf("Got=%v, want: %v, diff: %s", got, trackers, + cmp.Diff([]*podTracker{}, got, opts...)) + } + }) + t.Run("idx=-1", func(t *testing.T) { + got := assignSlice(trackers, -1, 1, 0) + if !cmp.Equal(got, trackers, opts...) { + t.Errorf("Got=%v, want: %v, diff: %s", got, trackers, + cmp.Diff(trackers, got, opts...)) + } + }) + t.Run("idx=1", func(t *testing.T) { + cp := append(trackers[:0:0], trackers...) + got := assignSlice(cp, 1, 3, 0) + if !cmp.Equal(got, trackers[0:1], opts...) { + t.Errorf("Got=%v, want: %v; diff: %s", got, trackers[0:1], + cmp.Diff(trackers[0:1], got, opts...)) + } + }) + t.Run("len=1", func(t *testing.T) { + got := assignSlice(trackers[0:1], 1, 3, 0) + if !cmp.Equal(got, trackers[0:1], opts...) { + t.Errorf("Got=%v, want: %v; diff: %s", got, trackers[0:1], + cmp.Diff(trackers[0:1], got, opts...)) + } + }) + + t.Run("idx=1, cc=5", func(t *testing.T) { + trackers := []*podTracker{{ + dest: "2", + b: queue.NewBreaker(defaultParams), + }, { + dest: "1", + b: queue.NewBreaker(defaultParams), + }, { + dest: "3", + b: queue.NewBreaker(defaultParams), + }} + cp := append(trackers[:0:0], trackers...) + got := assignSlice(cp, 1, 2, 5) + want := append(trackers[0:1], trackers[2:]...) + if !cmp.Equal(got, want, opts...) { + t.Errorf("Got=%v, want: %v; diff: %s", got, want, + cmp.Diff(trackers[0:1], got, opts...)) + } + if got, want := got[1].b.Capacity(), 5/2+1; got != want { + t.Errorf("Capacity for the tail pod = %d, want: %d", got, want) + } + }) + t.Run("idx=1, cc=6", func(t *testing.T) { + trackers := []*podTracker{{ + dest: "2", + b: queue.NewBreaker(defaultParams), + }, { + dest: "1", + b: queue.NewBreaker(defaultParams), + }, { + dest: "3", + b: queue.NewBreaker(defaultParams), + }} + cp := append(trackers[:0:0], trackers...) + got := assignSlice(cp, 1, 2, 6) + want := append(trackers[0:1], trackers[2:]...) + if !cmp.Equal(got, want, opts...) 
{ + t.Errorf("Got=%v, want: %v; diff: %s", got, want, + cmp.Diff(trackers[0:1], got, opts...)) + } + if got, want := got[1].b.Capacity(), 3; got != want { + t.Errorf("Capacity for the tail pod = %d, want: %d", got, want) + } + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/stats_reporter.go b/test/vendor/knative.dev/serving/pkg/activator/stats_reporter.go new file mode 100644 index 0000000000..a06e55611a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/stats_reporter.go @@ -0,0 +1,191 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activator + +import ( + "context" + "errors" + "strconv" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + pkgmetrics "knative.dev/pkg/metrics" + "knative.dev/pkg/metrics/metricskey" + "knative.dev/serving/pkg/metrics" +) + +var ( + requestConcurrencyM = stats.Int64( + "request_concurrency", + "Concurrent requests that are routed to Activator", + stats.UnitDimensionless) + requestCountM = stats.Int64( + "request_count", + "The number of requests that are routed to Activator", + stats.UnitDimensionless) + responseTimeInMsecM = stats.Float64( + "request_latencies", + "The response time in millisecond", + stats.UnitMilliseconds) + + // NOTE: 0 should not be used as boundary. 
See + // https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/98 + defaultLatencyDistribution = view.Distribution(5, 10, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// StatsReporter defines the interface for sending activator metrics +type StatsReporter interface { + ReportRequestConcurrency(ns, service, config, rev string, v int64) error + ReportRequestCount(ns, service, config, rev string, responseCode, numTries int) error + ReportResponseTime(ns, service, config, rev string, responseCode int, d time.Duration) error +} + +// Reporter holds cached metric objects to report autoscaler metrics +type Reporter struct { + initialized bool + ctx context.Context +} + +// NewStatsReporter creates a reporter that collects and reports activator metrics +func NewStatsReporter(pod string) (*Reporter, error) { + ctx, err := tag.New( + context.Background(), + tag.Upsert(metrics.PodTagKey, pod), + tag.Upsert(metrics.ContainerTagKey, Name), + ) + if err != nil { + return nil, err + } + + var r = &Reporter{ + initialized: true, + ctx: ctx, + } + + // Create view to see our measurements. 
+ err = view.Register( + &view.View{ + Description: "Concurrent requests that are routed to Activator", + Measure: requestConcurrencyM, + Aggregation: view.LastValue(), + TagKeys: append(metrics.CommonRevisionKeys, metrics.PodTagKey, metrics.ContainerTagKey), + }, + &view.View{ + Description: "The number of requests that are routed to Activator", + Measure: requestCountM, + Aggregation: view.Count(), + TagKeys: append(metrics.CommonRevisionKeys, metrics.PodTagKey, metrics.ContainerTagKey, + metrics.ResponseCodeKey, metrics.ResponseCodeClassKey, metrics.NumTriesKey), + }, + &view.View{ + Description: "The response time in millisecond", + Measure: responseTimeInMsecM, + Aggregation: defaultLatencyDistribution, + TagKeys: append(metrics.CommonRevisionKeys, metrics.PodTagKey, metrics.ContainerTagKey, + metrics.ResponseCodeKey, metrics.ResponseCodeClassKey), + }, + ) + if err != nil { + return nil, err + } + + return r, nil +} + +func valueOrUnknown(v string) string { + if v != "" { + return v + } + return metricskey.ValueUnknown +} + +// ReportRequestConcurrency captures request concurrency metric with value v. +func (r *Reporter) ReportRequestConcurrency(ns, service, config, rev string, v int64) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + // Note that service names can be an empty string, so it needs a special treatment. + ctx, err := tag.New( + r.ctx, + tag.Upsert(metrics.NamespaceTagKey, ns), + tag.Upsert(metrics.ServiceTagKey, valueOrUnknown(service)), + tag.Upsert(metrics.ConfigTagKey, config), + tag.Upsert(metrics.RevisionTagKey, rev)) + if err != nil { + return err + } + + pkgmetrics.Record(ctx, requestConcurrencyM.M(v)) + return nil +} + +// ReportRequestCount captures request count. 
+func (r *Reporter) ReportRequestCount(ns, service, config, rev string, responseCode, numTries int) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + // Note that service names can be an empty string, so it needs a special treatment. + ctx, err := tag.New( + r.ctx, + tag.Upsert(metrics.NamespaceTagKey, ns), + tag.Upsert(metrics.ServiceTagKey, valueOrUnknown(service)), + tag.Upsert(metrics.ConfigTagKey, config), + tag.Upsert(metrics.RevisionTagKey, rev), + tag.Upsert(metrics.ResponseCodeKey, strconv.Itoa(responseCode)), + tag.Upsert(metrics.ResponseCodeClassKey, responseCodeClass(responseCode)), + tag.Upsert(metrics.NumTriesKey, strconv.Itoa(numTries))) + if err != nil { + return err + } + + pkgmetrics.Record(ctx, requestCountM.M(1)) + return nil +} + +// ReportResponseTime captures response time requests +func (r *Reporter) ReportResponseTime(ns, service, config, rev string, responseCode int, d time.Duration) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + // Note that service names can be an empty string, so it needs a special treatment. + ctx, err := tag.New( + r.ctx, + tag.Upsert(metrics.NamespaceTagKey, ns), + tag.Upsert(metrics.ServiceTagKey, valueOrUnknown(service)), + tag.Upsert(metrics.ConfigTagKey, config), + tag.Upsert(metrics.RevisionTagKey, rev), + tag.Upsert(metrics.ResponseCodeKey, strconv.Itoa(responseCode)), + tag.Upsert(metrics.ResponseCodeClassKey, responseCodeClass(responseCode))) + if err != nil { + return err + } + + pkgmetrics.Record(ctx, responseTimeInMsecM.M(float64(d.Milliseconds()))) + return nil +} + +// responseCodeClass converts response code to a string of response code class. +// e.g. The response code class is "5xx" for response code 503. +func responseCodeClass(responseCode int) string { + // Get the hundred digit of the response code and concatenate "xx". 
+ return strconv.Itoa(responseCode/100) + "xx" +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/stats_reporter_test.go b/test/vendor/knative.dev/serving/pkg/activator/stats_reporter_test.go new file mode 100644 index 0000000000..f6604bfbfb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/stats_reporter_test.go @@ -0,0 +1,173 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activator + +import ( + "net/http" + "testing" + "time" + + "knative.dev/pkg/metrics/metricskey" + "knative.dev/pkg/metrics/metricstest" +) + +// unregister, ehm, unregisters the metrics that were registered, by +// virtue of StatsReporter creation. +// Since golang executes test iterations within the same process, the stats reporter +// returns an error if the metric is already registered and the test panics. +func unregister() { + metricstest.Unregister("request_count", "request_latencies", "request_concurrency") +} + +func TestActivatorReporter(t *testing.T) { + r := &Reporter{} + + if err := r.ReportRequestCount("testns", "testsvc", "testconfig", "testrev", http.StatusOK, 1); err == nil { + t.Error("Reporter expected an error for Report call before init. Got success.") + } + + var err error + if r, err = NewStatsReporter("testpod"); err != nil { + t.Fatalf("Failed to create a new reporter: %v", err) + } + // Without this `go test ... -count=X`, where X > 1, fails, since + // we get an error about view already being registered. 
+ defer unregister() + + // test ReportResponseConcurrency + wantTags1 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: "testsvc", + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + } + expectSuccess(t, func() error { + return r.ReportRequestConcurrency("testns", "testsvc", "testconfig", "testrev", 100) + }) + metricstest.CheckLastValueData(t, "request_concurrency", wantTags1, 100) + expectSuccess(t, func() error { + return r.ReportRequestConcurrency("testns", "testsvc", "testconfig", "testrev", 200) + }) + metricstest.CheckLastValueData(t, "request_concurrency", wantTags1, 200) + + // test ReportRequestCount + wantTags2 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: "testsvc", + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + "response_code": "200", + "response_code_class": "2xx", + "num_tries": "6", + } + expectSuccess(t, func() error { + return r.ReportRequestCount("testns", "testsvc", "testconfig", "testrev", http.StatusOK, 6) + }) + expectSuccess(t, func() error { + return r.ReportRequestCount("testns", "testsvc", "testconfig", "testrev", http.StatusOK, 6) + }) + metricstest.CheckCountData(t, "request_count", wantTags2, 2) + + // test ReportResponseTime + wantTags3 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: "testsvc", + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + "response_code": "200", + "response_code_class": "2xx", + } + expectSuccess(t, func() error { + return r.ReportResponseTime("testns", "testsvc", "testconfig", "testrev", http.StatusOK, 1100*time.Millisecond) + }) + expectSuccess(t, func() error { 
+ return r.ReportResponseTime("testns", "testsvc", "testconfig", "testrev", http.StatusOK, 9100*time.Millisecond) + }) + metricstest.CheckDistributionData(t, "request_latencies", wantTags3, 2, 1100.0, 9100.0) +} + +func TestActivatorReporterEmptyServiceName(t *testing.T) { + r, err := NewStatsReporter("testpod") + defer unregister() + + if err != nil { + t.Fatalf("Failed to create a new reporter: %v", err) + } + + // test ReportResponseConcurrency + wantTags1 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: metricskey.ValueUnknown, + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + } + expectSuccess(t, func() error { + return r.ReportRequestConcurrency("testns", "" /*service=*/, "testconfig", "testrev", 100) + }) + metricstest.CheckLastValueData(t, "request_concurrency", wantTags1, 100) + + // test ReportRequestCount + wantTags2 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: metricskey.ValueUnknown, + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + "response_code": "200", + "response_code_class": "2xx", + "num_tries": "6", + } + expectSuccess(t, func() error { + return r.ReportRequestCount("testns", "" /*service=*/, "testconfig", "testrev", 200, 6) + }) + metricstest.CheckCountData(t, "request_count", wantTags2, 1) + + // test ReportResponseTime + wantTags3 := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: metricskey.ValueUnknown, + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + "pod_name": "testpod", + "container_name": "activator", + "response_code": "200", + "response_code_class": "2xx", + } + expectSuccess(t, func() error { + return r.ReportResponseTime("testns", "" 
/*service=*/, "testconfig", "testrev", 200, 7100*time.Millisecond) + }) + expectSuccess(t, func() error { + return r.ReportResponseTime("testns", "" /*service=*/, "testconfig", "testrev", 200, 5100*time.Millisecond) + }) + metricstest.CheckDistributionData(t, "request_latencies", wantTags3, 2, 5100.0, 7100.0) +} + +func expectSuccess(t *testing.T, f func() error) { + t.Helper() + if err := f(); err != nil { + t.Errorf("Reporter expected success but got error: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/activator/testing/roundtripper.go b/test/vendor/knative.dev/serving/pkg/activator/testing/roundtripper.go new file mode 100644 index 0000000000..46e106d761 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/testing/roundtripper.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "net/http" + "net/http/httptest" + "sync" + "sync/atomic" + + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" +) + +// FakeResponse is a response given by the FakeRoundTripper +type FakeResponse struct { + Err error + Code int + Body string +} + +// FakeRoundTripper is a roundtripper emulator useful in testing +type FakeRoundTripper struct { + // Return an error if host header does not match this + ExpectHost string + + // LockerCh blocks responses being sent until a struct is written to the channel + LockerCh chan struct{} + + // ProbeHostResponses are popped when a probe request is made to a given host. If + // no host is matched then this falls back to the behavior or ProbeResponses + ProbeHostResponses map[string][]FakeResponse + + // Responses to probe requests are popeed from this list until it is size 1 then + // that response is returned indefinitely + ProbeResponses []FakeResponse + + // Response to non-probe requests + RequestResponse *FakeResponse + responseMux sync.Mutex + + NumProbes int32 +} + +func defaultProbeResponse() *FakeResponse { + return &FakeResponse{ + Err: nil, + Code: http.StatusOK, + Body: queue.Name, + } +} + +func defaultRequestResponse() *FakeResponse { + return &FakeResponse{ + Err: nil, + Code: http.StatusOK, + Body: "default response", + } +} + +func response(fr *FakeResponse) (*http.Response, error) { + recorder := httptest.NewRecorder() + recorder.WriteHeader(fr.Code) + recorder.WriteString(fr.Body) + return recorder.Result(), nil +} + +func popResponseSlice(in []FakeResponse) (*FakeResponse, []FakeResponse) { + if len(in) == 0 { + return defaultProbeResponse(), in + } + resp := &in[0] + if len(in) > 1 { + in = in[1:] + } + + return resp, in +} + +func (rt *FakeRoundTripper) popResponse(host string) *FakeResponse { + rt.responseMux.Lock() + defer rt.responseMux.Unlock() + + if v, ok := rt.ProbeHostResponses[host]; ok { + resp, responses := popResponseSlice(v) + 
rt.ProbeHostResponses[host] = responses + return resp + } + + resp, responses := popResponseSlice(rt.ProbeResponses) + rt.ProbeResponses = responses + return resp +} + +// RT is a RoundTripperFunc +func (rt *FakeRoundTripper) RT(req *http.Request) (*http.Response, error) { + if req.Header.Get(network.ProbeHeaderName) != "" { + atomic.AddInt32(&rt.NumProbes, 1) + resp := rt.popResponse(req.URL.Host) + if resp.Err != nil { + return nil, resp.Err + } + + // Make sure the probe is attributed with correct header. + if req.Header.Get(network.ProbeHeaderName) != queue.Name { + return response(&FakeResponse{ + Code: http.StatusBadRequest, + Body: "probe sent to a wrong system", + }) + } + if req.Header.Get(network.UserAgentKey) != network.ActivatorUserAgent { + return response(&FakeResponse{ + Code: http.StatusBadRequest, + Body: "probe set with a wrong User-Agent value", + }) + } + return response(resp) + } + resp := rt.RequestResponse + if resp == nil { + resp = defaultRequestResponse() + } + + if resp.Err != nil { + return nil, resp.Err + } + + // Verify that the request has the required rewritten host header. 
+ if got, want := req.Host, ""; got != want { + return nil, fmt.Errorf("the req.Host has not been cleared out, was: %q", got) + } + if got, want := req.Header.Get("Host"), ""; got != want { + return nil, fmt.Errorf("the Host header has not been cleared out, was: %q", got) + } + + if rt.ExpectHost != "" { + if got, want := req.Header.Get(network.OriginalHostHeader), rt.ExpectHost; got != want { + return nil, fmt.Errorf("the %s header = %q, want: %q", network.OriginalHostHeader, got, want) + } + } + + if rt.LockerCh != nil { + rt.LockerCh <- struct{}{} + } + + return response(resp) +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/unstructured.go b/test/vendor/knative.dev/serving/pkg/activator/util/header.go similarity index 53% rename from test/vendor/github.com/knative/pkg/apis/duck/unstructured.go rename to test/vendor/knative.dev/serving/pkg/activator/util/header.go index 98b3cef946..f98df48db8 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/unstructured.go +++ b/test/vendor/knative.dev/serving/pkg/activator/util/header.go @@ -14,24 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -package duck +package util import ( - "encoding/json" + "net/http" + "net/http/httputil" + + "knative.dev/serving/pkg/activator" ) -// Marshallable is implementated by the Unstructured K8s types. -type Marshalable interface { - MarshalJSON() ([]byte, error) +var headersToRemove = []string{ + activator.RevisionHeaderName, + activator.RevisionHeaderNamespace, } -// FromUnstructured takes unstructured object from (say from client-go/dynamic) and -// converts it into our duck types. 
-func FromUnstructured(obj Marshalable, target interface{}) error { - // Use the unstructured marshaller to ensure it's proper JSON - raw, err := obj.MarshalJSON() - if err != nil { - return err +// SetupHeaderPruning will cause the http.ReverseProxy +// to not forward activator headers +func SetupHeaderPruning(p *httputil.ReverseProxy) { + // Director is never null - otherwise ServeHTTP panics + orig := p.Director + p.Director = func(r *http.Request) { + orig(r) + + for _, h := range headersToRemove { + r.Header.Del(h) + } } - return json.Unmarshal(raw, &target) } diff --git a/test/vendor/knative.dev/serving/pkg/activator/util/header_test.go b/test/vendor/knative.dev/serving/pkg/activator/util/header_test.go new file mode 100644 index 0000000000..07d113cb7e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/activator/util/header_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "testing" + + "knative.dev/serving/pkg/activator" +) + +func TestHeaderPruning(t *testing.T) { + var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(activator.RevisionHeaderName) != "" { + w.WriteHeader(http.StatusBadRequest) + return + } + + if r.Header.Get(activator.RevisionHeaderNamespace) != "" { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(http.StatusOK) + } + + server := httptest.NewServer(handler) + serverURL, _ := url.Parse(server.URL) + + defer server.Close() + + tests := []struct { + name string + header string + }{{ + name: "revision name header", + header: activator.RevisionHeaderName, + }, { + name: "revision namespace header", + header: activator.RevisionHeaderNamespace, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + proxy := httputil.NewSingleHostReverseProxy(serverURL) + SetupHeaderPruning(proxy) + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) + req.Header.Set(test.header, "some-value") + + proxy.ServeHTTP(resp, req) + + if resp.Code != http.StatusOK { + t.Errorf("expected header %q to be filtered", test.header) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/OWNERS b/test/vendor/knative.dev/serving/pkg/apis/OWNERS new file mode 100644 index 0000000000..63f701abf6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/OWNERS @@ -0,0 +1,13 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +# TOC +- evankanderson +- mattmoor +- vaikas-google +- vaikas +# Serving WG Leads +- dgerd + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/OWNERS b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/OWNERS new file mode 100644 index 0000000000..690ff0e48e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go new file mode 100644 index 0000000000..cfbd9a255b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaling + +import ( + "fmt" + "math" + "strconv" + "time" + + "knative.dev/pkg/apis" +) + +func getIntGE0(m map[string]string, k string) (int64, *apis.FieldError) { + v, ok := m[k] + if !ok { + return 0, nil + } + i, err := strconv.ParseInt(v, 10, 32) + if err != nil || i < 0 { + return 0, apis.ErrOutOfBoundsValue(v, 1, math.MaxInt32, k) + } + return i, nil +} + +func ValidateAnnotations(anns map[string]string) *apis.FieldError { + if len(anns) == 0 { + return nil + } + return validateMinMaxScale(anns).Also(validateFloats(anns)).Also(validateWindows(anns).Also(validateMetric(anns))) +} + +func validateFloats(annotations map[string]string) *apis.FieldError { + var errs *apis.FieldError + if v, ok := annotations[PanicWindowPercentageAnnotationKey]; ok { + if fv, err := strconv.ParseFloat(v, 64); err != nil { + errs = errs.Also(apis.ErrInvalidValue(v, PanicWindowPercentageAnnotationKey)) + } else if fv < PanicWindowPercentageMin || fv > PanicWindowPercentageMax { + errs = apis.ErrOutOfBoundsValue(v, PanicWindowPercentageMin, + PanicWindowPercentageMax, PanicWindowPercentageAnnotationKey) + } + } + if v, ok := annotations[PanicThresholdPercentageAnnotationKey]; ok { + if fv, err := strconv.ParseFloat(v, 64); err != nil { + errs = errs.Also(apis.ErrInvalidValue(v, PanicThresholdPercentageAnnotationKey)) + } else if fv < PanicThresholdPercentageMin || fv > PanicThresholdPercentageMax { + errs = errs.Also(apis.ErrOutOfBoundsValue(v, PanicThresholdPercentageMin, PanicThresholdPercentageMax, + PanicThresholdPercentageAnnotationKey)) + } + } + + if v, ok := annotations[TargetAnnotationKey]; ok { + if fv, err := strconv.ParseFloat(v, 64); err != nil || fv < TargetMin { + errs = errs.Also(apis.ErrInvalidValue(v, TargetAnnotationKey)) + } + } + + if v, ok := annotations[TargetUtilizationPercentageKey]; ok { + if fv, err := strconv.ParseFloat(v, 64); err != nil { + errs = errs.Also(apis.ErrInvalidValue(v, TargetUtilizationPercentageKey)) + } else if fv < 1 || 
fv > 100 { + errs = errs.Also(apis.ErrOutOfBoundsValue(v, 1, 100, TargetUtilizationPercentageKey)) + } + } + + if v, ok := annotations[TargetBurstCapacityKey]; ok { + if fv, err := strconv.ParseFloat(v, 64); err != nil || fv < 0 && fv != -1 { + errs = errs.Also(apis.ErrInvalidValue(v, TargetBurstCapacityKey)) + } + } + return errs +} + +func validateWindows(annotations map[string]string) *apis.FieldError { + var errs *apis.FieldError + if w, ok := annotations[WindowAnnotationKey]; ok { + if annotations[ClassAnnotationKey] == HPA && annotations[MetricAnnotationKey] == CPU { + return apis.ErrInvalidKeyName(WindowAnnotationKey, fmt.Sprintf("%s for %s %s", HPA, MetricAnnotationKey, CPU)) + } + d, err := time.ParseDuration(w) + if err != nil { + errs = apis.ErrInvalidValue(w, WindowAnnotationKey) + } else if d < WindowMin || d > WindowMax { + errs = apis.ErrOutOfBoundsValue(w, WindowMin, WindowMax, WindowAnnotationKey) + } + } + return errs +} + +func validateMinMaxScale(annotations map[string]string) *apis.FieldError { + var errs *apis.FieldError + + min, err := getIntGE0(annotations, MinScaleAnnotationKey) + errs = errs.Also(err) + + max, err := getIntGE0(annotations, MaxScaleAnnotationKey) + errs = errs.Also(err) + + if max != 0 && max < min { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("maxScale=%d is less than minScale=%d", max, min), + Paths: []string{MaxScaleAnnotationKey, MinScaleAnnotationKey}, + }) + } + return errs +} + +func validateMetric(annotations map[string]string) *apis.FieldError { + if metric, ok := annotations[MetricAnnotationKey]; ok { + classValue := KPA + if c, ok := annotations[ClassAnnotationKey]; ok { + classValue = c + } + switch classValue { + case KPA: + switch metric { + case Concurrency, RPS: + return nil + } + case HPA: + switch metric { + case CPU, Concurrency, RPS: + return nil + } + default: + // Leave other classes of PodAutoscaler alone. 
+ return nil + } + return apis.ErrInvalidValue(metric, MetricAnnotationKey) + } + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation_test.go new file mode 100644 index 0000000000..e7ee91b64f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation_test.go @@ -0,0 +1,246 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaling + +import ( + "fmt" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestValidateScaleBoundAnnotations(t *testing.T) { + cases := []struct { + name string + annotations map[string]string + expectErr string + }{{ + name: "nil annotations", + annotations: nil, + }, { + name: "empty annotations", + annotations: map[string]string{}, + }, { + name: "minScale is 0", + annotations: map[string]string{MinScaleAnnotationKey: "0"}, + }, { + name: "maxScale is 0", + annotations: map[string]string{MaxScaleAnnotationKey: "0"}, + }, { + name: "minScale is -1", + annotations: map[string]string{MinScaleAnnotationKey: "-1"}, + expectErr: "expected 1 <= -1 <= 2147483647: " + MinScaleAnnotationKey, + }, { + name: "maxScale is -1", + annotations: map[string]string{MaxScaleAnnotationKey: "-1"}, + expectErr: "expected 1 <= -1 <= 2147483647: " + MaxScaleAnnotationKey, + }, { + name: "minScale is foo", + annotations: map[string]string{MinScaleAnnotationKey: "foo"}, + expectErr: "expected 1 <= foo <= 2147483647: " + MinScaleAnnotationKey, + }, { + name: "maxScale is bar", + annotations: map[string]string{MaxScaleAnnotationKey: "bar"}, + expectErr: "expected 1 <= bar <= 2147483647: " + MaxScaleAnnotationKey, + }, { + name: "max/minScale is bar", + annotations: map[string]string{MaxScaleAnnotationKey: "bar", MinScaleAnnotationKey: "bar"}, + expectErr: "expected 1 <= bar <= 2147483647: " + MaxScaleAnnotationKey + ", " + MinScaleAnnotationKey, + }, { + name: "minScale is 5", + annotations: map[string]string{MinScaleAnnotationKey: "5"}, + }, { + name: "maxScale is 2", + annotations: map[string]string{MaxScaleAnnotationKey: "2"}, + }, { + name: "minScale is 2, maxScale is 5", + annotations: map[string]string{MinScaleAnnotationKey: "2", MaxScaleAnnotationKey: "5"}, + }, { + name: "minScale is 5, maxScale is 2", + annotations: map[string]string{MinScaleAnnotationKey: "5", MaxScaleAnnotationKey: "2"}, + expectErr: "maxScale=2 is less than minScale=5: " 
+ MaxScaleAnnotationKey + ", " + MinScaleAnnotationKey, + }, { + name: "minScale is 0, maxScale is 0", + annotations: map[string]string{ + MinScaleAnnotationKey: "0", + MaxScaleAnnotationKey: "0", + }, + }, { + name: "panic window percentange bad", + annotations: map[string]string{PanicWindowPercentageAnnotationKey: "-1"}, + expectErr: "expected 1 <= -1 <= 100: " + PanicWindowPercentageAnnotationKey, + }, { + name: "panic window percentange bad2", + annotations: map[string]string{PanicWindowPercentageAnnotationKey: "202"}, + expectErr: "expected 1 <= 202 <= 100: " + PanicWindowPercentageAnnotationKey, + }, { + name: "panic window percentange bad3", + annotations: map[string]string{PanicWindowPercentageAnnotationKey: "fifty"}, + expectErr: "invalid value: fifty: " + PanicWindowPercentageAnnotationKey, + }, { + name: "panic window percentange good", + annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "210"}, + }, { + name: "panic threshold percentange bad2", + annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "109"}, + expectErr: "expected 110 <= 109 <= 1000: " + PanicThresholdPercentageAnnotationKey, + }, { + name: "panic threshold percentange bad2.5", + annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "10009"}, + expectErr: "expected 110 <= 10009 <= 1000: " + PanicThresholdPercentageAnnotationKey, + }, { + name: "panic threshold percentange bad3", + annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "fifty"}, + expectErr: "invalid value: fifty: " + PanicThresholdPercentageAnnotationKey, + }, { + name: "target negative", + annotations: map[string]string{TargetAnnotationKey: "-11"}, + expectErr: "invalid value: -11: " + TargetAnnotationKey, + }, { + name: "target 0", + annotations: map[string]string{TargetAnnotationKey: "0"}, + expectErr: "invalid value: 0: " + TargetAnnotationKey, + }, { + name: "target okay", + annotations: map[string]string{TargetAnnotationKey: "11"}, + }, { + name: 
"TBC negative", + annotations: map[string]string{TargetBurstCapacityKey: "-11"}, + expectErr: "invalid value: -11: " + TargetBurstCapacityKey, + }, { + name: "TBC 0", + annotations: map[string]string{TargetBurstCapacityKey: "0"}, + }, { + name: "TBC 19880709", + annotations: map[string]string{TargetBurstCapacityKey: "19870709"}, + }, { + name: "TBC -1", + annotations: map[string]string{TargetBurstCapacityKey: "-1"}, + }, { + name: "TBC invalid", + annotations: map[string]string{TargetBurstCapacityKey: "qarashen"}, + expectErr: "invalid value: qarashen: " + TargetBurstCapacityKey, + }, { + name: "TU too small", + annotations: map[string]string{TargetUtilizationPercentageKey: "0"}, + expectErr: "expected 1 <= 0 <= 100: " + TargetUtilizationPercentageKey, + }, { + name: "TU too big", + annotations: map[string]string{TargetUtilizationPercentageKey: "101"}, + expectErr: "expected 1 <= 101 <= 100: " + TargetUtilizationPercentageKey, + }, { + name: "TU invalid", + annotations: map[string]string{TargetUtilizationPercentageKey: "dghyak"}, + expectErr: "invalid value: dghyak: " + TargetUtilizationPercentageKey, + }, { + name: "window invalid", + annotations: map[string]string{WindowAnnotationKey: "jerry-was-a-racecar-driver"}, + expectErr: "invalid value: jerry-was-a-racecar-driver: " + WindowAnnotationKey, + }, { + name: "window too short", + annotations: map[string]string{WindowAnnotationKey: "1s"}, + expectErr: "expected 6s <= 1s <= 1h0m0s: " + WindowAnnotationKey, + }, { + name: "window too long", + annotations: map[string]string{WindowAnnotationKey: "365h"}, + expectErr: "expected 6s <= 365h <= 1h0m0s: " + WindowAnnotationKey, + }, { + name: "annotation /window is invalid for class HPA and metric CPU", + annotations: map[string]string{WindowAnnotationKey: "7s", ClassAnnotationKey: HPA, MetricAnnotationKey: CPU}, + expectErr: fmt.Sprintf(`invalid key name %q: %s for %s %s`, WindowAnnotationKey, HPA, MetricAnnotationKey, CPU), + }, { + name: "annotation /window is valid 
for class KPA", + annotations: map[string]string{WindowAnnotationKey: "7s", ClassAnnotationKey: KPA}, + expectErr: "", + }, { + name: "annotation /window is valid for class HPA and metric RPS", + annotations: map[string]string{WindowAnnotationKey: "7s", ClassAnnotationKey: HPA, MetricAnnotationKey: RPS}, + expectErr: "", + }, { + name: "annotation /window is valid for class HPA and metric Concurrency", + annotations: map[string]string{WindowAnnotationKey: "7s", ClassAnnotationKey: HPA, MetricAnnotationKey: Concurrency}, + expectErr: "", + }, { + name: "annotation /window is valid for other than HPA and KPA class", + annotations: map[string]string{WindowAnnotationKey: "7s", ClassAnnotationKey: "test"}, + expectErr: "", + }, { + name: "value too short and invalid class for /window annotation", + annotations: map[string]string{WindowAnnotationKey: "1s", ClassAnnotationKey: HPA, MetricAnnotationKey: CPU}, + expectErr: fmt.Sprintf(`invalid key name %q: %s for %s %s`, WindowAnnotationKey, HPA, MetricAnnotationKey, CPU), + }, { + name: "value too long and valid class for /window annotation", + annotations: map[string]string{WindowAnnotationKey: "365h", ClassAnnotationKey: KPA}, + expectErr: "expected 6s <= 365h <= 1h0m0s: " + WindowAnnotationKey, + }, { + name: "invalid format and valid class for /window annotation", + annotations: map[string]string{WindowAnnotationKey: "jerry-was-a-racecar-driver", ClassAnnotationKey: KPA}, + expectErr: "invalid value: jerry-was-a-racecar-driver: " + WindowAnnotationKey, + }, { + name: "all together now fail", + annotations: map[string]string{ + PanicThresholdPercentageAnnotationKey: "fifty", + PanicWindowPercentageAnnotationKey: "-11", + MinScaleAnnotationKey: "-4", + MaxScaleAnnotationKey: "never", + }, + expectErr: "expected 1 <= -11 <= 100: " + PanicWindowPercentageAnnotationKey + "\nexpected 1 <= -4 <= 2147483647: " + MinScaleAnnotationKey + "\nexpected 1 <= never <= 2147483647: " + MaxScaleAnnotationKey + "\ninvalid value: fifty: " 
+ PanicThresholdPercentageAnnotationKey, + }, { + name: "all together now, succeed", + annotations: map[string]string{ + PanicThresholdPercentageAnnotationKey: "125", + PanicWindowPercentageAnnotationKey: "75", + MinScaleAnnotationKey: "5", + MaxScaleAnnotationKey: "8", + WindowAnnotationKey: "1984s", + }, + }, { + name: "invalid metric for default class(KPA)", + annotations: map[string]string{MetricAnnotationKey: CPU}, + expectErr: "invalid value: cpu: " + MetricAnnotationKey, + }, { + name: "invalid metric for HPA class", + annotations: map[string]string{MetricAnnotationKey: "metrics", ClassAnnotationKey: HPA}, + expectErr: "invalid value: metrics: " + MetricAnnotationKey, + }, { + name: "valid class KPA with metric RPS", + annotations: map[string]string{MetricAnnotationKey: RPS}, + }, { + name: "valid class KPA with metric Concurrency", + annotations: map[string]string{MetricAnnotationKey: Concurrency}, + }, { + name: "valid class HPA with metric Concurrency", + annotations: map[string]string{ClassAnnotationKey: HPA, MetricAnnotationKey: Concurrency}, + }, { + name: "valid class HPA with metric CPU", + annotations: map[string]string{ClassAnnotationKey: HPA, MetricAnnotationKey: CPU}, + }, { + name: "valid class HPA with metric RPS", + annotations: map[string]string{ClassAnnotationKey: HPA, MetricAnnotationKey: RPS}, + }, { + name: "other than HPA and KPA class", + annotations: map[string]string{ClassAnnotationKey: "other", MetricAnnotationKey: RPS}, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got, want := ValidateAnnotations(c.annotations).Error(), c.expectErr; !reflect.DeepEqual(got, want) { + t.Errorf("Err = %q, want: %q, diff:\n%s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/register.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go similarity index 84% rename from 
test/vendor/github.com/knative/serving/pkg/apis/autoscaling/register.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go index 6807e7194a..88526ce583 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go @@ -19,8 +19,10 @@ package autoscaling import "time" const ( + // The internal autoscaling group name. This is used for CRDs. InternalGroupName = "autoscaling.internal.knative.dev" + // The public autoscaling group name. This is used for annotations, labels, etc. GroupName = "autoscaling.knative.dev" // ClassAnnotationKey is the annotation for the explicit class of autoscaler @@ -50,6 +52,8 @@ const ( Concurrency = "concurrency" // CPU is the amount of the requested cpu actually being consumed by the Pod. CPU = "cpu" + // RPS is the requests per second reaching the Pod. + RPS = "rps" // TargetAnnotationKey is the annotation to specify what metric value the // PodAutoscaler should attempt to maintain. For example, @@ -76,6 +80,23 @@ const ( // smaller than the stable window. Anything less than 6 second // isn't going to work well. WindowMin = 6 * time.Second + // WindowMax is the maximum permitted stable autoscaling window. + // This keeps the event horizon to a reasonable enough limit. + WindowMax = 1 * time.Hour + + // TargetUtilizationPercentageKey is the annotation which specifies the + // desired target resource utilization for the revision. + // TargetUtilization is a percentage in the 1 <= TU <= 100 range. + // This annotation takes precedence over the config map value. + TargetUtilizationPercentageKey = GroupName + "/targetUtilizationPercentage" + + // TargetBurstCapacityKey specifies the desired burst capacity for the + // revision. Possible values are: + // -1 -- infinite; + // 0 -- no TBC; + // >0 -- actual TBC. + // <0 && != -1 -- an error. 
+ TargetBurstCapacityKey = GroupName + "/targetBurstCapacity" // PanicWindowPercentageAnnotationKey is the annotation to // specify the time interval over which to calculate the average @@ -129,6 +150,10 @@ const ( // smallest useful value. PanicThresholdPercentageMin = 110.0 + // PanicThresholdPercentageMax is the counterpart to the PanicThresholdPercentageMin + // but bounding from above. + PanicThresholdPercentageMax = 1000.0 + // KPALabelKey is the label key attached to a K8s Service to hint to the KPA // which services/endpoints should trigger reconciles. KPALabelKey = GroupName + "/kpa" diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/doc.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/doc.go diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_defaults.go new file mode 100644 index 0000000000..635297923a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_defaults.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + + "knative.dev/pkg/apis" +) + +// SetDefaults sets defaults on the entire Metric if applicable. +func (m *Metric) SetDefaults(ctx context.Context) { + m.Spec.SetDefaults(apis.WithinSpec(ctx)) +} + +// SetDefaults sets defaults on the Metric's Spec if applicable. +func (ms *MetricSpec) SetDefaults(ctx context.Context) {} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle.go new file mode 100644 index 0000000000..838577a5b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +const ( + // MetricConditionReady is set when the Metric's latest + // underlying revision has reported readiness. + MetricConditionReady = apis.ConditionReady +) + +var condSet = apis.NewLivingConditionSet( + MetricConditionReady, +) + +// GetGroupVersionKind implements OwnerRefable. +func (m *Metric) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Metric") +} + +// GetCondition gets the condition `t`. 
+func (ms *MetricStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return condSet.Manage(ms).GetCondition(t) +} + +// InitializeConditions initializes the conditions of the Metric. +func (ms *MetricStatus) InitializeConditions() { + condSet.Manage(ms).InitializeConditions() +} + +// MarkMetricReady marks the metric status as ready +func (ms *MetricStatus) MarkMetricReady() { + condSet.Manage(ms).MarkTrue(MetricConditionReady) +} + +// MarkMetricNotReady marks the metric status as ready == Unknown +func (ms *MetricStatus) MarkMetricNotReady(reason, message string) { + condSet.Manage(ms).MarkUnknown(MetricConditionReady, reason, message) +} + +// MarkMetricFailed marks the metric status as failed +func (ms *MetricStatus) MarkMetricFailed(reason, message string) { + condSet.Manage(ms).MarkFalse(MetricConditionReady, reason, message) +} + +// IsReady looks at the conditions and if the condition MetricConditionReady +// is true +func (ms *MetricStatus) IsReady() bool { + return condSet.Manage(ms.duck()).IsHappy() +} + +func (ms *MetricStatus) duck() *duckv1.Status { + return (*duckv1.Status)(&ms.Status) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle_test.go new file mode 100644 index 0000000000..d03ce1051b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_lifecycle_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" + "knative.dev/serving/pkg/apis/autoscaling" +) + +func TestMetricDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Metric{}, test.t) + if err != nil { + t.Errorf("VerifyType(Metric, %T) = %v", test.t, err) + } + }) + } +} + +func TestMetricIsReady(t *testing.T) { + cases := []struct { + name string + status MetricStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: MetricStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: MetricStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "FooCondition", + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: MetricStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: MetricConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: MetricStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: MetricConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: MetricStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: MetricConditionReady, + }}, + }, + }, + isReady: false, + }, 
{ + name: "True condition status should be ready", + status: MetricStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: MetricConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("Ready = %v, want: %v", a, e) + } + }) + } +} + +func TestMetricGetSetCondition(t *testing.T) { + ms := &MetricStatus{} + if a := ms.GetCondition(MetricConditionReady); a != nil { + t.Errorf("empty MetricStatus returned %v when expected nil", a) + } + mc := &apis.Condition{ + Type: MetricConditionReady, + Status: corev1.ConditionTrue, + } + ms.MarkMetricReady() + if diff := cmp.Diff(mc, ms.GetCondition(MetricConditionReady), cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime")); diff != "" { + t.Errorf("GetCondition refs diff (-want +got): %v", diff) + } +} + +func TestTypicalFlowWithMetricCondition(t *testing.T) { + m := &MetricStatus{} + m.InitializeConditions() + apitestv1.CheckConditionOngoing(m.duck(), MetricConditionReady, t) + + const ( + wantReason = "reason" + wantMessage = "the error message" + ) + m.MarkMetricFailed(wantReason, wantMessage) + apitestv1.CheckConditionFailed(m.duck(), MetricConditionReady, t) + if got := m.GetCondition(MetricConditionReady); got == nil || got.Reason != wantReason || got.Message != wantMessage { + t.Errorf("MarkMetricFailed = %v, wantReason %v, wantMessage %v", got, wantReason, wantMessage) + } + + m.MarkMetricNotReady(wantReason, wantMessage) + apitestv1.CheckConditionOngoing(m.duck(), MetricConditionReady, t) + if got := m.GetCondition(MetricConditionReady); got == nil || got.Reason != wantReason || got.Message != wantMessage { + t.Errorf("MarkMetricNotReady = %v, wantReason %v, wantMessage %v", got, wantReason, wantMessage) + } + + m.MarkMetricReady() + apitestv1.CheckConditionSucceeded(m.duck(), MetricConditionReady, t) +} + +func 
TestMetricGetGroupVersionKind(t *testing.T) { + r := &Metric{} + want := schema.GroupVersionKind{ + Group: autoscaling.InternalGroupName, + Version: "v1alpha1", + Kind: "Metric", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_types.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_types.go new file mode 100644 index 0000000000..dca3e6d7c7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_types.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" +) + +// Metric represents a resource to configure the metric collector with. +// +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Metric struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds the desired state of the Metric (from the client). + // +optional + Spec MetricSpec `json:"spec,omitempty"` + + // Status communicates the observed state of the Metric (from the controller). 
+ // +optional + Status MetricStatus `json:"status,omitempty"` +} + +// Verify that Metric adheres to the appropriate interfaces. +var ( + // Check that Metric can be validated and can be defaulted. + _ apis.Validatable = (*Metric)(nil) + _ apis.Defaultable = (*Metric)(nil) + + // Check that we can create OwnerReferences to a Metric. + _ kmeta.OwnerRefable = (*Metric)(nil) +) + +// MetricSpec contains all values a metric collector needs to operate. +type MetricSpec struct { + // StableWindow is the aggregation window for metrics in a stable state. + StableWindow time.Duration `json:"stableWindow"` + // PanicWindow is the aggregation window for metrics where quick reactions are needed. + PanicWindow time.Duration `json:"panicWindow"` + // ScrapeTarget is the K8s service that publishes the metric endpoint. + ScrapeTarget string `json:"scrapeTarget"` +} + +// MetricStatus reflects the status of metric collection for this specific entity. +type MetricStatus struct { + duckv1.Status `json:",inline"` +} + +// MetricList is a list of Metric resources +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type MetricList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Metric `json:"items"` +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation.go new file mode 100644 index 0000000000..e0c8d182d9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation.go @@ -0,0 +1,39 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" +) + +// Validate validates the entire Metric. +func (m *Metric) Validate(ctx context.Context) *apis.FieldError { + errs := serving.ValidateObjectMetadata(m.GetObjectMeta()).ViaField("metadata") + return errs.Also(m.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) +} + +// Validate validates Metric's Spec. +func (ms *MetricSpec) Validate(ctx context.Context) *apis.FieldError { + if equality.Semantic.DeepEqual(ms, &MetricSpec{}) { + return apis.ErrMissingField(apis.CurrentField) + } + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation_test.go new file mode 100644 index 0000000000..ad48f96946 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/metric_validation_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" +) + +func TestMetricValidation(t *testing.T) { + tests := []struct { + name string + m *Metric + want *apis.FieldError + }{{ + name: "invalid BYO name", + m: &Metric{ + ObjectMeta: metav1.ObjectMeta{ + Name: "@-invalid", + }, + Spec: MetricSpec{ + ScrapeTarget: "hello-12ft", + }, + }, + want: &apis.FieldError{ + Message: fmt.Sprintf("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]"), + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid BYO generateName", + m: &Metric{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "@-invalid", + }, + Spec: MetricSpec{ + ScrapeTarget: "hello-12ft", + }, + }, + want: &apis.FieldError{ + Message: fmt.Sprintf("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]"), + Paths: []string{"metadata.generateName"}, + }, + }, { + name: "empty name or generateName", + m: &Metric{ + ObjectMeta: metav1.ObjectMeta{ + Name: "", + GenerateName: "", + }, + Spec: MetricSpec{ + ScrapeTarget: "hello-12ft", + }, + }, + want: &apis.FieldError{ + Message: fmt.Sprintf("name or generateName is required"), + Paths: []string{"metadata.name"}, + }, + }, { + name: "empty metric spec", + m: &Metric{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: MetricSpec{}, + }, + want: &apis.FieldError{ + Message: fmt.Sprintf("missing field(s)"), + Paths: []string{"spec"}, + }, + }, { + name: "valid name and spec", + m: &Metric{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: MetricSpec{ + ScrapeTarget: "hello-12ft", + }, + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got, want := test.m.Validate(context.Background()).Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go index a13de8fa9c..9213116351 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults.go @@ -19,8 +19,8 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/autoscaling" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/autoscaling" ) func defaultMetric(class string) string { diff --git 
a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults_test.go new file mode 100644 index 0000000000..0d16b1cab3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_defaults_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/autoscaling" +) + +func TestPodAutoscalerDefaulting(t *testing.T) { + tests := []struct { + name string + in *PodAutoscaler + want *PodAutoscaler + }{{ + name: "empty", + in: &PodAutoscaler{}, + want: &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }, + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + }, + }, { + name: "no overwrite", + in: &PodAutoscaler{ + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 1, + }, + }, + want: &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }, + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 1, + }, + }, + }, { + name: "partially initialized", + in: 
&PodAutoscaler{ + Spec: PodAutoscalerSpec{}, + }, + want: &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }, + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + }, + }, { + name: "hpa class is not overwritten and defaults to cpu", + in: &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + }, + }, + }, + want: &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + autoscaling.MetricAnnotationKey: autoscaling.CPU, + }, + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go similarity index 68% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go index 8c465b5037..53203e56fb 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go @@ -18,15 +18,15 @@ package v1alpha1 import ( "fmt" - "math" "strconv" "time" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/serving/pkg/apis/autoscaling" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + duckv1 
"knative.dev/pkg/apis/duck/v1" + "knative.dev/serving/pkg/apis/autoscaling" ) var podCondSet = apis.NewLivingConditionSet( @@ -68,68 +68,66 @@ func (pa *PodAutoscaler) annotationInt32(key string) int32 { func (pa *PodAutoscaler) annotationFloat64(key string) (float64, bool) { if s, ok := pa.Annotations[key]; ok { - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f, true - } + f, err := strconv.ParseFloat(s, 64) + return f, err == nil } return 0.0, false } // ScaleBounds returns scale bounds annotations values as a tuple: // `(min, max int32)`. The value of 0 for any of min or max means the bound is -// not set +// not set. +// Note: min will be ignored if the PA is not reachable func (pa *PodAutoscaler) ScaleBounds() (min, max int32) { - min = pa.annotationInt32(autoscaling.MinScaleAnnotationKey) + if pa.Spec.Reachability != ReachabilityUnreachable { + min = pa.annotationInt32(autoscaling.MinScaleAnnotationKey) + } max = pa.annotationInt32(autoscaling.MaxScaleAnnotationKey) + return } // Target returns the target annotation value or false if not present, or invalid. func (pa *PodAutoscaler) Target() (float64, bool) { - if s, ok := pa.Annotations[autoscaling.TargetAnnotationKey]; ok { - if ta, err := strconv.ParseFloat(s, 64 /*width*/); err == nil { - // Max check for backwards compatibility. - if ta < 1 || ta > math.MaxInt32 { - return 0, false - } - return ta, true - } + return pa.annotationFloat64(autoscaling.TargetAnnotationKey) +} + +// TargetUtilization returns the target capacity utilization as a fraction, +// if the corresponding annotation is set. +func (pa *PodAutoscaler) TargetUtilization() (float64, bool) { + if tu, ok := pa.annotationFloat64(autoscaling.TargetUtilizationPercentageKey); ok { + return tu / 100, true } return 0, false } +// TargetBC returns the target burst capacity, +// if the corresponding annotation is set. +func (pa *PodAutoscaler) TargetBC() (float64, bool) { + // The value is validated in the webhook. 
+ return pa.annotationFloat64(autoscaling.TargetBurstCapacityKey) +} + // Window returns the window annotation value or false if not present. func (pa *PodAutoscaler) Window() (window time.Duration, ok bool) { + // The value is validated in the webhook. if s, ok := pa.Annotations[autoscaling.WindowAnnotationKey]; ok { d, err := time.ParseDuration(s) - if err != nil { - return 0, false - } - if d < autoscaling.WindowMin { - return 0, false - } - return d, true + return d, err == nil } return 0, false } // PanicWindowPercentage returns panic window annotation value or false if not present. func (pa *PodAutoscaler) PanicWindowPercentage() (percentage float64, ok bool) { - percentage, ok = pa.annotationFloat64(autoscaling.PanicWindowPercentageAnnotationKey) - if !ok || percentage > autoscaling.PanicWindowPercentageMax || - percentage < autoscaling.PanicWindowPercentageMin { - return 0, false - } - return percentage, ok + // The value is validated in the webhook. + return pa.annotationFloat64(autoscaling.PanicWindowPercentageAnnotationKey) } // PanicThresholdPercentage return the panic target annotation value or false if not present. func (pa *PodAutoscaler) PanicThresholdPercentage() (percentage float64, ok bool) { - percentage, ok = pa.annotationFloat64(autoscaling.PanicThresholdPercentageAnnotationKey) - if !ok || percentage < autoscaling.PanicThresholdPercentageMin { - return 0, false - } - return percentage, ok + // The value is validated in the webhook. + return pa.annotationFloat64(autoscaling.PanicThresholdPercentageAnnotationKey) } // IsReady looks at the conditions and if the Status has a condition @@ -192,24 +190,48 @@ func (pas *PodAutoscalerStatus) MarkResourceFailedCreation(kind, name string) { // CanScaleToZero checks whether the pod autoscaler has been in an inactive state // for at least the specified grace period. 
-func (pas *PodAutoscalerStatus) CanScaleToZero(gracePeriod time.Duration) bool { - return pas.inStatusFor(corev1.ConditionFalse, gracePeriod) +func (pas *PodAutoscalerStatus) CanScaleToZero(now time.Time, gracePeriod time.Duration) bool { + return pas.inStatusFor(corev1.ConditionFalse, now, gracePeriod) > 0 +} + +// ActiveFor returns the time PA spent being active. +func (pas *PodAutoscalerStatus) ActiveFor(now time.Time) time.Duration { + return pas.inStatusFor(corev1.ConditionTrue, now, 0) } -// CanMarkInactive checks whether the pod autoscaler has been in an active state +// CanFailActivation checks whether the pod autoscaler has been activating // for at least the specified idle period. -func (pas *PodAutoscalerStatus) CanMarkInactive(idlePeriod time.Duration) bool { - return pas.inStatusFor(corev1.ConditionTrue, idlePeriod) +func (pas *PodAutoscalerStatus) CanFailActivation(now time.Time, idlePeriod time.Duration) bool { + return pas.inStatusFor(corev1.ConditionUnknown, now, idlePeriod) > 0 } -// inStatusFor returns true if the PodAutoscalerStatus's Active condition has stayed in -// the specified status for at least the specified duration. Otherwise it returns false, +// inStatusFor returns positive duration if the PodAutoscalerStatus's Active condition has stayed in +// the specified status for at least the specified duration. Otherwise it returns negative duration, // including when the status is undetermined (Active condition is not found.) 
-func (pas *PodAutoscalerStatus) inStatusFor(status corev1.ConditionStatus, dur time.Duration) bool { +func (pas *PodAutoscalerStatus) inStatusFor(status corev1.ConditionStatus, now time.Time, dur time.Duration) time.Duration { cond := pas.GetCondition(PodAutoscalerConditionActive) - return cond != nil && cond.Status == status && time.Now().After(cond.LastTransitionTime.Inner.Add(dur)) + if cond == nil || cond.Status != status { + return -1 + } + return now.Sub(cond.LastTransitionTime.Inner.Add(dur)) } -func (pas *PodAutoscalerStatus) duck() *duckv1beta1.Status { - return (*duckv1beta1.Status)(&pas.Status) +func (pas *PodAutoscalerStatus) duck() *duckv1.Status { + return (*duckv1.Status)(&pas.Status) +} + +// GetDesiredScale returns the desired scale if ever set, or -1. +func (pas *PodAutoscalerStatus) GetDesiredScale() int32 { + if pas.DesiredScale != nil { + return *pas.DesiredScale + } + return -1 +} + +// GetActualScale returns the desired scale if ever set, or -1. +func (pas *PodAutoscalerStatus) GetActualScale() int32 { + if pas.ActualScale != nil { + return *pas.ActualScale + } + return -1 } diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle_test.go new file mode 100644 index 0000000000..abfd4de7f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_lifecycle_test.go @@ -0,0 +1,986 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/autoscaling" +) + +func TestPodAutoscalerDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&PodAutoscaler{}, test.t) + if err != nil { + t.Errorf("VerifyType(PodAutoscaler, %T) = %v", test.t, err) + } + }) + } +} + +func TestGeneration(t *testing.T) { + r := PodAutoscaler{} + if a := r.GetGeneration(); a != 0 { + t.Errorf("empty pa generation should be 0 was: %d", a) + } + + r.SetGeneration(5) + if e, a := int64(5), r.GetGeneration(); e != a { + t.Errorf("getgeneration mismatch expected: %d got: %d", e, a) + } + +} + +func TestCanScaleToZero(t *testing.T) { + now := time.Now() + cases := []struct { + name string + status PodAutoscalerStatus + result bool + grace time.Duration + }{{ + name: "empty status", + status: PodAutoscalerStatus{}, + result: false, + grace: 10 * time.Second, + }, { + name: "active condition", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }}, + }, + }, + result: false, + grace: 10 * time.Second, + }, { + name: "inactive condition (no LTT)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionFalse, + // No LTT = beginning of time, so for sure we can. 
+ }}, + }, + }, + result: true, + grace: 10 * time.Second, + }, { + name: "inactive condition (LTT longer than grace period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionFalse, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-30 * time.Second)), + }, + // LTT = 30 seconds ago. + }}, + }, + }, + result: true, + grace: 10 * time.Second, + }, { + name: "inactive condition (LTT less than grace period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionFalse, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-10 * time.Second)), + }, + // LTT = 10 seconds ago. + }}, + }, + }, + result: false, + grace: 30 * time.Second, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.result, tc.status.CanScaleToZero(now, tc.grace); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestActiveFor(t *testing.T) { + now := time.Now() + cases := []struct { + name string + status PodAutoscalerStatus + result time.Duration + }{{ + name: "empty status", + status: PodAutoscalerStatus{}, + result: -1, + }, { + name: "unknown condition", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionUnknown, + }}, + }, + }, + result: -1, + }, { + name: "inactive condition", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionFalse, + }}, + }, + }, + result: -1, + }, { + name: "active condition (no LTT)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + // 
No LTT = beginning of time, so for sure we can. + }}, + }, + }, + result: time.Since(time.Time{}), + }, { + name: "active condition (LTT longer than idle period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-30 * time.Second)), + }, + // LTT = 30 seconds ago. + }}, + }, + }, + result: 30 * time.Second, + }, { + name: "active condition (LTT less than idle period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-10 * time.Second)), + }, + // LTT = 10 seconds ago. + }}, + }, + }, + result: 10 * time.Second, + }} + + for _, tc := range cases { + if got, want := tc.status.ActiveFor(now), tc.result; absDiff(got, want) > 10*time.Millisecond { + t.Errorf("ActiveFor = %v, want: %v", got, want) + } + } +} + +func absDiff(a, b time.Duration) time.Duration { + a -= b + if a < 0 { + a *= -1 + } + return a +} + +func TestCanFailActivation(t *testing.T) { + now := time.Now() + cases := []struct { + name string + status PodAutoscalerStatus + result bool + grace time.Duration + }{{ + name: "empty status", + status: PodAutoscalerStatus{}, + result: false, + grace: 10 * time.Second, + }, { + name: "active condition", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }}, + }, + }, + result: false, + grace: 10 * time.Second, + }, { + name: "activating condition (no LTT)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionUnknown, + // No LTT = beginning of time, so for sure we can. 
+ }}, + }, + }, + result: true, + grace: 10 * time.Second, + }, { + name: "activating condition (LTT longer than grace period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionUnknown, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-30 * time.Second)), + }, + // LTT = 30 seconds ago. + }}, + }, + }, + result: true, + grace: 10 * time.Second, + }, { + name: "activating condition (LTT less than grace period ago)", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionUnknown, + LastTransitionTime: apis.VolatileTime{ + Inner: metav1.NewTime(now.Add(-10 * time.Second)), + }, + // LTT = 10 seconds ago. + }}, + }, + }, + result: false, + grace: 30 * time.Second, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.result, tc.status.CanFailActivation(now, tc.grace); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestIsActivating(t *testing.T) { + cases := []struct { + name string + status PodAutoscalerStatus + isActivating bool + }{{ + name: "empty status", + status: PodAutoscalerStatus{}, + isActivating: false, + }, { + name: "active=unknown", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isActivating: true, + }, { + name: "active=true", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }}, + }, + }, + isActivating: false, + }, { + name: "active=false", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }}, + }, 
+ }, + isActivating: false, + }} + + for _, tc := range cases { + if e, a := tc.isActivating, tc.status.IsActivating(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + } +} + +func TestIsReady(t *testing.T) { + cases := []struct { + name string + status PodAutoscalerStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: PodAutoscalerStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }, { + Type: PodAutoscalerConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, 
{ + name: "Multiple conditions with ready status false should not be ready", + status: PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: PodAutoscalerConditionActive, + Status: corev1.ConditionTrue, + }, { + Type: PodAutoscalerConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + + for _, tc := range cases { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + } +} + +func TestTargetAnnotation(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + wantTarget float64 + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + wantTarget: 0, + wantOK: false, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.TargetAnnotationKey: "1", + }), + wantTarget: 1, + wantOK: true, + }, { + name: "present float", + pa: pa(map[string]string{ + autoscaling.TargetAnnotationKey: "19.82", + }), + wantTarget: 19.82, + wantOK: true, + }, { + name: "invalid format", + pa: pa(map[string]string{ + autoscaling.TargetAnnotationKey: "sandwich", + }), + wantTarget: 0, + wantOK: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gotTarget, gotOK := tc.pa.Target() + if gotTarget != tc.wantTarget { + t.Errorf("got target: %v wanted: %v", gotTarget, tc.wantTarget) + } + if gotOK != tc.wantOK { + t.Errorf("got ok: %v wanted %v", gotOK, tc.wantOK) + } + }) + } +} + +func TestScaleBounds(t *testing.T) { + cases := []struct { + name string + min string + max string + reachability ReachabilityType + wantMin int32 + wantMax int32 + }{{ + name: "present", + min: "1", + max: "100", + wantMin: 1, + wantMax: 100, + }, { + name: "absent", + wantMin: 0, + wantMax: 0, + }, { + name: "only min", + min: "1", + wantMin: 1, + wantMax: 0, + }, { + name: "only max", + max: "1", + wantMin: 0, + wantMax: 1, + }, { + name: "reachable", + min: "1", + max: "100", + reachability: ReachabilityReachable, + 
wantMin: 1, + wantMax: 100, + }, { + name: "unreachable", + min: "1", + max: "100", + reachability: ReachabilityUnreachable, + wantMin: 0, + wantMax: 100, + }, { + name: "malformed", + min: "ham", + max: "sandwich", + wantMin: 0, + wantMax: 0, + }, { + name: "too small", + min: "-1", + max: "-1", + wantMin: 0, + wantMax: 0, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + pa := pa(map[string]string{}) + if tc.min != "" { + pa.Annotations[autoscaling.MinScaleAnnotationKey] = tc.min + } + if tc.max != "" { + pa.Annotations[autoscaling.MaxScaleAnnotationKey] = tc.max + } + pa.Spec.Reachability = tc.reachability + + min, max := pa.ScaleBounds() + + if min != tc.wantMin { + t.Errorf("got min: %v wanted: %v", min, tc.wantMin) + } + if max != tc.wantMax { + t.Errorf("got max: %v wanted: %v", max, tc.wantMax) + } + }) + } +} + +func TestMarkResourceNotOwned(t *testing.T) { + pa := pa(map[string]string{}) + pa.Status.MarkResourceNotOwned("doesn't", "matter") + active := pa.Status.GetCondition("Active") + if active.Status != corev1.ConditionFalse { + t.Errorf("TestMarkResourceNotOwned expected active.Status: False got: %v", active.Status) + } + if active.Reason != "NotOwned" { + t.Errorf("TestMarkResourceNotOwned expected active.Reason: NotOwned got: %v", active.Reason) + } +} + +func TestMarkResourceFailedCreation(t *testing.T) { + pa := &PodAutoscalerStatus{} + pa.MarkResourceFailedCreation("doesn't", "matter") + apitestv1.CheckConditionFailed(pa.duck(), PodAutoscalerConditionActive, t) + + active := pa.GetCondition("Active") + if active.Status != corev1.ConditionFalse { + t.Errorf("TestMarkResourceFailedCreation expected active.Status: False got: %v", active.Status) + } + if active.Reason != "FailedCreate" { + t.Errorf("TestMarkResourceFailedCreation expected active.Reason: FailedCreate got: %v", active.Reason) + } +} + +func TestClass(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + want string + }{{ + name: "kpa 
class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }), + want: autoscaling.KPA, + }, { + name: "hpa class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + }), + want: autoscaling.HPA, + }, { + name: "default class", + pa: pa(map[string]string{}), + want: autoscaling.KPA, + }, { + name: "custom class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: "yolo.sandwich.com", + }), + want: "yolo.sandwich.com", + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := tc.pa.Class() + if got != tc.want { + t.Errorf("got class: %q wanted: %q", got, tc.want) + } + }) + } +} + +func TestMetric(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + want string + }{{ + name: "default class, annotation set", + pa: pa(map[string]string{ + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }), + want: autoscaling.Concurrency, + }, { + name: "hpa class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + }), + want: autoscaling.CPU, + }, { + name: "kpa class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }), + want: autoscaling.Concurrency, + }, { + name: "custom class", + pa: pa(map[string]string{ + autoscaling.ClassAnnotationKey: "yolo.sandwich.com", + }), + want: "", + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := tc.pa.Metric() + if got != tc.want { + t.Errorf("Metric() = %q, want %q", got, tc.want) + } + }) + } +} + +func TestWindowAnnotation(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + wantWindow time.Duration + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + wantWindow: 0, + wantOK: false, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.WindowAnnotationKey: "120s", + }), + wantWindow: time.Second * 120, + wantOK: true, + }, { + name: "invalid", + pa: 
pa(map[string]string{ + autoscaling.WindowAnnotationKey: "365d", + }), + wantWindow: 0, + wantOK: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gotWindow, gotOK := tc.pa.Window() + if gotWindow != tc.wantWindow { + t.Errorf("%q expected target: %v got: %v", tc.name, tc.wantWindow, gotWindow) + } + if gotOK != tc.wantOK { + t.Errorf("%q expected ok: %v got %v", tc.name, tc.wantOK, gotOK) + } + }) + } +} + +func TestPanicWindowPercentageAnnotation(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + wantPercentage float64 + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + wantPercentage: 0.0, + wantOK: false, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.PanicWindowPercentageAnnotationKey: "10.0", + }), + wantPercentage: 10.0, + wantOK: true, + }, { + name: "malformed", + pa: pa(map[string]string{ + autoscaling.PanicWindowPercentageAnnotationKey: "sandwich", + }), + wantPercentage: 0.0, + wantOK: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gotPercentage, gotOK := tc.pa.PanicWindowPercentage() + if gotPercentage != tc.wantPercentage { + t.Errorf("%q expected target: %v got: %v", tc.name, tc.wantPercentage, gotPercentage) + } + if gotOK != tc.wantOK { + t.Errorf("%q expected ok: %v got %v", tc.name, tc.wantOK, gotOK) + } + }) + } +} + +func TestTargetUtilization(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + want float64 + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + want: 0.0, + wantOK: false, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.TargetUtilizationPercentageKey: "10.0", + }), + want: .1, + wantOK: true, + }, { + name: "malformed", + pa: pa(map[string]string{ + autoscaling.TargetUtilizationPercentageKey: "NPH", + }), + want: 0.0, + wantOK: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got, gotOK := 
tc.pa.TargetUtilization() + if got, want := got, tc.want; got != want { + t.Errorf("%q target utilization: %v want: %v", tc.name, got, want) + } + if gotOK != tc.wantOK { + t.Errorf("%q expected ok: %v got %v", tc.name, tc.wantOK, gotOK) + } + }) + } +} + +func TestPanicThresholdPercentage(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + wantPercentage float64 + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + wantPercentage: 0.0, + wantOK: false, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.PanicThresholdPercentageAnnotationKey: "300.0", + }), + wantPercentage: 300.0, + wantOK: true, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gotPercentage, gotOK := tc.pa.PanicThresholdPercentage() + if gotPercentage != tc.wantPercentage { + t.Errorf("%q expected target: %v got: %v", tc.name, tc.wantPercentage, gotPercentage) + } + if gotOK != tc.wantOK { + t.Errorf("%q expected ok: %v got %v", tc.name, tc.wantOK, gotOK) + } + }) + } +} + +func pa(annotations map[string]string) *PodAutoscaler { + p := &PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + Annotations: annotations, + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + Status: PodAutoscalerStatus{}, + } + return p +} + +func TestTypicalFlow(t *testing.T) { + r := &PodAutoscalerStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), PodAutoscalerConditionActive, t) + apitestv1.CheckConditionOngoing(r.duck(), PodAutoscalerConditionReady, t) + + // When we see traffic, mark ourselves active. + r.MarkActive() + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionReady, t) + + // Check idempotency. 
+ r.MarkActive() + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionReady, t) + + // When we stop seeing traffic, mark outselves inactive. + r.MarkInactive("TheReason", "the message") + apitestv1.CheckConditionFailed(r.duck(), PodAutoscalerConditionActive, t) + if !r.IsInactive() { + t.Errorf("IsInactive was not set.") + } + apitestv1.CheckConditionFailed(r.duck(), PodAutoscalerConditionReady, t) + + // When traffic hits the activator and we scale up the deployment we mark + // ourselves as activating. + r.MarkActivating("Activating", "Red team, GO!") + apitestv1.CheckConditionOngoing(r.duck(), PodAutoscalerConditionActive, t) + apitestv1.CheckConditionOngoing(r.duck(), PodAutoscalerConditionReady, t) + + // When the activator successfully forwards traffic to the deployment, + // we mark ourselves as active once more. + r.MarkActive() + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), PodAutoscalerConditionReady, t) +} + +func TestTargetBC(t *testing.T) { + cases := []struct { + name string + pa *PodAutoscaler + want float64 + wantOK bool + }{{ + name: "not present", + pa: pa(map[string]string{}), + want: 0.0, + }, { + name: "present", + pa: pa(map[string]string{ + autoscaling.TargetBurstCapacityKey: "101.0", + }), + want: 101, + wantOK: true, + }, { + name: "present 0", + pa: pa(map[string]string{ + autoscaling.TargetBurstCapacityKey: "0", + }), + want: 0, + wantOK: true, + }, { + name: "present -1", + pa: pa(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + }), + want: -1, + wantOK: true, + }, { + name: "malformed", + pa: pa(map[string]string{ + autoscaling.TargetBurstCapacityKey: "NPH", + }), + want: 0.0, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got, gotOK := tc.pa.TargetBC() + if got, want := got, tc.want; got != want { + t.Errorf("%q target 
burst capacity: %v want: %v", tc.name, got, want) + } + if gotOK != tc.wantOK { + t.Errorf("%q expected ok: %v got %v", tc.name, tc.wantOK, gotOK) + } + }) + } +} + +func TestScaleStatus(t *testing.T) { + pas := &PodAutoscalerStatus{} + if got, want := pas.GetDesiredScale(), int32(-1); got != want { + t.Errorf("GetDesiredScale = %d, want: %v", got, want) + } + pas.DesiredScale = ptr.Int32(19980709) + if got, want := pas.GetDesiredScale(), int32(19980709); got != want { + t.Errorf("GetDesiredScale = %d, want: %v", got, want) + } + + if got, want := pas.GetActualScale(), int32(-1); got != want { + t.Errorf("GetActualScale = %d, want: %v", got, want) + } + pas.ActualScale = ptr.Int32(20060907) + if got, want := pas.GetActualScale(), int32(20060907); got != want { + t.Errorf("GetActualScale = %d, want: %v", got, want) + } +} + +func TestPodAutoscalerGetGroupVersionKind(t *testing.T) { + p := &PodAutoscaler{} + want := schema.GroupVersionKind{ + Group: autoscaling.InternalGroupName, + Version: "v1alpha1", + Kind: "PodAutoscaler", + } + if got := p.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go similarity index 74% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go index 5d50ffa04d..9f0545417b 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_types.go @@ -17,14 +17,12 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" - net "github.com/knative/serving/pkg/apis/networking" - servingv1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - servingv1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + net "knative.dev/serving/pkg/apis/networking" ) // +genclient @@ -58,6 +56,22 @@ var ( _ kmeta.OwnerRefable = (*PodAutoscaler)(nil) ) +// ReachabilityType is the enumeration type for the different states of reachability +// to the `ScaleTarget` of a `PodAutoscaler` +type ReachabilityType string + +const ( + // ReachabilityUnknown means the reachability of the `ScaleTarget` is unknown. + // Used when the reachability cannot be determined, eg. during activation. + ReachabilityUnknown ReachabilityType = "" + + // ReachabilityReachable means the `ScaleTarget` is reachable, ie. it has an active route. + ReachabilityReachable ReachabilityType = "Reachable" + + // ReachabilityReachable means the `ScaleTarget` is not reachable, ie. it does not have an active route. + ReachabilityUnreachable ReachabilityType = "Unreachable" +) + // PodAutoscalerSpec holds the desired state of the PodAutoscaler (from the client). type PodAutoscalerSpec struct { // DeprecatedGeneration was used prior in Kubernetes versions <1.11 @@ -71,28 +85,23 @@ type PodAutoscalerSpec struct { // +optional DeprecatedGeneration int64 `json:"generation,omitempty"` - // DeprecatedConcurrencyModel no longer does anything, use ContainerConcurrency. - // +optional - DeprecatedConcurrencyModel servingv1alpha1.RevisionRequestConcurrencyModelType `json:"concurrencyModel,omitempty"` - // ContainerConcurrency specifies the maximum allowed // in-flight (concurrent) requests per container of the Revision. 
// Defaults to `0` which means unlimited concurrency. - // This field replaces ConcurrencyModel. A value of `1` - // is equivalent to `Single` and `0` is equivalent to `Multi`. // +optional - ContainerConcurrency servingv1beta1.RevisionContainerConcurrencyType `json:"containerConcurrency,omitempty"` + ContainerConcurrency int64 `json:"containerConcurrency,omitempty"` // ScaleTargetRef defines the /scale-able resource that this PodAutoscaler // is responsible for quickly right-sizing. ScaleTargetRef corev1.ObjectReference `json:"scaleTargetRef"` - // DeprecatedServiceName holds the name of a core Kubernetes Service resource that - // load balances over the pods referenced by the ScaleTargetRef. - DeprecatedServiceName string `json:"serviceName"` + // Reachable specifies whether or not the `ScaleTargetRef` can be reached (ie. has a route). + // Defaults to `ReachabilityUnknown` + // +optional + Reachability ReachabilityType `json:"reachability,omitempty"` // The application-layer protocol. Matches `ProtocolType` inferred from the revision spec. - ProtocolType net.ProtocolType + ProtocolType net.ProtocolType `json:"protocolType"` } const ( @@ -105,7 +114,7 @@ const ( // PodAutoscalerStatus communicates the observed state of the PodAutoscaler (from the controller). type PodAutoscalerStatus struct { - duckv1beta1.Status + duckv1.Status `json:",inline"` // ServiceName is the K8s Service name that serves the revision, scaled by this PA. // The service is created and owned by the ServerlessService object owned by this PA. @@ -114,6 +123,12 @@ type PodAutoscalerStatus struct { // MetricsServiceName is the K8s Service name that provides revision metrics. // The service is managed by the PA object. MetricsServiceName string `json:"metricsServiceName"` + + // DesiredScale shows the current desired number of replicas for the revision. + DesiredScale *int32 `json:"desiredScale,omitempty"` + + // ActualScale shows the actual number of replicas for the revision. 
+ ActualScale *int32 `json:"actualScale,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go new file mode 100644 index 0000000000..31b2bb957a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" +) + +func (pa *PodAutoscaler) Validate(ctx context.Context) *apis.FieldError { + errs := serving.ValidateObjectMetadata(pa.GetObjectMeta()).ViaField("metadata") + return errs.Also(pa.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) +} + +// Validate validates PodAutoscaler Spec. 
+func (pa *PodAutoscalerSpec) Validate(ctx context.Context) *apis.FieldError { + if equality.Semantic.DeepEqual(pa, &PodAutoscalerSpec{}) { + return apis.ErrMissingField(apis.CurrentField) + } + return serving.ValidateNamespacedObjectReference(&pa.ScaleTargetRef).ViaField("scaleTargetRef").Also(serving.ValidateContainerConcurrency(&pa.ContainerConcurrency).ViaField("containerConcurrency")).Also(validateSKSFields(ctx, pa)) +} + +func validateSKSFields(ctx context.Context, rs *PodAutoscalerSpec) (errs *apis.FieldError) { + return errs.Also(rs.ProtocolType.Validate(ctx)).ViaField("protocolType") +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation_test.go new file mode 100644 index 0000000000..68574c2e81 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/pa_validation_test.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "math" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/config" + net "knative.dev/serving/pkg/apis/networking" +) + +func TestPodAutoscalerSpecValidation(t *testing.T) { + tests := []struct { + name string + rs *PodAutoscalerSpec + want *apis.FieldError + }{{ + name: "valid", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + want: nil, + }, { + name: "protocol type missing", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + }, + want: apis.ErrMissingField("protocolType"), + }, { + name: "protcol type invalid", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolType("dragon"), + }, + want: apis.ErrInvalidValue("dragon", "protocolType"), + }, { + name: "has missing scaleTargetRef", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 1, + ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrMissingField("scaleTargetRef.apiVersion", "scaleTargetRef.kind", + "scaleTargetRef.name"), + }, { + name: "has missing scaleTargetRef kind", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 1, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrMissingField("scaleTargetRef.kind"), + }, { + name: "has missing scaleTargetRef apiVersion", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + Kind: "Deployment", + Name: "bar", + }, 
+ ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrMissingField("scaleTargetRef.apiVersion"), + }, { + name: "has missing scaleTargetRef name", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrMissingField("scaleTargetRef.name"), + }, { + name: "bad container concurrency", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: -1, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrOutOfBoundsValue(-1, 0, + config.DefaultMaxRevisionContainerConcurrency, "containerConcurrency"), + }, { + name: "multi invalid, bad concurrency and missing ref kind", + rs: &PodAutoscalerSpec{ + ContainerConcurrency: -2, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + want: apis.ErrOutOfBoundsValue(-2, 0, + config.DefaultMaxRevisionContainerConcurrency, "containerConcurrency").Also( + apis.ErrMissingField("scaleTargetRef.kind")), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.rs.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %s", diff) + } + }) + } +} + +func TestPodAutoscalerValidation(t *testing.T) { + tests := []struct { + name string + r *PodAutoscaler + want *apis.FieldError + }{{ + name: "valid", + r: &PodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid", + }, + Spec: PodAutoscalerSpec{ + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + }, + want: nil, + }, { + name: "bad protocol", + r: &PodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + 
autoscaling.MinScaleAnnotationKey: "2", + }, + }, + Spec: PodAutoscalerSpec{ + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolType("WebSocket"), + }, + }, + want: apis.ErrInvalidValue("WebSocket", "spec.protocolType"), + }, { + name: "bad scale bounds", + r: &PodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + autoscaling.MinScaleAnnotationKey: "FOO", + }, + }, + Spec: PodAutoscalerSpec{ + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + }, + want: apis.ErrOutOfBoundsValue("FOO", 1, math.MaxInt32, autoscaling.MinScaleAnnotationKey).ViaField("metadata", "annotations"), + }, { + name: "empty spec", + r: &PodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid", + }, + }, + want: apis.ErrMissingField("spec"), + }, { + name: "nested spec error", + r: &PodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid", + }, + Spec: PodAutoscalerSpec{ + ContainerConcurrency: -1, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar", + }, + ProtocolType: net.ProtocolHTTP1, + }, + }, + want: apis.ErrOutOfBoundsValue(-1, 0, + config.DefaultMaxRevisionContainerConcurrency, "spec.containerConcurrency"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Got: %q, want: %q, diff: %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_implements_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_implements_test.go new file mode 100644 index 0000000000..aa163ddb3b --- /dev/null +++ 
b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_implements_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + + "knative.dev/pkg/apis/duck" +) + +func TestImplementsPodScalable(t *testing.T) { + instances := []interface{}{ + &PodScalable{}, + &appsv1.ReplicaSet{}, + &appsv1.Deployment{}, + &appsv1.StatefulSet{}, + } + for _, instance := range instances { + if err := duck.VerifyType(instance, &PodScalable{}); err != nil { + t.Error(err) + } + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go index 4455986ada..b6d05a7b73 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go @@ -17,13 +17,14 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" - "github.com/knative/pkg/apis/duck" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" ) +// +genduck // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodScalable is a duck type that the resources referenced by the diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/register.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/register.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register.go index 92d4fee7f3..1a90e867e2 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/autoscaling" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodAutoscaler{}, &PodAutoscalerList{}, + &Metric{}, + &MetricList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register_test.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register_test.go new file mode 100644 index 0000000000..43ec7df798 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/register_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/serving/pkg/apis/autoscaling" +) + +func TestRegisterHelpers(t *testing.T) { + if got, want := Kind("PodAutoscaler"), "PodAutoscaler."+autoscaling.InternalGroupName; got.String() != want { + t.Errorf("Kind(PodAutoscaler) = %v, want %v", got.String(), want) + } + + if got, want := Resource("PodAutoscaler"), "PodAutoscaler."+autoscaling.InternalGroupName; got.String() != want { + t.Errorf("Resource(PodAutoscaler) = %v, want %v", got.String(), want) + } + + if got, want := SchemeGroupVersion.String(), autoscaling.InternalGroupName+"/v1alpha1"; got != want { + t.Errorf("SchemeGroupVersion() = %v, want %v", got, want) + } + + scheme := runtime.NewScheme() + if err := addKnownTypes(scheme); err != nil { + t.Errorf("addKnownTypes() = %v", err) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go similarity index 68% rename from test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go index 2c4a511221..a4a822dfa9 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors 
+Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,6 +25,100 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metric) DeepCopyInto(out *Metric) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metric. +func (in *Metric) DeepCopy() *Metric { + if in == nil { + return nil + } + out := new(Metric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Metric) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricList) DeepCopyInto(out *MetricList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Metric, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricList. +func (in *MetricList) DeepCopy() *MetricList { + if in == nil { + return nil + } + out := new(MetricList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MetricList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodAutoscaler) DeepCopyInto(out *PodAutoscaler) { *out = *in @@ -57,7 +151,7 @@ func (in *PodAutoscaler) DeepCopyObject() runtime.Object { func (in *PodAutoscalerList) DeepCopyInto(out *PodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodAutoscaler, len(*in)) @@ -107,6 +201,16 @@ func (in *PodAutoscalerSpec) DeepCopy() *PodAutoscalerSpec { func (in *PodAutoscalerStatus) DeepCopyInto(out *PodAutoscalerStatus) { *out = *in in.Status.DeepCopyInto(&out.Status) + if in.DesiredScale != nil { + in, out := &in.DesiredScale, &out.DesiredScale + *out = new(int32) + **out = **in + } + if in.ActualScale != nil { + in, out := &in.ActualScale, &out.ActualScale + *out = new(int32) + **out = **in + } return } @@ -152,7 +256,7 @@ func (in *PodScalable) DeepCopyObject() runtime.Object { func (in *PodScalableList) DeepCopyInto(out *PodScalableList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodScalable, len(*in)) diff --git a/test/vendor/github.com/knative/serving/pkg/apis/config/defaults.go b/test/vendor/knative.dev/serving/pkg/apis/config/defaults.go similarity index 84% rename from test/vendor/github.com/knative/serving/pkg/apis/config/defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/config/defaults.go index 86b63774b0..fe1820dec3 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/config/defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/config/defaults.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) const ( @@ -44,6 +44,13 @@ const ( // DefaultUserContainerName is the default name we give to the container // specified by the user, if 
`name:` is omitted. DefaultUserContainerName = "user-container" + + // DefaultContainerConcurrency is the default container concurrency. It will be set if ContainerConcurrency is not specified. + DefaultContainerConcurrency int64 = 0 + + // DefaultMaxRevisionContainerConcurrency is the maximum configurable + // container concurrency. + DefaultMaxRevisionContainerConcurrency int64 = 1000 ) // NewDefaultsConfigFromMap creates a Defaults from the supplied Map @@ -64,6 +71,10 @@ func NewDefaultsConfigFromMap(data map[string]string) (*Defaults, error) { key: "max-revision-timeout-seconds", field: &nc.MaxRevisionTimeoutSeconds, defaultValue: DefaultMaxRevisionTimeoutSeconds, + }, { + key: "container-concurrency", + field: &nc.ContainerConcurrency, + defaultValue: DefaultContainerConcurrency, }} { if raw, ok := data[i64.key]; !ok { *i64.field = i64.defaultValue @@ -78,6 +89,10 @@ func NewDefaultsConfigFromMap(data map[string]string) (*Defaults, error) { return nil, fmt.Errorf("revision-timeout-seconds (%d) cannot be greater than max-revision-timeout-seconds (%d)", nc.RevisionTimeoutSeconds, nc.MaxRevisionTimeoutSeconds) } + if nc.ContainerConcurrency < 0 || nc.ContainerConcurrency > DefaultMaxRevisionContainerConcurrency { + return nil, apis.ErrOutOfBoundsValue(nc.ContainerConcurrency, 0, DefaultMaxRevisionContainerConcurrency, "containerConcurrency") + } + // Process resource quantity fields for _, rsrc := range []struct { key string @@ -113,7 +128,7 @@ func NewDefaultsConfigFromMap(data map[string]string) (*Defaults, error) { } // Check that the template properly applies to ObjectMeta. if err := tmpl.Execute(ioutil.Discard, metav1.ObjectMeta{}); err != nil { - return nil, fmt.Errorf("error executing template: %v", err) + return nil, fmt.Errorf("error executing template: %w", err) } // We store the raw template because we run deepcopy-gen on the // config and that doesn't copy nicely. 
@@ -137,6 +152,8 @@ type Defaults struct { UserContainerNameTemplate string + ContainerConcurrency int64 + RevisionCPURequest *resource.Quantity RevisionCPULimit *resource.Quantity RevisionMemoryRequest *resource.Quantity diff --git a/test/vendor/knative.dev/serving/pkg/apis/config/defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/config/defaults_test.go new file mode 100644 index 0000000000..5c08e771c5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/config/defaults_test.go @@ -0,0 +1,227 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/system" + + . 
"knative.dev/pkg/configmap/testing" + _ "knative.dev/pkg/system/testing" +) + +func TestDefaultsConfigurationFromFile(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, DefaultsConfigName) + + if _, err := NewDefaultsConfigFromConfigMap(cm); err != nil { + t.Errorf("NewDefaultsConfigFromConfigMap(actual) = %v", err) + } + + if _, err := NewDefaultsConfigFromConfigMap(example); err != nil { + t.Errorf("NewDefaultsConfigFromConfigMap(example) = %v", err) + } +} + +func TestDefaultsConfiguration(t *testing.T) { + oneTwoThree := resource.MustParse("123m") + + configTests := []struct { + name string + wantErr bool + wantDefaults interface{} + config *corev1.ConfigMap + }{{ + name: "defaults configuration", + wantErr: false, + wantDefaults: &Defaults{ + RevisionTimeoutSeconds: DefaultRevisionTimeoutSeconds, + MaxRevisionTimeoutSeconds: DefaultMaxRevisionTimeoutSeconds, + UserContainerNameTemplate: DefaultUserContainerName, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{}, + }, + }, { + name: "specified values", + wantErr: false, + wantDefaults: &Defaults{ + RevisionTimeoutSeconds: 123, + MaxRevisionTimeoutSeconds: 456, + RevisionCPURequest: &oneTwoThree, + UserContainerNameTemplate: "{{.Name}}", + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "123", + "max-revision-timeout-seconds": "456", + "revision-cpu-request": "123m", + "container-name-template": "{{.Name}}", + }, + }, + }, { + name: "bad revision timeout", + wantErr: true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "asdf", + }, + }, + }, { + name: "bad max revision timeout", + wantErr: 
true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "max-revision-timeout-seconds": "asdf", + }, + }, + }, { + name: "bad name template", + wantErr: true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "container-name-template": "{{.NAme}}", + }, + }, + }, { + name: "bad resource", + wantErr: true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "revision-cpu-request": "bad", + }, + }, + }, { + name: "revision timeout bigger than max timeout", + wantErr: true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "456", + "max-revision-timeout-seconds": "123", + }, + }, + }, { + name: "containerConcurrency is bigger than default DefaultMaxRevisionContainerConcurrency", + wantErr: true, + wantDefaults: (*Defaults)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "container-concurrency": "2000", + }, + }, + }} + + for _, tt := range configTests { + t.Run(tt.name, func(t *testing.T) { + actualDefaults, err := NewDefaultsConfigFromConfigMap(tt.config) + + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewDefaultsConfigFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) + } + + if diff := cmp.Diff(actualDefaults, tt.wantDefaults, ignoreStuff); diff != "" { + t.Fatalf("Test: %q; want %v, but got %v", tt.name, tt.wantDefaults, actualDefaults) + } + }) + 
} +} + +func TestTemplating(t *testing.T) { + tests := []struct { + name string + template string + want string + }{{ + name: "groot", + template: "{{.Name}}", + want: "i-am-groot", + }, { + name: "complex", + template: "{{.Namespace}}-of-the-galaxy", + want: "guardians-of-the-galaxy", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + def, err := NewDefaultsConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DefaultsConfigName, + }, + Data: map[string]string{ + "container-name-template": test.template, + }, + }) + if err != nil { + t.Errorf("Error parsing defaults: %v", err) + } + + ctx := apis.WithinParent(context.Background(), metav1.ObjectMeta{ + Name: "i-am-groot", + Namespace: "guardians", + }) + + got := def.UserContainerName(ctx) + if test.want != got { + t.Errorf("UserContainerName() = %v, wanted %v", got, test.want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/config/doc.go b/test/vendor/knative.dev/serving/pkg/apis/config/doc.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/config/doc.go rename to test/vendor/knative.dev/serving/pkg/apis/config/doc.go diff --git a/test/vendor/github.com/knative/serving/pkg/apis/config/store.go b/test/vendor/knative.dev/serving/pkg/apis/config/store.go similarity index 98% rename from test/vendor/github.com/knative/serving/pkg/apis/config/store.go rename to test/vendor/knative.dev/serving/pkg/apis/config/store.go index 7d7f519d9a..38f21bc66d 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/config/store.go +++ b/test/vendor/knative.dev/serving/pkg/apis/config/store.go @@ -19,7 +19,7 @@ package config import ( "context" - "github.com/knative/pkg/configmap" + "knative.dev/pkg/configmap" ) type cfgKey struct{} diff --git a/test/vendor/knative.dev/serving/pkg/apis/config/store_test.go b/test/vendor/knative.dev/serving/pkg/apis/config/store_test.go new file 
mode 100644 index 0000000000..793b0af8ba --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/config/store_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/api/resource" + logtesting "knative.dev/pkg/logging/testing" + + . "knative.dev/pkg/configmap/testing" +) + +var ignoreStuff = cmp.Options{ + cmpopts.IgnoreUnexported(resource.Quantity{}), +} + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + defaultsConfig := ConfigMapFromTestFile(t, DefaultsConfigName) + + store.OnConfigChanged(defaultsConfig) + + config := FromContextOrDefaults(store.ToContext(context.Background())) + + t.Run("defaults", func(t *testing.T) { + expected, _ := NewDefaultsConfigFromConfigMap(defaultsConfig) + if diff := cmp.Diff(expected, config.Defaults, ignoreStuff...); diff != "" { + t.Errorf("Unexpected defaults config (-want, +got): %v", diff) + } + }) +} + +func TestStoreLoadWithContextOrDefaults(t *testing.T) { + defaultsConfig := ConfigMapFromTestFile(t, DefaultsConfigName) + config := FromContextOrDefaults(context.Background()) + + t.Run("defaults", func(t *testing.T) { + expected, _ := NewDefaultsConfigFromConfigMap(defaultsConfig) + if diff := cmp.Diff(expected, config.Defaults, ignoreStuff...); diff != "" { + t.Errorf("Unexpected 
defaults config (-want, +got): %v", diff) + } + }) +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + store.OnConfigChanged(ConfigMapFromTestFile(t, DefaultsConfigName)) + + config := store.Load() + + config.Defaults.RevisionTimeoutSeconds = 1234 + + newConfig := store.Load() + + if newConfig.Defaults.RevisionTimeoutSeconds == 1234 { + t.Error("Defaults config is not immutable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/config/testdata/config-defaults.yaml b/test/vendor/knative.dev/serving/pkg/apis/config/testdata/config-defaults.yaml new file mode 120000 index 0000000000..4e958fe292 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/config/testdata/config-defaults.yaml @@ -0,0 +1 @@ +../../../../config/core/configmaps/defaults.yaml \ No newline at end of file diff --git a/test/vendor/github.com/knative/serving/pkg/apis/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/config/zz_generated.deepcopy.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/config/zz_generated.deepcopy.go rename to test/vendor/knative.dev/serving/pkg/apis/config/zz_generated.deepcopy.go index d0e78d8c0d..341f6334ba 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/config/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/serving/pkg/apis/config/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/test/vendor/knative.dev/serving/pkg/apis/doc.go b/test/vendor/knative.dev/serving/pkg/apis/doc.go new file mode 100644 index 0000000000..d854edc901 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Api versions allow the api contract for a resource to be changed while keeping +// backward compatibility by support multiple concurrent versions +// of the same resource + +// +k8s:deepcopy-gen=package +package apis diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/OWNERS b/test/vendor/knative.dev/serving/pkg/apis/networking/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/generic_types.go b/test/vendor/knative.dev/serving/pkg/apis/networking/generic_types.go similarity index 93% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/generic_types.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/generic_types.go index 7edf4fe045..82685b1e44 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/generic_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/generic_types.go @@ -19,7 +19,7 @@ package networking import ( "context" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // This files contains the versionless types and enums that are strongly @@ -41,6 +41,8 @@ func (p ProtocolType) Validate(context.Context) *apis.FieldError { switch p { case ProtocolH2C, ProtocolHTTP1: return nil + case ProtocolType(""): + return apis.ErrMissingField(apis.CurrentField) } return apis.ErrInvalidValue(p, apis.CurrentField) } diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/generic_types_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/generic_types_test.go new file mode 100644 index 0000000000..d56730c6b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/generic_types_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package networking + +import ( + "context" + "reflect" + "testing" + + "knative.dev/pkg/apis" +) + +func TestProtocolTypeValidate(t *testing.T) { + cases := []struct { + name string + proto ProtocolType + expect *apis.FieldError + }{{ + name: "no protocol", + proto: "", + expect: apis.ErrMissingField(apis.CurrentField), + }, { + name: "invalid protocol", + proto: "invalidProtocol", + expect: apis.ErrInvalidValue("invalidProtocol", apis.CurrentField), + }, { + name: "valid h2c protocol", + proto: ProtocolH2C, + expect: nil, + }, { + name: "valid http1 protocol", + proto: ProtocolHTTP1, + expect: nil, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got, want := c.proto.Validate(context.Background()), c.expect; !reflect.DeepEqual(got, want) { + t.Errorf("got = %v, want: %v", got, want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/ports.go b/test/vendor/knative.dev/serving/pkg/apis/networking/ports.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/ports.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/ports.go diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/ports_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/ports_test.go new file mode 100644 index 0000000000..a15cda54f9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/ports_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package networking + +import ( + "testing" +) + +func TestServicePortName(t *testing.T) { + cases := []struct { + name string + proto ProtocolType + expect string + }{{ + name: "pass h2c get http2 protocol", + proto: ProtocolH2C, + expect: ServicePortNameH2C, + }, { + name: "pass any get http protocol", + proto: ProtocolHTTP1, + expect: ServicePortNameHTTP1, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got, want := ServicePortName(c.proto), c.expect; !(got == want) { + t.Errorf("got = %s, want: %s", got, want) + } + }) + } +} + +func TestServicePort(t *testing.T) { + cases := []struct { + name string + proto ProtocolType + expect int + }{{ + name: "pass h2c protocol to get Serving and Activator K8s services for HTTP/2 endpoints", + proto: ProtocolH2C, + expect: ServiceHTTP2Port, + }, { + name: "pass any protocol to get Serving and Activator K8s services for HTTP/1 endpoints", + proto: ProtocolHTTP1, + expect: ServiceHTTPPort, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if got, want := ServicePort(c.proto), c.expect; !(got == want) { + t.Errorf("got = %d, want: %d", got, want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/register.go b/test/vendor/knative.dev/serving/pkg/apis/networking/register.go similarity index 58% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/register.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/register.go index 83f80b42f1..4ab10998a0 100644 --- 
a/test/vendor/github.com/knative/serving/pkg/apis/networking/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/register.go @@ -21,7 +21,7 @@ const ( GroupName = "networking.internal.knative.dev" // IngressClassAnnotationKey is the annotation for the - // explicit class of ClusterIngress that a particular resource has + // explicit class of Ingress that a particular resource has // opted into. For example, // // networking.knative.dev/ingress.class: some-network-impl @@ -30,14 +30,14 @@ const ( // user-facing. // // The parent resource may use its own annotations to choose the - // annotation value for the ClusterIngress it uses. Based on such + // annotation value for the Ingress it uses. Based on such // value a different reconciliation logic may be used (for examples, - // Istio-based ClusterIngress will reconcile into a VirtualService). + // Istio-based Ingress will reconcile into a VirtualService). IngressClassAnnotationKey = "networking.knative.dev/ingress.class" - // ClusterIngressLabelKey is the label key attached to underlying network programming - // resources to indicate which ClusterIngress triggered their creation. - ClusterIngressLabelKey = GroupName + "/clusteringress" + // IngressLabelKey is the label key attached to underlying network programming + // resources to indicate which Ingress triggered their creation. + IngressLabelKey = GroupName + "/ingress" // SKSLabelKey is the label key that SKS Controller attaches to the // underlying resources it controls. @@ -54,6 +54,37 @@ const ( // OriginSecretNamespaceLabelKey is the label key attached to the TLS secret // to indicate the namespace of the origin secret that the TLS secret is copied from. OriginSecretNamespaceLabelKey = GroupName + "/originSecretNamespace" + + // CertificateClassAnnotationKey is the annotation for the + // explicit class of Certificate that a particular resource has + // opted into. 
For example, + // + // networking.internal.knative.dev/certificate.class: some-network-impl + // + // This uses a different domain because unlike the resource, it is + // user-facing. + // + // The parent resource may use its own annotations to choose the + // annotation value for the Certificate it uses. Based on such + // value a different reconciliation logic may be used (for examples, + // Cert-Manager-based Certificate will reconcile into a Cert-Manager Certificate). + CertificateClassAnnotationKey = GroupName + "/certificate.class" + + // ActivatorServiceName is the name of the activator Kubernetes service. + ActivatorServiceName = "activator-service" + + // DisableWildcardCertLabelKey is the label key attached to a namespace to indicate that + // a wildcard certificate should be not created for it. + DisableWildcardCertLabelKey = GroupName + "/disableWildcardCert" + + // WildcardCertDomainLabelKey is the label key attached to a certificate to indicate the + // domain for which it was issued. + WildcardCertDomainLabelKey = "networking.knative.dev/wildcardDomain" + + // KnativeIngressGateway is the name of the ingress gateway + KnativeIngressGateway = "knative-ingress-gateway" + // ClusterLocalGateway is the name of the local gateway + ClusterLocalGateway = "cluster-local-gateway" ) // ServiceType is the enumeration type for the Kubernetes services @@ -69,6 +100,7 @@ const ( ServiceTypePublic ServiceType = "Public" // ServiceTypeMetrics is the label value for Metrics services. Such services // are used for meric scraping. + // TODO(5900): Remove after 0.12 is cut. 
ServiceTypeMetrics ServiceType = "Metrics" ) diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_defaults.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_defaults.go diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go similarity index 87% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go index 7af58af033..38b77a9615 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle.go @@ -19,9 +19,9 @@ package v1alpha1 import ( "fmt" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) // InitializeConditions initializes the certificate conditions. @@ -34,13 +34,13 @@ func (cs *CertificateStatus) MarkReady() { certificateCondSet.Manage(cs).MarkTrue(CertificateConditionReady) } -// MarkUnknown marks the certificate status as unknown. -func (cs *CertificateStatus) MarkUnknown(reason, message string) { +// MarkNotReady marks the certificate status as unknown. +func (cs *CertificateStatus) MarkNotReady(reason, message string) { certificateCondSet.Manage(cs).MarkUnknown(CertificateConditionReady, reason, message) } -// MarkNotReady marks the certificate as not ready. 
-func (cs *CertificateStatus) MarkNotReady(reason, message string) { +// MarkFailed marks the certificate as not ready. +func (cs *CertificateStatus) MarkFailed(reason, message string) { certificateCondSet.Manage(cs).MarkFalse(CertificateConditionReady, reason, message) } @@ -64,7 +64,7 @@ func (cs *CertificateStatus) GetCondition(t apis.ConditionType) *apis.Condition // ConditionType represents a Certificate condition value const ( // CertificateConditionReady is set when the requested certificate - // is provioned and valid. + // is provisioned and valid. CertificateConditionReady = apis.ConditionReady ) @@ -75,6 +75,6 @@ func (c *Certificate) GetGroupVersionKind() schema.GroupVersionKind { return SchemeGroupVersion.WithKind("Certificate") } -func (cs *CertificateStatus) duck() *duckv1beta1.Status { +func (cs *CertificateStatus) duck() *duckv1.Status { return &cs.Status } diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle_test.go new file mode 100644 index 0000000000..39d09493b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_lifecycle_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" +) + +func TestCertificateDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Certificate{}, test.t) + if err != nil { + t.Errorf("VerifyType(Certificate, %T) = %v", test.t, err) + } + }) + } +} + +func TestCertificateGetGroupVersionKind(t *testing.T) { + c := Certificate{} + expected := SchemeGroupVersion.WithKind("Certificate") + if diff := cmp.Diff(expected, c.GetGroupVersionKind()); diff != "" { + t.Errorf("Unexpected diff (-want, +got) = %s", diff) + } +} + +func TestMarkReady(t *testing.T) { + c := &CertificateStatus{} + c.InitializeConditions() + apitestv1.CheckConditionOngoing(c.duck(), CertificateConditionReady, t) + + c.MarkReady() + if !c.IsReady() { + t.Error("IsReady=false, want: true") + } +} + +func TestMarkNotReady(t *testing.T) { + c := &CertificateStatus{} + c.InitializeConditions() + apitestv1.CheckCondition(c.duck(), CertificateConditionReady, corev1.ConditionUnknown) + + c.MarkNotReady("unknow", "unknown") + apitestv1.CheckCondition(c.duck(), CertificateConditionReady, corev1.ConditionUnknown) +} + +func TestMarkFailed(t *testing.T) { + c := &CertificateStatus{} + c.InitializeConditions() + apitestv1.CheckCondition(c.duck(), CertificateConditionReady, corev1.ConditionUnknown) + + c.MarkFailed("failed", "failed") + apitestv1.CheckConditionFailed(c.duck(), CertificateConditionReady, t) +} + +func TestMarkResourceNotOwned(t *testing.T) { + c := &CertificateStatus{} + c.InitializeConditions() + c.MarkResourceNotOwned("doesn't", "own") + apitestv1.CheckConditionFailed(c.duck(), CertificateConditionReady, t) +} + 
+func TestGetCondition(t *testing.T) { + c := &CertificateStatus{} + c.InitializeConditions() + tests := []struct { + name string + condType apis.ConditionType + expect *apis.Condition + reason string + message string + }{{ + name: "random condition", + condType: apis.ConditionType("random"), + expect: nil, + }, { + name: "ready condition for failed reason", + condType: apis.ConditionReady, + reason: "failed", + message: "failed", + expect: &apis.Condition{ + Status: corev1.ConditionFalse, + }, + }, { + name: "ready condition for unknown reason", + condType: apis.ConditionReady, + reason: "unknown", + message: "unknown", + expect: &apis.Condition{ + Status: corev1.ConditionUnknown, + }, + }, { + name: "succeeded condition", + condType: apis.ConditionSucceeded, + expect: nil, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if tc.reason == "unknown" { + c.MarkNotReady(tc.reason, tc.message) + } else { + c.MarkFailed(tc.reason, tc.message) + } + if got, want := c.GetCondition(tc.condType), tc.expect; got != nil && got.Status != want.Status { + t.Errorf("got: %v, want: %v", got, want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_types.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_types.go similarity index 96% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_types.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_types.go index 74e454a8b6..9bf98cce46 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_types.go @@ -17,11 +17,11 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -88,7 +88,7 @@ type CertificateStatus struct { // - The target secret exists // - The target secret contains a certificate that has not expired // - The target secret contains a private key valid for the certificate - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // The expiration time of the TLS certificate stored in the secret named // by this resource in spec.secretName. diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_validation.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation.go index 5fd29c738a..e529bc169a 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/certificate_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation.go @@ -19,7 +19,7 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // Validate inspects and validates Certificate object. 
diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation_test.go new file mode 100644 index 0000000000..c744c14e71 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/certificate_validation_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "knative.dev/pkg/apis" +) + +func TestCertificateSpecValidation(t *testing.T) { + tests := []struct { + name string + cs *CertificateSpec + want *apis.FieldError + }{{ + name: "valid", + cs: &CertificateSpec{ + DNSNames: []string{"host.example"}, + SecretName: "secret", + }, + want: nil, + }, { + name: "missing-dnsnames", + cs: &CertificateSpec{ + DNSNames: []string{}, + SecretName: "secret", + }, + want: apis.ErrMissingField("dnsNames"), + }, { + name: "empty-dnsname", + cs: &CertificateSpec{ + DNSNames: []string{"host.example", ""}, + SecretName: "secret", + }, + want: apis.ErrInvalidArrayValue("", "dnsNames", 1), + }, { + name: "missing-secret-name", + cs: &CertificateSpec{ + DNSNames: []string{"host.example"}, + SecretName: "", + }, + want: apis.ErrMissingField("secretName"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cs.Validate(context.Background()) + if diff := 
cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} +func TestCertificateValidation(t *testing.T) { + tests := []struct { + name string + c *Certificate + want *apis.FieldError + }{{ + name: "valid", + c: &Certificate{ + Spec: CertificateSpec{ + DNSNames: []string{"host.example"}, + SecretName: "secret", + }, + }, + want: nil, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.c.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/doc.go similarity index 79% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/doc.go index f3f12f28aa..2663a50e6a 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/doc.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/doc.go @@ -18,7 +18,7 @@ limitations under the License. // +groupName=networking.internal.knative.dev package v1alpha1 -// ClusterIngress is heavily based on K8s Ingress -// https://godoc.org/k8s.io/api/extensions/v1beta1#Ingress with some -// highlighted modifications. See clusteringress_types.go for more +// Ingress is heavily based on K8s Ingress +// https://godoc.org/k8s.io/api/networking/v1beta1#Ingress with some +// highlighted modifications. See ingress_types.go for more // information about the modifications that we made. 
diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go similarity index 86% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go index 368da7534f..b0a724991d 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults.go @@ -20,11 +20,11 @@ import ( "context" "time" - "github.com/knative/pkg/apis" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" - "github.com/knative/serving/pkg/apis/config" - "github.com/knative/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/networking" ) var ( @@ -51,14 +51,9 @@ func (s *IngressSpec) SetDefaults(ctx context.Context) { // SetDefaults populates default values in IngressTLS func (t *IngressTLS) SetDefaults(ctx context.Context) { - // Default Secret key for ServerCertificate is `tls.crt`. - if t.ServerCertificate == "" { - t.ServerCertificate = "tls.crt" - } - // Default Secret key for PrivateKey is `tls.key`. - if t.PrivateKey == "" { - t.PrivateKey = "tls.key" - } + // Deprecated, do not use. 
+ t.DeprecatedServerCertificate = "" + t.DeprecatedPrivateKey = "" } // SetDefaults populates default values in IngressRule diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults_test.go new file mode 100644 index 0000000000..2bd1ee1a0e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_defaults_test.go @@ -0,0 +1,248 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "knative.dev/serving/pkg/apis/networking" +) + +func TestIngressDefaulting(t *testing.T) { + tests := []struct { + name string + in *Ingress + want *Ingress + }{{ + name: "empty", + in: &Ingress{}, + want: &Ingress{ + Spec: IngressSpec{ + Visibility: IngressVisibilityExternalIP, + }, + }, + }, { + name: "has-visibility", + in: &Ingress{ + Spec: IngressSpec{ + Visibility: IngressVisibilityClusterLocal, + }, + }, + want: &Ingress{ + Spec: IngressSpec{ + Visibility: IngressVisibilityClusterLocal, + }, + }, + }, { + name: "split-timeout-retry-defaulting", + in: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + want: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + // Percent is filled in. + Percent: 100, + }}, + // Timeout and Retries are filled in. 
+ Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + }, { + name: "split-timeout-retry-not-defaulting", + in: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 30, + }, { + IngressBackend: IngressBackend{ + ServiceName: "revision-001", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 70, + }}, + Timeout: &metav1.Duration{Duration: 10 * time.Second}, + Retries: &HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: 10 * time.Second}, + Attempts: 2, + }, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + want: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + // Percent is kept intact. + Percent: 30, + }, { + IngressBackend: IngressBackend{ + ServiceName: "revision-001", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + // Percent is kept intact. + Percent: 70, + }}, + // Timeout and Retries are kept intact. 
+ Timeout: &metav1.Duration{Duration: 10 * time.Second}, + Retries: &HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: 10 * time.Second}, + Attempts: 2, + }, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + }, { + name: "perTryTimeout-in-retry-defaulting", + in: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 30, + }, { + IngressBackend: IngressBackend{ + ServiceName: "revision-001", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 70, + }}, + Timeout: &metav1.Duration{Duration: 10 * time.Second}, + Retries: &HTTPRetry{ + Attempts: 2, + }, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + want: &Ingress{ + Spec: IngressSpec{ + Rules: []IngressRule{{ + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + // Percent is kept intact. + Percent: 30, + }, { + IngressBackend: IngressBackend{ + ServiceName: "revision-001", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + // Percent is kept intact. + Percent: 70, + }}, + // Timeout and Retries are kept intact. + Timeout: &metav1.Duration{Duration: 10 * time.Second}, + Retries: &HTTPRetry{ + // PerTryTimeout is filled in. 
+ PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: 2, + }, + }}, + }, + }}, + Visibility: IngressVisibilityExternalIP, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } + +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go similarity index 68% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go index 50a899a294..c6fd1b9e28 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle.go @@ -19,9 +19,9 @@ package v1alpha1 import ( "fmt" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) var ingressCondSet = apis.NewLivingConditionSet( @@ -63,20 +63,37 @@ func (is *IngressStatus) MarkResourceNotOwned(kind, name string) { // MarkLoadBalancerReady marks the Ingress with IngressConditionLoadBalancerReady, // and also populate the address of the load balancer. -func (is *IngressStatus) MarkLoadBalancerReady(lbs []LoadBalancerIngressStatus) { - is.LoadBalancer = &LoadBalancerStatus{ - Ingress: []LoadBalancerIngressStatus{}, - } - is.LoadBalancer.Ingress = append(is.LoadBalancer.Ingress, lbs...) 
+func (is *IngressStatus) MarkLoadBalancerReady(lbs []LoadBalancerIngressStatus, publicLbs []LoadBalancerIngressStatus, privateLbs []LoadBalancerIngressStatus) { + is.LoadBalancer = &LoadBalancerStatus{Ingress: lbs} + is.PublicLoadBalancer = &LoadBalancerStatus{Ingress: publicLbs} + is.PrivateLoadBalancer = &LoadBalancerStatus{Ingress: privateLbs} + ingressCondSet.Manage(is).MarkTrue(IngressConditionLoadBalancerReady) } +// MarkLoadBalancerNotReady marks the "IngressConditionLoadBalancerReady" condition to unknown to +// reflect that the load balancer is not ready yet. +func (is *IngressStatus) MarkLoadBalancerNotReady() { + ingressCondSet.Manage(is).MarkUnknown(IngressConditionLoadBalancerReady, "Uninitialized", + "Waiting for load balancer to be ready") +} + +// MarkLoadBalancerFailed marks the "IngressConditionLoadBalancerReady" condition to false. +func (is *IngressStatus) MarkLoadBalancerFailed(reason, message string) { + ingressCondSet.Manage(is).MarkFalse(IngressConditionLoadBalancerReady, reason, message) +} + +// MarkIngressNotReady marks the "IngressConditionReady" condition to unknown. 
+func (is *IngressStatus) MarkIngressNotReady(reason, message string) { + ingressCondSet.Manage(is).MarkUnknown(IngressConditionReady, reason, message) +} + // IsReady looks at the conditions and if the Status has a condition // IngressConditionReady returns true if ConditionStatus is True func (is *IngressStatus) IsReady() bool { return ingressCondSet.Manage(is).IsHappy() } -func (is *IngressStatus) duck() *duckv1beta1.Status { +func (is *IngressStatus) duck() *duckv1.Status { return &is.Status } diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle_test.go new file mode 100644 index 0000000000..284d7a6c7c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_lifecycle_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" +) + +func TestIngressDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Ingress{}, test.t) + if err != nil { + t.Errorf("VerifyType(Ingress, %T) = %v", test.t, err) + } + }) + } +} + +func TestIngressGetGroupVersionKind(t *testing.T) { + ci := Ingress{} + expected := SchemeGroupVersion.WithKind("Ingress") + if diff := cmp.Diff(expected, ci.GetGroupVersionKind()); diff != "" { + t.Errorf("Unexpected diff (-want, +got) = %v", diff) + } +} + +func TestIngressIsPublic(t *testing.T) { + ci := Ingress{} + if !ci.IsPublic() { + t.Error("Expected default Ingress to be public, for backward compatibility") + } + if !ci.IsPublic() { + t.Errorf("Expected IsPublic()==true, saw %v", ci.IsPublic()) + } + ci.Spec.Visibility = IngressVisibilityExternalIP + if !ci.IsPublic() { + t.Errorf("Expected IsPublic()==true, saw %v", ci.IsPublic()) + } + ci.Spec.Visibility = IngressVisibilityClusterLocal + if ci.IsPublic() { + t.Errorf("Expected IsPublic()==false, saw %v", ci.IsPublic()) + } +} + +func TestIngressTypicalFlow(t *testing.T) { + r := &IngressStatus{} + r.InitializeConditions() + + apitestv1.CheckConditionOngoing(r.duck(), IngressConditionReady, t) + + // Then network is configured. + r.MarkNetworkConfigured() + apitestv1.CheckConditionSucceeded(r.duck(), IngressConditionNetworkConfigured, t) + apitestv1.CheckConditionOngoing(r.duck(), IngressConditionReady, t) + + // Then ingress is pending. 
+ r.MarkLoadBalancerNotReady() + apitestv1.CheckConditionOngoing(r.duck(), IngressConditionLoadBalancerReady, t) + apitestv1.CheckConditionOngoing(r.duck(), IngressConditionReady, t) + + r.MarkLoadBalancerFailed("some reason", "some message") + apitestv1.CheckConditionFailed(r.duck(), IngressConditionLoadBalancerReady, t) + apitestv1.CheckConditionFailed(r.duck(), IngressConditionLoadBalancerReady, t) + + // Then ingress has address. + r.MarkLoadBalancerReady( + []LoadBalancerIngressStatus{{DomainInternal: "gateway.default.svc"}}, + []LoadBalancerIngressStatus{{DomainInternal: "gateway.default.svc"}}, + []LoadBalancerIngressStatus{{DomainInternal: "private.gateway.default.svc"}}, + ) + apitestv1.CheckConditionSucceeded(r.duck(), IngressConditionLoadBalancerReady, t) + apitestv1.CheckConditionSucceeded(r.duck(), IngressConditionReady, t) + if !r.IsReady() { + t.Fatal("IsReady()=false, wanted true") + } + + // Mark not owned. + r.MarkResourceNotOwned("i own", "you") + apitestv1.CheckConditionFailed(r.duck(), IngressConditionReady, t) + + // Mark network configured, and check that ingress is ready again + r.MarkNetworkConfigured() + apitestv1.CheckConditionSucceeded(r.duck(), IngressConditionReady, t) + if !r.IsReady() { + t.Fatal("IsReady()=false, wanted true") + } + + // Mark ingress not ready + r.MarkIngressNotReady("", "") + apitestv1.CheckConditionOngoing(r.duck(), IngressConditionReady, t) +} + +func TestIngressGetCondition(t *testing.T) { + ingressStatus := &IngressStatus{} + ingressStatus.InitializeConditions() + tests := []struct { + name string + condType apis.ConditionType + expect *apis.Condition + }{{ + name: "random condition", + condType: apis.ConditionType("random"), + expect: nil, + }, { + name: "ready condition", + condType: apis.ConditionReady, + expect: &apis.Condition{ + Status: corev1.ConditionUnknown, + }, + }, { + name: "succeeded condition", + condType: apis.ConditionSucceeded, + expect: nil, + }} + for _, tc := range tests { + t.Run(tc.name, 
func(t *testing.T) { + if got, want := ingressStatus.GetCondition(tc.condType), tc.expect; got != nil && got.Status != want.Status { + t.Errorf("got: %v, want: %v", got, want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_types.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_types.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_types.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_types.go index 26100471ab..f779bef9ef 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_types.go @@ -17,11 +17,11 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -31,7 +31,7 @@ import ( // by a backend. An Ingress can be configured to give services externally-reachable URLs, load // balance traffic, offer name based virtual hosting, etc. // -// This is heavily based on K8s Ingress https://godoc.org/k8s.io/api/extensions/v1beta1#Ingress +// This is heavily based on K8s Ingress https://godoc.org/k8s.io/api/networking/v1beta1#Ingress // which some highlighted modifications. type Ingress struct { metav1.TypeMeta `json:",inline"` @@ -45,7 +45,7 @@ type Ingress struct { // +optional Spec IngressSpec `json:"spec,omitempty"` - // Status is the current state of the ClusterIngress. + // Status is the current state of the Ingress. 
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty"` @@ -95,7 +95,7 @@ type IngressSpec struct { // +optional DeprecatedGeneration int64 `json:"generation,omitempty"` - // TLS configuration. Currently ClusterIngress only supports a single TLS + // TLS configuration. Currently Ingress only supports a single TLS // port: 443. If multiple members of this list specify different hosts, they // will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the @@ -130,7 +130,7 @@ type IngressTLS struct { // Hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this - // ClusterIngress, if left unspecified. + // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty"` @@ -143,12 +143,12 @@ type IngressTLS struct { // ServerCertificate identifies the certificate filename in the secret. // Defaults to `tls.crt`. // +optional - ServerCertificate string `json:"serverCertificate,omitempty"` + DeprecatedServerCertificate string `json:"serverCertificate,omitempty"` // PrivateKey identifies the private key filename in the secret. // Defaults to `tls.key`. // +optional - PrivateKey string `json:"privateKey,omitempty"` + DeprecatedPrivateKey string `json:"privateKey,omitempty"` } // IngressRule represents the rules mapping the paths under a specified host to @@ -159,17 +159,22 @@ type IngressRule struct { // by RFC 3986. Note the following deviations from the "host" part of the // URI as defined in the RFC: // 1. IPs are not allowed. Currently a rule value can only apply to the - // IP in the Spec of the parent ClusterIngress. + // IP in the Spec of the parent . // 2. 
The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an ClusterIngress is implicitly :80 for http and + // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // If the host is unspecified, the ClusterIngress routes all traffic based on the + // If the host is unspecified, the Ingress routes all traffic based on the // specified IngressRuleValue. // If multiple matching Hosts were provided, the first rule will take precedent. // +optional Hosts []string `json:"hosts,omitempty"` + // Visibility signifies whether this rule should `ClusterLocal`. If it's not + // specified then it defaults to `ExternalIP`. + // +optional + Visibility IngressVisibility `json:"visibility,omitempty"` + // HTTP represents a rule to apply against incoming requests. If the // rule is satisfied, the request is routed to the specified backend. HTTP *HTTPIngressRuleValue `json:"http,omitempty"` @@ -271,11 +276,20 @@ type HTTPRetry struct { // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // LoadBalancer contains the current status of the load-balancer. + // This is to be superseded by the combination of `PublicLoadBalancer` and `PrivateLoadBalancer` // +optional LoadBalancer *LoadBalancerStatus `json:"loadBalancer,omitempty"` + + // PublicLoadBalancer contains the current status of the load-balancer. + // +optional + PublicLoadBalancer *LoadBalancerStatus `json:"publicLoadBalancer,omitempty"` + + // PrivateLoadBalancer contains the current status of the load-balancer. + // +optional + PrivateLoadBalancer *LoadBalancerStatus `json:"privateLoadBalancer,omitempty"` } // LoadBalancerStatus represents the status of a load-balancer. 
@@ -307,7 +321,7 @@ type LoadBalancerIngressStatus struct { // +optional DomainInternal string `json:"domainInternal,omitempty"` - // MeshOnly is set if the ClusterIngress is only load-balanced through a Service mesh. + // MeshOnly is set if the Ingress is only load-balanced through a Service mesh. // +optional MeshOnly bool `json:"meshOnly,omitempty"` } diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_validation.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation.go similarity index 96% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation.go index 0608989119..dfad48ad21 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/ingress_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation.go @@ -20,9 +20,9 @@ import ( "context" "strconv" - "github.com/knative/pkg/apis" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/apis" ) // Validate inspects and validates Ingress object. @@ -67,7 +67,7 @@ func (r *IngressRule) Validate(ctx context.Context) *apis.FieldError { return all } -// Validate inspects and validates HTTPClusterIngressRuleValue object. +// Validate inspects and validates HTTPIngressRuleValue object. func (h *HTTPIngressRuleValue) Validate(ctx context.Context) *apis.FieldError { if len(h.Paths) == 0 { return apis.ErrMissingField("paths") @@ -79,7 +79,7 @@ func (h *HTTPIngressRuleValue) Validate(ctx context.Context) *apis.FieldError { return all } -// Validate inspects and validates HTTPClusterIngressPath object. +// Validate inspects and validates HTTPIngressPath object. func (h HTTPIngressPath) Validate(ctx context.Context) *apis.FieldError { // Provided rule must not be empty. 
if equality.Semantic.DeepEqual(h, HTTPIngressPath{}) { diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation_test.go new file mode 100644 index 0000000000..b47ba4be20 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/ingress_validation_test.go @@ -0,0 +1,391 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/apis" +) + +func TestIngressSpecValidation(t *testing.T) { + tests := []struct { + name string + is *IngressSpec + want *apis.FieldError + }{{ + name: "valid", + is: &IngressSpec{ + TLS: []IngressTLS{{ + SecretNamespace: "secret-space", + SecretName: "secret-name", + }}, + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + Retries: &HTTPRetry{ + Attempts: 3, + }, + }}, + }, + }}, + }, + want: nil, + }, { + name: "empty", + is: &IngressSpec{}, + want: apis.ErrMissingField(apis.CurrentField), + }, { + name: "missing-rule", + is: &IngressSpec{ + TLS: []IngressTLS{{ + SecretName: "secret-name", + 
SecretNamespace: "secret-namespace", + }}, + }, + want: apis.ErrMissingField("rules"), + }, { + name: "empty-rule", + is: &IngressSpec{ + Rules: []IngressRule{{}}, + }, + want: apis.ErrMissingField("rules[0]"), + }, { + name: "missing-http", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + }}, + }, + want: apis.ErrMissingField("rules[0].http"), + }, { + name: "missing-http-paths", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{}, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths"), + }, { + name: "empty-http-paths", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{}}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0]"), + }, { + name: "backend-wrong-percentage", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 199, + }}, + }}, + }, + }}, + }, + want: apis.ErrInvalidValue(199, "rules[0].http.paths[0].splits[0].percent"), + }, { + name: "missing-split", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{}, + AppendHeaders: map[string]string{ + "foo": "bar", + }, + }}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0].splits"), + }, { + name: "empty-split", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{}}, + AppendHeaders: map[string]string{ + "foo": "bar", + }, + }}, + }, + }}, + }, + want: 
apis.ErrMissingField("rules[0].http.paths[0].splits[0]"), + }, { + name: "missing-split-backend", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{}, + Percent: 100, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0].splits[0]"), + }, { + name: "missing-backend-name", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0].splits[0].serviceName"), + }, { + name: "missing-backend-namespace", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "service-name", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0].splits[0].serviceNamespace"), + }, { + name: "missing-backend-port", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "service-name", + ServiceNamespace: "default", + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("rules[0].http.paths[0].splits[0].servicePort"), + }, { + name: "split-percent-sum-not-100", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: 
"default", + ServicePort: intstr.FromInt(8080), + }, + Percent: 30, + }}, + }}, + }, + }}, + }, + want: &apis.FieldError{ + Message: "Traffic split percentage must total to 100, but was 30", + Paths: []string{"rules[0].http.paths[0].splits"}, + }, + }, { + name: "wrong-retry-attempts", + is: &IngressSpec{ + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + Retries: &HTTPRetry{ + Attempts: -1, + }, + }}, + }, + }}, + }, + want: apis.ErrInvalidValue(-1, "rules[0].http.paths[0].retries.attempts"), + }, { + name: "empty-tls", + is: &IngressSpec{ + TLS: []IngressTLS{{}}, + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("tls[0]"), + }, { + name: "missing-tls-secret-namespace", + is: &IngressSpec{ + TLS: []IngressTLS{{ + SecretName: "secret", + }}, + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("tls[0].secretNamespace"), + }, { + name: "missing-tls-secret-name", + is: &IngressSpec{ + TLS: []IngressTLS{{ + SecretNamespace: "secret-space", + }}, + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: 
"revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + }}, + }, + }}, + }, + want: apis.ErrMissingField("tls[0].secretName"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.is.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} +func TestIngressValidation(t *testing.T) { + tests := []struct { + name string + ci *Ingress + want *apis.FieldError + }{{ + name: "valid", + ci: &Ingress{ + Spec: IngressSpec{ + TLS: []IngressTLS{{ + SecretNamespace: "secret-space", + SecretName: "secret-name", + }}, + Rules: []IngressRule{{ + Hosts: []string{"example.com"}, + HTTP: &HTTPIngressRuleValue{ + Paths: []HTTPIngressPath{{ + Splits: []IngressBackendSplit{{ + IngressBackend: IngressBackend{ + ServiceName: "revision-000", + ServiceNamespace: "default", + ServicePort: intstr.FromInt(8080), + }, + }}, + Retries: &HTTPRetry{ + Attempts: 3, + }, + }}, + }, + }}, + }, + }, + want: nil, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.ci.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/register.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/register.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register.go index 4c4b7302c0..6aa855801d 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register.go @@ -17,10 +17,10 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/serving/pkg/apis/networking" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/serving/pkg/apis/networking" ) // SchemeGroupVersion is group version used to register these objects @@ -44,8 +44,6 @@ var ( // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterIngress{}, - &ClusterIngressList{}, &Ingress{}, &IngressList{}, &ServerlessService{}, diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register_test.go new file mode 100644 index 0000000000..2854c0f6f3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/register_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" +) + +func TestRegisterHelpers(t *testing.T) { + tests := []struct { + kind string + want string + }{{ + kind: "Ingress", + want: "Ingress.networking.internal.knative.dev", + }, { + kind: "ServerlessService", + want: "ServerlessService.networking.internal.knative.dev", + }, { + kind: "Certificate", + want: "Certificate.networking.internal.knative.dev", + }} + for _, test := range tests { + if got, want := Kind(test.kind), test.want; got.String() != want { + t.Errorf("Kind(%s) = %q, want %q", test.kind, got.String(), want) + } + + if got, want := Resource(test.kind), test.want; got.String() != want { + t.Errorf("Resource(%s) = %q, want %q", test.kind, got.String(), want) + } + } + + if got, want := SchemeGroupVersion, "networking.internal.knative.dev/v1alpha1"; got.String() != want { + t.Errorf("SchemeGroupVersion() = %q, want %q", got.String(), want) + } + + scheme := runtime.NewScheme() + if err := addKnownTypes(scheme); err != nil { + t.Errorf("addKnownTypes() = %v", err) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_defaults.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_defaults.go diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go similarity index 62% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go index 0c74f87243..e8ad84fdc5 100644 --- 
a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle.go @@ -17,9 +17,13 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" + "time" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) var serverlessServiceCondSet = apis.NewLivingConditionSet( @@ -53,7 +57,29 @@ func (sss *ServerlessServiceStatus) MarkEndpointsNotOwned(kind, name string) { "Resource %s of type %s is not owned by SKS", name, kind) } -// MarkEndpointsNotReady marks the ServerlessServiceStatus endpoints populated conditiohn to unknown. +// MarkActivatorEndpointsPopulated is setting the ActivatorEndpointsPopulated to True. +func (sss *ServerlessServiceStatus) MarkActivatorEndpointsPopulated() { + serverlessServiceCondSet.Manage(sss).SetCondition(apis.Condition{ + Type: ActivatorEndpointsPopulated, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityInfo, + Reason: "ActivatorEndpointsPopulated", + Message: "Revision is backed by Activator", + }) +} + +// MarkActivatorEndpointsRemoved is setting the ActivatorEndpointsPopulated to False. +func (sss *ServerlessServiceStatus) MarkActivatorEndpointsRemoved() { + serverlessServiceCondSet.Manage(sss).SetCondition(apis.Condition{ + Type: ActivatorEndpointsPopulated, + Status: corev1.ConditionFalse, + Severity: apis.ConditionSeverityInfo, + Reason: "ActivatorEndpointsPopulated", + Message: "Revision is backed by Activator", + }) +} + +// MarkEndpointsNotReady marks the ServerlessServiceStatus endpoints populated condition to unknown. 
func (sss *ServerlessServiceStatus) MarkEndpointsNotReady(reason string) { serverlessServiceCondSet.Manage(sss).MarkUnknown( ServerlessServiceConditionEndspointsPopulated, reason, @@ -65,6 +91,16 @@ func (sss *ServerlessServiceStatus) IsReady() bool { return serverlessServiceCondSet.Manage(sss).IsHappy() } -func (sss *ServerlessServiceStatus) duck() *duckv1beta1.Status { +func (sss *ServerlessServiceStatus) duck() *duckv1.Status { return &sss.Status } + +// ProxyFor returns how long it has been since Activator was moved +// to the request path. +func (sss *ServerlessServiceStatus) ProxyFor() time.Duration { + cond := sss.GetCondition(ActivatorEndpointsPopulated) + if cond == nil || cond.Status != corev1.ConditionTrue { + return 0 + } + return time.Since(cond.LastTransitionTime.Inner.Time) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle_test.go new file mode 100644 index 0000000000..e07ebcebd1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_lifecycle_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" +) + +func TestServerlessServiceDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&ServerlessService{}, test.t) + if err != nil { + t.Errorf("VerifyType(ServerlessService, %T) = %v", test.t, err) + } + }) + } +} + +func TestGetGroupVersionKind(t *testing.T) { + ss := ServerlessService{} + expected := SchemeGroupVersion.WithKind("ServerlessService") + if diff := cmp.Diff(expected, ss.GetGroupVersionKind()); diff != "" { + t.Errorf("Unexpected diff (-want, +got) = %v", diff) + } +} + +func TestSSTypicalFlow(t *testing.T) { + r := &ServerlessServiceStatus{} + r.InitializeConditions() + + apitestv1.CheckConditionOngoing(r.duck(), ServerlessServiceConditionReady, t) + + r.MarkEndpointsReady() + apitestv1.CheckConditionSucceeded(r.duck(), ServerlessServiceConditionEndspointsPopulated, t) + apitestv1.CheckConditionSucceeded(r.duck(), ServerlessServiceConditionReady, t) + + // Verify that activator endpoints status is informational and does not + // affect readiness. + r.MarkActivatorEndpointsPopulated() + apitestv1.CheckConditionSucceeded(r.duck(), ServerlessServiceConditionReady, t) + r.MarkActivatorEndpointsRemoved() + apitestv1.CheckConditionSucceeded(r.duck(), ServerlessServiceConditionReady, t) + + // Or another way to check the same condition. + if !r.IsReady() { + t.Error("IsReady=false, want: true") + } + r.MarkEndpointsNotReady("random") + apitestv1.CheckConditionOngoing(r.duck(), ServerlessServiceConditionReady, t) + + // Verify that activator endpoints status is informational and does not + // affect readiness. 
+ r.MarkActivatorEndpointsPopulated() + apitestv1.CheckConditionOngoing(r.duck(), ServerlessServiceConditionReady, t) + r.MarkActivatorEndpointsRemoved() + apitestv1.CheckConditionOngoing(r.duck(), ServerlessServiceConditionReady, t) + + r.MarkEndpointsNotOwned("service", "jukebox") + apitestv1.CheckConditionFailed(r.duck(), ServerlessServiceConditionReady, t) + + // Verify that activator endpoints status is informational and does not + // affect readiness. + r.MarkActivatorEndpointsPopulated() + apitestv1.CheckConditionFailed(r.duck(), ServerlessServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(r.duck(), ActivatorEndpointsPopulated, t) + + time.Sleep(time.Millisecond * 1) + if got, want := r.ProxyFor(), time.Duration(0); got == want { + t.Error("ProxyFor returned duration of 0") + } + + r.MarkActivatorEndpointsRemoved() + apitestv1.CheckConditionFailed(r.duck(), ServerlessServiceConditionReady, t) + apitestv1.CheckConditionFailed(r.duck(), ActivatorEndpointsPopulated, t) + + if got, want := r.ProxyFor(), time.Duration(0); got != want { + t.Errorf("ProxyFor = %v, want: %v", got, want) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go similarity index 88% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go index 47fa1a5b4e..264d942e87 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_types.go @@ -17,12 +17,12 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" - networking "github.com/knative/serving/pkg/apis/networking" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + networking "knative.dev/serving/pkg/apis/networking" ) // +genclient @@ -104,7 +104,7 @@ type ServerlessServiceSpec struct { // ServerlessServiceStatus describes the current state of the ServerlessService. type ServerlessServiceStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // ServiceName holds the name of a core K8s Service resource that // load balances over the pods backing this Revision (activator or revision). @@ -119,11 +119,17 @@ type ServerlessServiceStatus struct { // ConditionType represents a ServerlessService condition value const ( - // ServerlessServiceConditionReady is set when the clusterIngress networking setting is + // ServerlessServiceConditionReady is set when the ingress networking setting is // configured and it has a load balancer address. ServerlessServiceConditionReady = apis.ConditionReady // ServerlessServiceConditionEndspointsPopulated is set when the ServerlessService's underlying // Revision K8s Service has been populated with endpoints. ServerlessServiceConditionEndspointsPopulated apis.ConditionType = "EndpointsPopulated" + + // ActivatorEndpointsPopulated is an informational status that reports + // when the revision is backed by activator points. This might happen even if + // revision is active (no pods yet created) or even when it has healthy pods + // (e.g. due to target burst capacity settings). 
+ ActivatorEndpointsPopulated apis.ConditionType = "ActivatorEndpointsPopulated" ) diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go index fe5151a5d8..ebace729dc 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation.go @@ -19,9 +19,9 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" ) // Validate inspects and validates ClusterServerlessService object. diff --git a/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation_test.go new file mode 100644 index 0000000000..8a8c407683 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/serverlessservice_validation_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + networking "knative.dev/serving/pkg/apis/networking" +) + +func TestServerlessServiceSpecValidation(t *testing.T) { + tests := []struct { + name string + skss *ServerlessServiceSpec + want *apis.FieldError + }{{ + name: "valid proxy", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeProxy, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + }, + ProtocolType: networking.ProtocolHTTP1, + }, + want: nil, + }, { + name: "valid serve", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeServe, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + }, + ProtocolType: networking.ProtocolH2C, + }, + want: nil, + }, { + name: "invalid protocol", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeServe, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + }, + ProtocolType: networking.ProtocolType("gRPC"), + }, + want: apis.ErrInvalidValue("gRPC", "protocolType"), + }, { + name: "wrong mode", + skss: &ServerlessServiceSpec{ + Mode: "bombastic", + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + }, + ProtocolType: networking.ProtocolH2C, + }, + want: apis.ErrInvalidValue("bombastic", "mode"), + }, { + name: "no mode", + skss: &ServerlessServiceSpec{ + Mode: "", + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo", + }, + ProtocolType: networking.ProtocolHTTP1, + }, + want: apis.ErrMissingField("mode"), + }, { + name: "no object reference", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeProxy, + ProtocolType: networking.ProtocolH2C, + }, + want: 
apis.ErrMissingField("objectRef.apiVersion", "objectRef.kind", "objectRef.name"), + }, { + name: "empty object reference", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeProxy, + ObjectRef: corev1.ObjectReference{}, + ProtocolType: networking.ProtocolHTTP1, + }, + want: apis.ErrMissingField("objectRef.apiVersion", "objectRef.kind", "objectRef.name"), + }, { + name: "missing kind", + skss: &ServerlessServiceSpec{ + Mode: SKSOperationModeProxy, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Name: "foo", + }, + ProtocolType: networking.ProtocolHTTP1, + }, + want: apis.ErrMissingField("objectRef.kind"), + }, { + name: "multiple errors", + skss: &ServerlessServiceSpec{ + ObjectRef: corev1.ObjectReference{ + Kind: "Deployment", + }, + ProtocolType: networking.ProtocolH2C, + }, + want: apis.ErrMissingField("mode", "objectRef.apiVersion", "objectRef.name"), + }, { + name: "empty spec", + skss: &ServerlessServiceSpec{}, + want: apis.ErrMissingField(apis.CurrentField), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.skss.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + // Now validate via parent object + got = (&ServerlessService{ + Spec: *test.skss, + }).Validate(context.Background()) + if diff := cmp.Diff(test.want.ViaField("spec").Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go index 2363fd9fd5..0f4641d8b7 100644 --- 
a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/serving/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ limitations under the License. package v1alpha1 import ( - apis "github.com/knative/pkg/apis" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -58,7 +58,7 @@ func (in *Certificate) DeepCopyObject() runtime.Object { func (in *CertificateList) DeepCopyInto(out *CertificateList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Certificate, len(*in)) @@ -136,67 +136,6 @@ func (in *CertificateStatus) DeepCopy() *CertificateStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterIngress) DeepCopyInto(out *ClusterIngress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIngress. -func (in *ClusterIngress) DeepCopy() *ClusterIngress { - if in == nil { - return nil - } - out := new(ClusterIngress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ClusterIngress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterIngressList) DeepCopyInto(out *ClusterIngressList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterIngress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterIngressList. -func (in *ClusterIngressList) DeepCopy() *ClusterIngressList { - if in == nil { - return nil - } - out := new(ClusterIngressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterIngressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTP01Challenge) DeepCopyInto(out *HTTP01Challenge) { *out = *in @@ -376,7 +315,7 @@ func (in *IngressBackendSplit) DeepCopy() *IngressBackendSplit { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -470,6 +409,16 @@ func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { *out = new(LoadBalancerStatus) (*in).DeepCopyInto(*out) } + if in.PublicLoadBalancer != nil { + in, out := &in.PublicLoadBalancer, &out.PublicLoadBalancer + *out = new(LoadBalancerStatus) + (*in).DeepCopyInto(*out) + } + if in.PrivateLoadBalancer != nil { + in, out := &in.PrivateLoadBalancer, &out.PrivateLoadBalancer + *out = new(LoadBalancerStatus) + (*in).DeepCopyInto(*out) + } return } @@ -573,7 +522,7 @@ func (in *ServerlessService) DeepCopyObject() runtime.Object { func (in *ServerlessServiceList) DeepCopyInto(out *ServerlessServiceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServerlessService, len(*in)) diff --git a/test/vendor/knative.dev/serving/pkg/apis/roadmap-2018.md b/test/vendor/knative.dev/serving/pkg/apis/roadmap-2018.md new file mode 100644 index 0000000000..c36737d476 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/roadmap-2018.md @@ -0,0 +1,69 @@ +# 2018 API Core Roadmap + +The purpose of the API Core group is to implement the control plane API for the +Knative Serving project. This includes the API governance process as well as +implementation and supporting documentation. + +This roadmap is what we hope to accomplish in 2018. 
+ +## References + +- [Resource Overview](../../docs/spec/overview.md) +- [Conformance Tests](../../test/conformance/README.md) + +In 2018, we will largely focus on curating and implementing the Knative Serving +resource specification. + +## Areas of Interest and Requirements + +1. **Process**. It must be clear to contributors how to drive changes to the + Knative Serving API. +1. **Schema**. [The Knative Serving API schema](../../docs/spec/spec.md) matches + [our implementation.](./serving/). +1. **Semantics**. The [semantics](../../cmd/controller/) of Knative Serving API + interactions match + [our specification](../../docs/spec/normative_examples.md), and are well + covered by [conformance testing](../../test/conformance/README.md). + + + +### Process + +1. **Define the process** by which changes to the API are proposed, approved, + and implemented. +1. **Define our conventions** to which API changes should adhere for consistency + with the Knative Serving API. + +### Specification + +1. **Complete our implementation** of the initial API specification. + +1. **Track changes** to our API specification (according to our process) over + time, including the versioning of API resources. + +1. **Triage drift** of our implementation from the API specification. + + + +### Semantics + +1. **Implement our desired semantics** as outlined in our + ["normative examples"](../../docs/spec/normative_examples.md). + +1. **Fail gracefully and clearly** as outlined in our + ["errors conditions and reporting"](../../docs/spec/errors.md) docs. + + + +1. **Ensure continued conformance** of our implementation with the API + specification over time by ensuring semantics are well covered by our + conformance testing. + + + + +1. **Operator Extensions**. Guidelines for how operators can/should customize an + Knative Serving installation (e.g. runtime contract) are captured in + documentation. 
+ + diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/fieldmask.go b/test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go similarity index 99% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/fieldmask.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go index fe958373cb..833d0aa207 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/fieldmask.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go @@ -147,6 +147,7 @@ func PodSpecMask(in *corev1.PodSpec) *corev1.PodSpec { out.ServiceAccountName = in.ServiceAccountName out.Containers = in.Containers out.Volumes = in.Volumes + out.ImagePullSecrets = in.ImagePullSecrets // Disallowed fields // This list is unnecessary, but added here for clarity @@ -163,7 +164,6 @@ func PodSpecMask(in *corev1.PodSpec) *corev1.PodSpec { out.HostIPC = false out.ShareProcessNamespace = nil out.SecurityContext = nil - out.ImagePullSecrets = nil out.Hostname = "" out.Subdomain = "" out.Affinity = nil diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask_test.go new file mode 100644 index 0000000000..acbbcf53e9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/fieldmask_test.go @@ -0,0 +1,648 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serving + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/kmp" + "knative.dev/pkg/ptr" +) + +func TestVolumeMask(t *testing.T) { + want := &corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{}, + } + in := want + + got := VolumeMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("VolumeMask (-want, +got): %s", diff) + } + + if got = VolumeMask(nil); got != nil { + t.Errorf("VolumeMask(nil) = %v, want: nil", got) + } +} + +func TestVolumeSourceMask(t *testing.T) { + want := &corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{}, + ConfigMap: &corev1.ConfigMapVolumeSource{}, + } + in := &corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{}, + ConfigMap: &corev1.ConfigMapVolumeSource{}, + NFS: &corev1.NFSVolumeSource{}, + } + + got := VolumeSourceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("VolumeSourceMask (-want, +got): %s", diff) + } + + if got = VolumeSourceMask(nil); got != nil { + t.Errorf("VolumeSourceMask(nil) = %v, want: nil", got) + } +} + +func TestPodSpecMask(t *testing.T) { + want := &corev1.PodSpec{ + ServiceAccountName: "default", + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "foo", + }}, + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + } + in := &corev1.PodSpec{ + ServiceAccountName: "default", + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "foo", + }}, + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + // Stripped out. + InitContainers: []corev1.Container{{ + Image: "busybox", + }}, + } + + got := PodSpecMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("PodSpecMask (-want, +got): %s", diff) + } + + if got = PodSpecMask(nil); got != nil { + t.Errorf("PodSpecMask(nil) = %v, want: nil", got) + } +} + +func TestContainerMask(t *testing.T) { + want := &corev1.Container{ + Name: "foo", + Args: []string{"hello"}, + Command: []string{"world"}, + Env: []corev1.EnvVar{{}}, + EnvFrom: []corev1.EnvFromSource{{}}, + Image: "python", + ImagePullPolicy: corev1.PullAlways, + LivenessProbe: &corev1.Probe{}, + Ports: []corev1.ContainerPort{{}}, + ReadinessProbe: &corev1.Probe{}, + Resources: corev1.ResourceRequirements{}, + SecurityContext: &corev1.SecurityContext{}, + TerminationMessagePath: "/", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + VolumeMounts: []corev1.VolumeMount{{}}, + } + in := &corev1.Container{ + Args: []string{"hello"}, + Command: []string{"world"}, + Env: []corev1.EnvVar{{}}, + EnvFrom: []corev1.EnvFromSource{{}}, + Image: "python", + ImagePullPolicy: corev1.PullAlways, + LivenessProbe: &corev1.Probe{}, + Ports: []corev1.ContainerPort{{}}, + ReadinessProbe: &corev1.Probe{}, + Resources: corev1.ResourceRequirements{}, + SecurityContext: &corev1.SecurityContext{}, + TerminationMessagePath: "/", + TerminationMessagePolicy: corev1.TerminationMessageReadFile, + VolumeMounts: []corev1.VolumeMount{{}}, + Name: "foo", + Stdin: true, + StdinOnce: true, + TTY: true, + } + + got := ContainerMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ContainerMask (-want, +got): %s", diff) + } + + if got = ContainerMask(nil); got != nil { + t.Errorf("ContainerMask(nil) = %v, want: nil", got) + } +} + +func TestVolumeMountMask(t *testing.T) { + mode := corev1.MountPropagationBidirectional + + want := &corev1.VolumeMount{ + Name: "foo", + ReadOnly: true, + MountPath: "/foo/bar", + SubPath: "baz", + } + in := &corev1.VolumeMount{ + Name: "foo", + ReadOnly: true, + MountPath: "/foo/bar", + SubPath: "baz", + MountPropagation: &mode, + } + + got := VolumeMountMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("VolumeMountMask (-want, +got): %s", diff) + } + + if got = VolumeMountMask(nil); got != nil { + t.Errorf("VolumeMountMask(nil) = %v, want: nil", got) + } +} + +func TestProbeMask(t *testing.T) { + want := &corev1.Probe{ + Handler: corev1.Handler{}, + InitialDelaySeconds: 42, + TimeoutSeconds: 42, + PeriodSeconds: 42, + SuccessThreshold: 42, + FailureThreshold: 42, + } + in := want + + got := ProbeMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ProbeMask (-want, +got): %s", diff) + } + + if got = ProbeMask(nil); got != nil { + t.Errorf("ProbeMask(nil) = %v, want: nil", got) + } +} + +func TestHandlerMask(t *testing.T) { + want := &corev1.Handler{ + Exec: &corev1.ExecAction{}, + HTTPGet: &corev1.HTTPGetAction{}, + TCPSocket: &corev1.TCPSocketAction{}, + } + in := want + + got := HandlerMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("HandlerMask (-want, +got): %s", diff) + } + + if got = HandlerMask(nil); got != nil { + t.Errorf("HandlerMask(nil) = %v, want: nil", got) + } +} + +func TestExecActionMask(t *testing.T) { + want := &corev1.ExecAction{ + Command: []string{"foo", "bar"}, + } + in := want + + got := ExecActionMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ExecActionMask (-want, +got): %s", diff) + } + + if got = ExecActionMask(nil); got != nil { + t.Errorf("ExecActionMask(nil) = %v, want: nil", got) + } +} + +func TestHTTPGetActionMask(t *testing.T) { + want := &corev1.HTTPGetAction{ + Host: "foo", + Path: "/bar", + Scheme: corev1.URISchemeHTTP, + HTTPHeaders: []corev1.HTTPHeader{{}}, + } + in := &corev1.HTTPGetAction{ + Host: "foo", + Path: "/bar", + Scheme: corev1.URISchemeHTTP, + HTTPHeaders: []corev1.HTTPHeader{{}}, + Port: intstr.FromInt(8080), + } + + got := HTTPGetActionMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("HTTPGetActionMask (-want, +got): %s", diff) + } + + if got = HTTPGetActionMask(nil); got != nil { + t.Errorf("HTTPGetActionMask(nil) = %v, want: nil", got) + } +} + +func TestTCPSocketActionMask(t *testing.T) { + want := &corev1.TCPSocketAction{ + Host: "foo", + } + in := &corev1.TCPSocketAction{ + Host: "foo", + Port: intstr.FromInt(8080), + } + + got := TCPSocketActionMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("TCPSocketActionMask (-want, +got): %s", diff) + } + + if got = TCPSocketActionMask(nil); got != nil { + t.Errorf("TCPSocketActionMask(nil) = %v, want: nil", got) + } +} + +func TestContainerPortMask(t *testing.T) { + want := &corev1.ContainerPort{ + ContainerPort: 42, + Name: "foo", + Protocol: corev1.ProtocolTCP, + } + in := &corev1.ContainerPort{ + ContainerPort: 42, + Name: "foo", + Protocol: corev1.ProtocolTCP, + HostIP: "10.0.0.1", + HostPort: 43, + } + + got := ContainerPortMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ContainerPortMask (-want, +got): %s", diff) + } + + if got = ContainerPortMask(nil); got != nil { + t.Errorf("ContainerPortMask(nil) = %v, want: nil", got) + } +} + +func TestEnvVarMask(t *testing.T) { + want := &corev1.EnvVar{ + Name: "foo", + Value: "bar", + ValueFrom: &corev1.EnvVarSource{}, + } + in := want + + got := EnvVarMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("EnvVarMask (-want, +got): %s", diff) + } + + if got = EnvVarMask(nil); got != nil { + t.Errorf("EnvVarMask(nil) = %v, want: nil", got) + } +} + +func TestEnvVarSourceMask(t *testing.T) { + want := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{}, + SecretKeyRef: &corev1.SecretKeySelector{}, + } + in := &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{}, + SecretKeyRef: &corev1.SecretKeySelector{}, + FieldRef: &corev1.ObjectFieldSelector{}, + ResourceFieldRef: &corev1.ResourceFieldSelector{}, + } + + got := EnvVarSourceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("EnvVarSourceMask (-want, +got): %s", diff) + } + + if got = EnvVarSourceMask(nil); got != nil { + t.Errorf("EnvVarSourceMask(nil) = %v, want: nil", got) + } +} + +func TestLocalObjectReferenceMask(t *testing.T) { + want := &corev1.LocalObjectReference{ + Name: "foo", + } + in := want + + got := LocalObjectReferenceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("LocalObjectReferenceMask (-want, +got): %s", diff) + } + + if got = LocalObjectReferenceMask(nil); got != nil { + t.Errorf("LocalObjectReferenceMask(nil) = %v, want: nil", got) + } +} + +func TestConfigMapKeySelectorMask(t *testing.T) { + want := &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{}, + Key: "foo", + Optional: ptr.Bool(true), + } + in := want + + got := ConfigMapKeySelectorMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ConfigMapKeySelectorMask (-want, +got): %s", diff) + } + + if got = ConfigMapKeySelectorMask(nil); got != nil { + t.Errorf("ConfigMapKeySelectorMask(nil) = %v, want: nil", got) + } +} + +func TestSecretKeySelectorMask(t *testing.T) { + want := &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{}, + Key: "foo", + Optional: ptr.Bool(true), + } + in := want + + got := SecretKeySelectorMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("SecretKeySelectorMask (-want, +got): %s", diff) + } + + if got = SecretKeySelectorMask(nil); got != nil { + t.Errorf("SecretKeySelectorMask(nil) = %v, want: nil", got) + } +} + +func TestConfigMapEnvSourceMask(t *testing.T) { + want := &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{}, + Optional: ptr.Bool(true), + } + in := want + + got := ConfigMapEnvSourceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ConfigMapEnvSourceMask (-want, +got): %s", diff) + } + + if got = ConfigMapEnvSourceMask(nil); got != nil { + t.Errorf("ConfigMapEnvSourceMask(nil) = %v, want: nil", got) + } +} + +func TestSecretEnvSourceMask(t *testing.T) { + want := &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{}, + Optional: ptr.Bool(true), + } + in := want + + got := SecretEnvSourceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("SecretEnvSourceMask (-want, +got): %s", diff) + } + + if got = SecretEnvSourceMask(nil); got != nil { + t.Errorf("SecretEnvSourceMask(nil) = %v, want: nil", got) + } +} + +func TestEnvFromSourceMask(t *testing.T) { + want := &corev1.EnvFromSource{ + Prefix: "foo", + ConfigMapRef: &corev1.ConfigMapEnvSource{}, + SecretRef: &corev1.SecretEnvSource{}, + } + in := want + + got := EnvFromSourceMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("EnvFromSourceMask (-want, +got): %s", diff) + } + + if got = EnvFromSourceMask(nil); got != nil { + t.Errorf("EnvFromSourceMask(nil) = %v, want: nil", got) + } +} + +func TestResourceRequirementsMask(t *testing.T) { + want := &corev1.ResourceRequirements{ + Limits: make(corev1.ResourceList), + Requests: make(corev1.ResourceList), + } + in := want + + got := ResourceRequirementsMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("ResourceRequirementsMask (-want, +got): %s", diff) + } + + if got = ResourceRequirementsMask(nil); got != nil { + t.Errorf("ResourceRequirementsMask(nil) = %v, want: nil", got) + } +} + +func TestSecurityContextMask(t *testing.T) { + mtype := corev1.UnmaskedProcMount + want := &corev1.SecurityContext{ + RunAsUser: ptr.Int64(1), + } + in := &corev1.SecurityContext{ + RunAsUser: ptr.Int64(1), + Capabilities: &corev1.Capabilities{}, + Privileged: ptr.Bool(true), + SELinuxOptions: &corev1.SELinuxOptions{}, + RunAsGroup: ptr.Int64(2), + RunAsNonRoot: ptr.Bool(true), + ReadOnlyRootFilesystem: ptr.Bool(true), + AllowPrivilegeEscalation: ptr.Bool(true), + ProcMount: &mtype, + } + + got := SecurityContextMask(in) + + if &want == &got { + t.Errorf("Input and output share addresses. 
Want different addresses") + } + + if diff, err := kmp.SafeDiff(want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if diff != "" { + t.Errorf("SecurityContextMask (-want, +got): %s", diff) + } + + if got = SecurityContextMask(nil); got != nil { + t.Errorf("SecurityContextMask(nil) = %v, want: nil", got) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go new file mode 100644 index 0000000000..f40bfbf9fd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serving + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +var depCondSet = apis.NewLivingConditionSet( + DeploymentConditionProgressing, + DeploymentConditionReplicaSetReady, +) + +const ( + // DeploymentConditionReady means the underlying deployment is ready. + DeploymentConditionReady = apis.ConditionReady + // DeploymentConditionReplicaSetready inverts the underlying deployment's + // ReplicaSetFailure condition. + DeploymentConditionReplicaSetReady apis.ConditionType = "ReplicaSetReady" + // DeploymentConditionProgressing reflects the underlying deployment's + // Progressing condition. 
+ DeploymentConditionProgressing apis.ConditionType = "Progressing" +) + +// transformDeploymentStatus transforms the kubernetes DeploymentStatus into a +// duckv1.Status that uses ConditionSets to propagate failures and expose +// a top-level happy state, per our condition conventions. +func TransformDeploymentStatus(ds *appsv1.DeploymentStatus) *duckv1.Status { + s := &duckv1.Status{} + + depCondSet.Manage(s).InitializeConditions() + // The absence of this condition means no failure has occurred. If we find it + // below, we'll ovewrwrite this. + depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady) + + for _, cond := range ds.Conditions { + // TODO(jonjohnsonjr): Should we care about appsv1.DeploymentAvailable here? + switch cond.Type { + case appsv1.DeploymentProgressing: + switch cond.Status { + case corev1.ConditionUnknown: + depCondSet.Manage(s).MarkUnknown(DeploymentConditionProgressing, cond.Reason, cond.Message) + case corev1.ConditionTrue: + depCondSet.Manage(s).MarkTrue(DeploymentConditionProgressing) + case corev1.ConditionFalse: + depCondSet.Manage(s).MarkFalse(DeploymentConditionProgressing, cond.Reason, cond.Message) + } + case appsv1.DeploymentReplicaFailure: + switch cond.Status { + case corev1.ConditionUnknown: + depCondSet.Manage(s).MarkUnknown(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message) + case corev1.ConditionTrue: + depCondSet.Manage(s).MarkFalse(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message) + case corev1.ConditionFalse: + depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady) + } + } + } + return s +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle_test.go new file mode 100644 index 0000000000..699bb0da76 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2019 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package serving + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestTransformDeploymentStatus(t *testing.T) { + tests := []struct { + name string + ds *appsv1.DeploymentStatus + want *duckv1.Status + }{{ + name: "initial conditions", + ds: &appsv1.DeploymentStatus{}, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: DeploymentConditionProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionTrue, + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, { + name: "happy without rs", + ds: &appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }}, + }, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: DeploymentConditionProgressing, + Status: corev1.ConditionTrue, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionTrue, + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, { + name: "happy with rs", + ds: &appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }, { + Type: 
appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionFalse, + }}, + }, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: DeploymentConditionProgressing, + Status: corev1.ConditionTrue, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionTrue, + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, { + name: "false progressing unknown rs", + ds: &appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionUnknown, + }}, + }, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: DeploymentConditionProgressing, + Status: corev1.ConditionFalse, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionUnknown, + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, { + name: "unknown progressing failed rs", + ds: &appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionTrue, + }}, + }, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: DeploymentConditionProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionFalse, + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, { + name: "reason and message propagate", + ds: &appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionTrue, + Reason: "ReplicaSetReason", + Message: "Something bag happened", + }}, + }, + want: &duckv1.Status{ + Conditions: []apis.Condition{{ + Type: 
DeploymentConditionProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: DeploymentConditionReplicaSetReady, + Status: corev1.ConditionFalse, + Reason: "ReplicaSetReason", + Message: "Something bag happened", + }, { + Type: DeploymentConditionReady, + Status: corev1.ConditionFalse, + Reason: "ReplicaSetReason", + Message: "Something bag happened", + }}, + }, + }} + + opts := []cmp.Option{ + cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime"), + cmpopts.SortSlices(func(a, b apis.Condition) bool { + return a.Type < b.Type + }), + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + want, got := test.want, TransformDeploymentStatus(test.ds) + if diff := cmp.Diff(want, got, opts...); diff != "" { + t.Errorf("GetCondition refs diff (-want +got): %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/k8s_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go similarity index 82% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/k8s_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go index 2e26ce06fc..09be210985 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/k8s_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation.go @@ -23,12 +23,13 @@ import ( "strings" "github.com/google/go-containerregistry/pkg/name" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/networking" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" + "knative.dev/pkg/profiling" + "knative.dev/serving/pkg/apis/networking" ) const ( @@ -97,9 +98,15 @@ func validateVolume(volume corev1.Volume) *apis.FieldError { specified := []string{} if vs.Secret != nil { specified = append(specified, "secret") + for i, item := range vs.Secret.Items { + errs = 
errs.Also(validateKeyToPath(item).ViaFieldIndex("items", i)) + } } if vs.ConfigMap != nil { specified = append(specified, "configMap") + for i, item := range vs.ConfigMap.Items { + errs = errs.Also(validateKeyToPath(item).ViaFieldIndex("items", i)) + } } if vs.Projected != nil { specified = append(specified, "projected") @@ -143,7 +150,7 @@ func validateConfigMapProjection(cmp *corev1.ConfigMapProjection) *apis.FieldErr errs = errs.Also(apis.ErrMissingField("name")) } for i, item := range cmp.Items { - errs = errs.Also(apis.CheckDisallowedFields(item, *KeyToPathMask(&item)).ViaIndex(i)) + errs = errs.Also(validateKeyToPath(item).ViaFieldIndex("items", i)) } return errs } @@ -156,7 +163,18 @@ func validateSecretProjection(sp *corev1.SecretProjection) *apis.FieldError { errs = errs.Also(apis.ErrMissingField("name")) } for i, item := range sp.Items { - errs = errs.Also(apis.CheckDisallowedFields(item, *KeyToPathMask(&item)).ViaIndex(i)) + errs = errs.Also(validateKeyToPath(item).ViaFieldIndex("items", i)) + } + return errs +} + +func validateKeyToPath(k2p corev1.KeyToPath) *apis.FieldError { + errs := apis.CheckDisallowedFields(k2p, *KeyToPathMask(&k2p)) + if k2p.Key == "" { + errs = errs.Also(apis.ErrMissingField("key")) + } + if k2p.Path == "" { + errs = errs.Also(apis.ErrMissingField("path")) } return errs } @@ -241,6 +259,11 @@ func ValidatePodSpec(ps corev1.PodSpec) *apis.FieldError { default: errs = errs.Also(apis.ErrMultipleOneOf("containers")) } + if ps.ServiceAccountName != "" { + for range validation.IsDNS1123Subdomain(ps.ServiceAccountName) { + errs = errs.Also(apis.ErrInvalidValue("serviceAccountName", ps.ServiceAccountName)) + } + } return errs } @@ -278,7 +301,7 @@ func ValidateContainer(container corev1.Container, volumes sets.String) *apis.Fi // Ports errs = errs.Also(validateContainerPorts(container.Ports).ViaField("ports")) // Readiness Probes - errs = errs.Also(validateProbe(container.ReadinessProbe).ViaField("readinessProbe")) + errs = 
errs.Also(validateReadinessProbe(container.ReadinessProbe).ViaField("readinessProbe")) // Resources errs = errs.Also(validateResources(&container.Resources).ViaField("resources")) // SecurityContext @@ -396,13 +419,14 @@ func validateContainerPorts(ports []corev1.ContainerPort) *apis.FieldError { userPort.ContainerPort == networking.BackendHTTP2Port || userPort.ContainerPort == networking.QueueAdminPort || userPort.ContainerPort == networking.AutoscalingQueueMetricsPort || - userPort.ContainerPort == networking.UserQueueMetricsPort { + userPort.ContainerPort == networking.UserQueueMetricsPort || + userPort.ContainerPort == profiling.ProfilingPort { errs = errs.Also(apis.ErrInvalidValue(userPort.ContainerPort, "containerPort")) } - if userPort.ContainerPort < 1 || userPort.ContainerPort > 65535 { + if userPort.ContainerPort < 0 || userPort.ContainerPort > 65535 { errs = errs.Also(apis.ErrOutOfBoundsValue(userPort.ContainerPort, - 1, 65535, "containerPort")) + 0, 65535, "containerPort")) } if !validPortNames.Has(userPort.Name) { @@ -416,6 +440,53 @@ func validateContainerPorts(ports []corev1.ContainerPort) *apis.FieldError { return errs } +func validateReadinessProbe(p *corev1.Probe) *apis.FieldError { + if p == nil { + return nil + } + + errs := validateProbe(p) + + if p.PeriodSeconds < 0 { + errs = errs.Also(apis.ErrOutOfBoundsValue(p.PeriodSeconds, 0, math.MaxInt32, "periodSeconds")) + } + + if p.InitialDelaySeconds < 0 { + errs = errs.Also(apis.ErrOutOfBoundsValue(p.InitialDelaySeconds, 0, math.MaxInt32, "initialDelaySeconds")) + } + + if p.SuccessThreshold < 1 { + errs = errs.Also(apis.ErrOutOfBoundsValue(p.SuccessThreshold, 1, math.MaxInt32, "successThreshold")) + } + + // PeriodSeconds == 0 indicates Knative's special probe with aggressive retries + if p.PeriodSeconds == 0 { + if p.FailureThreshold != 0 { + errs = errs.Also(&apis.FieldError{ + Message: "failureThreshold is disallowed when periodSeconds is zero", + Paths: []string{"failureThreshold"}, + }) + } 
+ + if p.TimeoutSeconds != 0 { + errs = errs.Also(&apis.FieldError{ + Message: "timeoutSeconds is disallowed when periodSeconds is zero", + Paths: []string{"timeoutSeconds"}, + }) + } + } else { + if p.TimeoutSeconds < 1 { + errs = errs.Also(apis.ErrOutOfBoundsValue(p.TimeoutSeconds, 1, math.MaxInt32, "timeoutSeconds")) + } + + if p.FailureThreshold < 1 { + errs = errs.Also(apis.ErrOutOfBoundsValue(p.FailureThreshold, 1, math.MaxInt32, "failureThreshold")) + } + } + + return errs +} + func validateProbe(p *corev1.Probe) *apis.FieldError { if p == nil { return nil @@ -425,12 +496,26 @@ func validateProbe(p *corev1.Probe) *apis.FieldError { h := p.Handler errs = errs.Also(apis.CheckDisallowedFields(h, *HandlerMask(&h))) - switch { - case h.HTTPGet != nil: + var handlers []string + + if h.HTTPGet != nil { + handlers = append(handlers, "httpGet") errs = errs.Also(apis.CheckDisallowedFields(*h.HTTPGet, *HTTPGetActionMask(h.HTTPGet))).ViaField("httpGet") - case h.TCPSocket != nil: + } + if h.TCPSocket != nil { + handlers = append(handlers, "tcpSocket") errs = errs.Also(apis.CheckDisallowedFields(*h.TCPSocket, *TCPSocketActionMask(h.TCPSocket))).ViaField("tcpSocket") } + if h.Exec != nil { + handlers = append(handlers, "exec") + errs = errs.Also(apis.CheckDisallowedFields(*h.Exec, *ExecActionMask(h.Exec))).ViaField("exec") + } + + if len(handlers) == 0 { + errs = errs.Also(apis.ErrMissingOneOf("httpGet", "tcpSocket", "exec")) + } else if len(handlers) > 1 { + errs = errs.Also(apis.ErrMultipleOneOf(handlers...)) + } return errs } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation_test.go new file mode 100644 index 0000000000..dfa6050d8e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/k8s_validation_test.go @@ -0,0 +1,1153 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this 
file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serving + +import ( + "fmt" + "math" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" +) + +func TestPodSpecValidation(t *testing.T) { + tests := []struct { + name string + ps corev1.PodSpec + want *apis.FieldError + }{{ + name: "valid", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + want: nil, + }, { + name: "with volume (ok)", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + want: nil, + }, { + name: "with volume name collision", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, { + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{}, + }, + }}, + }, + want: (&apis.FieldError{ + Message: fmt.Sprintf(`duplicate volume 
name "the-name"`), + Paths: []string{"name"}, + }).ViaFieldIndex("volumes", 1), + }, { + name: "with volume mount path collision", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-foo", + ReadOnly: true, + }, { + MountPath: "/mount/path", + Name: "the-bar", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-foo", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, { + Name: "the-bar", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "bar", + }, + }, + }}, + }, + want: apis.ErrInvalidValue(`"/mount/path" must be unique`, "mountPath"). + ViaFieldIndex("volumeMounts", 1).ViaFieldIndex("containers", 0), + }, { + name: "bad pod spec", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "steve", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }}, + }, + want: apis.ErrDisallowedFields("containers[0].lifecycle"), + }, { + name: "missing all", + ps: corev1.PodSpec{ + Containers: []corev1.Container{}, + }, + want: apis.ErrMissingField("containers"), + }, { + name: "missing container", + ps: corev1.PodSpec{ + ServiceAccountName: "bob", + Containers: []corev1.Container{}, + }, + want: apis.ErrMissingField("containers"), + }, { + name: "too many containers", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }, { + Image: "helloworld", + }}, + }, + want: apis.ErrMultipleOneOf("containers"), + }, { + name: "extra field", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + InitContainers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + want: apis.ErrDisallowedFields("initContainers"), + }, { + name: "bad service account name", + ps: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + ServiceAccountName: "foo@bar.baz", + }, + want: 
apis.ErrInvalidValue("serviceAccountName", "foo@bar.baz"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := ValidatePodSpec(test.ps) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("ValidatePodSpec (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestContainerValidation(t *testing.T) { + bidir := corev1.MountPropagationBidirectional + + tests := []struct { + name string + c corev1.Container + want *apis.FieldError + volumes sets.String + }{{ + name: "empty container", + c: corev1.Container{}, + want: apis.ErrMissingField(apis.CurrentField), + }, { + name: "valid container", + c: corev1.Container{ + Image: "foo", + }, + want: nil, + }, { + name: "invalid container image", + c: corev1.Container{ + Image: "foo:bar:baz", + }, + want: &apis.FieldError{ + Message: "Failed to parse image reference", + Paths: []string{"image"}, + Details: "image: \"foo:bar:baz\", error: could not parse reference", + }, + }, { + name: "has a lifecycle", + c: corev1.Container{ + Name: "foo", + Image: "foo", + Lifecycle: &corev1.Lifecycle{}, + }, + want: apis.ErrDisallowedFields("lifecycle"), + }, { + name: "has resources", + c: corev1.Container{ + Image: "foo", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceName("memory"): resource.MustParse("250M"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("25m"), + }, + }, + }, + want: nil, + }, { + name: "has no container ports set", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{}, + }, + want: nil, + }, { + name: "has valid unnamed user port", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8181, + }}, + }, + want: nil, + }, { + name: "has valid user port http1", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Name: "http1", + }}, + }, + want: nil, + }, { + name: "has valid user port 
h2c", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Name: "h2c", + }}, + }, + want: nil, + }, { + name: "has more than one ports with valid names", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Name: "h2c", + }, { + Name: "http1", + }}, + }, + want: &apis.FieldError{ + Message: "More than one container port is set", + Paths: []string{"ports"}, + Details: "Only a single port is allowed", + }, + }, { + name: "has container port value too large", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 65536, + }}, + }, + want: apis.ErrOutOfBoundsValue(65536, 0, 65535, "ports.containerPort"), + }, { + name: "has an empty port set", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{}}, + }, + want: nil, + }, { + name: "has more than one unnamed port", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8080, + }, { + ContainerPort: 8181, + }}, + }, + want: &apis.FieldError{ + Message: "More than one container port is set", + Paths: []string{"ports"}, + Details: "Only a single port is allowed", + }, + }, { + name: "has tcp protocol", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Protocol: corev1.ProtocolTCP, + }}, + }, + want: nil, + }, { + name: "has invalid protocol", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Protocol: "tdp", + }}, + }, + want: apis.ErrInvalidValue("tdp", "ports.protocol"), + }, { + name: "has host port", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + HostPort: 80, + }}, + }, + want: apis.ErrDisallowedFields("ports.hostPort"), + }, { + name: "has host ip", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + HostIP: "127.0.0.1", + }}, + }, + want: apis.ErrDisallowedFields("ports.hostIP"), + }, { + name: "port conflicts with queue proxy admin", + c: corev1.Container{ + Image: "foo", + Ports: 
[]corev1.ContainerPort{{ + ContainerPort: 8022, + }}, + }, + want: apis.ErrInvalidValue(8022, "ports.containerPort"), + }, { + name: "port conflicts with queue proxy", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8013, + }}, + }, + want: apis.ErrInvalidValue(8013, "ports.containerPort"), + }, { + name: "port conflicts with queue proxy", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8012, + }}, + }, + want: apis.ErrInvalidValue(8012, "ports.containerPort"), + }, { + name: "port conflicts with queue proxy metrics", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + ContainerPort: 9090, + }}, + }, + want: apis.ErrInvalidValue(9090, "ports.containerPort"), + }, { + name: "has invalid port name", + c: corev1.Container{ + Image: "foo", + Ports: []corev1.ContainerPort{{ + Name: "foobar", + }}, + }, + want: &apis.FieldError{ + Message: fmt.Sprintf("Port name %v is not allowed", "foobar"), + Paths: []string{"ports"}, + Details: "Name must be empty, or one of: 'h2c', 'http1'", + }, + }, { + name: "has unknown volumeMounts", + c: corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "the-name", + SubPath: "oops", + MountPropagation: &bidir, + }}, + }, + want: (&apis.FieldError{ + Message: "volumeMount has no matching volume", + Paths: []string{"name"}, + }).ViaFieldIndex("volumeMounts", 0).Also( + apis.ErrMissingField("readOnly").ViaFieldIndex("volumeMounts", 0)).Also( + apis.ErrMissingField("mountPath").ViaFieldIndex("volumeMounts", 0)).Also( + apis.ErrDisallowedFields("mountPropagation").ViaFieldIndex("volumeMounts", 0)), + }, { + name: "missing known volumeMounts", + c: corev1.Container{ + Image: "foo", + }, + volumes: sets.NewString("the-name"), + want: &apis.FieldError{ + Message: "volumes not mounted: [the-name]", + Paths: []string{"volumeMounts"}, + }, + }, { + name: "has known volumeMounts", + c: corev1.Container{ + Image: "foo", 
+ VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + volumes: sets.NewString("the-name"), + }, { + name: "has known volumeMounts, but at reserved path", + c: corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "//var//log//", + Name: "the-name", + ReadOnly: true, + }}, + }, + volumes: sets.NewString("the-name"), + want: (&apis.FieldError{ + Message: `mountPath "/var/log" is a reserved path`, + Paths: []string{"mountPath"}, + }).ViaFieldIndex("volumeMounts", 0), + }, { + name: "has known volumeMounts, bad mountPath", + c: corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "not/absolute", + Name: "the-name", + ReadOnly: true, + }}, + }, + volumes: sets.NewString("the-name"), + want: apis.ErrInvalidValue("not/absolute", "volumeMounts[0].mountPath"), + }, { + name: "has lifecycle", + c: corev1.Container{ + Image: "foo", + Lifecycle: &corev1.Lifecycle{}, + }, + want: apis.ErrDisallowedFields("lifecycle"), + }, { + name: "has known volumeMount twice", + c: corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }, { + MountPath: "/another/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + volumes: sets.NewString("the-name"), + }, { + name: "valid with probes (no port)", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, + }, + want: nil, + }, { + name: "valid with exec probes ", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + InitialDelaySeconds: 0, + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + 
FailureThreshold: 3, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{}, + }, + }, + }, + want: nil, + }, { + name: "invalid with no handler", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{}, + }, + }, + want: apis.ErrMissingOneOf("livenessProbe.httpGet", "livenessProbe.tcpSocket", "livenessProbe.exec"), + }, { + name: "invalid with multiple handlers", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + Exec: &corev1.ExecAction{}, + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, + }, + want: apis.ErrMultipleOneOf("readinessProbe.exec", "readinessProbe.tcpSocket", "readinessProbe.httpGet"), + }, { + name: "invalid readiness http probe (has port)", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(8080), + }, + }, + }, + }, + want: apis.ErrDisallowedFields("readinessProbe.httpGet.port"), + }, { + name: "invalid readiness probe (has failureThreshold while using special probe)", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 0, + FailureThreshold: 2, + SuccessThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "failureThreshold is disallowed when periodSeconds is zero", 
+ Paths: []string{"readinessProbe.failureThreshold"}, + }, + }, { + name: "invalid readiness probe (has timeoutSeconds while using special probe)", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 2, + SuccessThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "timeoutSeconds is disallowed when periodSeconds is zero", + Paths: []string{"readinessProbe.timeoutSeconds"}, + }, + }, { + name: "out of bounds probe values", + c: corev1.Container{ + Image: "foo", + ReadinessProbe: &corev1.Probe{ + PeriodSeconds: -1, + TimeoutSeconds: 0, + SuccessThreshold: 0, + FailureThreshold: 0, + InitialDelaySeconds: -1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + }, + want: apis.ErrOutOfBoundsValue(-1, 0, math.MaxInt32, "readinessProbe.periodSeconds").Also( + apis.ErrOutOfBoundsValue(0, 1, math.MaxInt32, "readinessProbe.timeoutSeconds")).Also( + apis.ErrOutOfBoundsValue(0, 1, math.MaxInt32, "readinessProbe.successThreshold")).Also( + apis.ErrOutOfBoundsValue(0, 1, math.MaxInt32, "readinessProbe.failureThreshold")).Also( + apis.ErrOutOfBoundsValue(-1, 0, math.MaxInt32, "readinessProbe.initialDelaySeconds")), + }, { + name: "disallowed security context field", + c: corev1.Container{ + Image: "foo", + SecurityContext: &corev1.SecurityContext{ + RunAsGroup: ptr.Int64(10), + }, + }, + want: apis.ErrDisallowedFields("securityContext.runAsGroup"), + }, { + name: "too large uid", + c: corev1.Container{ + Image: "foo", + SecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.Int64(math.MaxInt32 + 1), + }, + }, + want: apis.ErrOutOfBoundsValue(int64(math.MaxInt32+1), 0, math.MaxInt32, "securityContext.runAsUser"), + }, { + name: "negative uid", + c: corev1.Container{ + Image: "foo", + SecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.Int64(-10), + }, + }, + want: apis.ErrOutOfBoundsValue(-10, 0, 
math.MaxInt32, "securityContext.runAsUser"), + }, { + name: "envFrom - None of", + c: corev1.Container{ + Image: "foo", + EnvFrom: []corev1.EnvFromSource{{}}, + }, + want: apis.ErrMissingOneOf("envFrom.configMapRef", "envFrom.secretRef"), + }, { + name: "envFrom - Multiple", + c: corev1.Container{ + Image: "foo", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "ConfigMapName", + }, + }, + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "SecretName", + }, + }, + }}, + }, + want: apis.ErrMultipleOneOf("envFrom.configMapRef", "envFrom.secretRef"), + }, { + name: "envFrom - Secret", + c: corev1.Container{ + Image: "foo", + EnvFrom: []corev1.EnvFromSource{{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "SecretName", + }, + }, + }}, + }, + want: nil, + }, { + name: "envFrom - ConfigMap", + c: corev1.Container{ + Image: "foo", + EnvFrom: []corev1.EnvFromSource{{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "ConfigMapName", + }, + }, + }}, + }, + want: nil, + }, { + name: "termination message policy", + c: corev1.Container{ + Image: "foo", + TerminationMessagePolicy: corev1.TerminationMessagePolicy("Not a Policy"), + }, + want: apis.ErrInvalidValue(corev1.TerminationMessagePolicy("Not a Policy"), "terminationMessagePolicy"), + }, { + name: "empty env var name", + c: corev1.Container{ + Image: "foo", + Env: []corev1.EnvVar{{ + Value: "Foo", + }}, + }, + want: apis.ErrMissingField("env[0].name"), + }, { + name: "reserved env var name", + c: corev1.Container{ + Image: "foo", + Env: []corev1.EnvVar{{ + Name: "PORT", + Value: "Foo", + }}, + }, + want: &apis.FieldError{ + Message: `"PORT" is a reserved environment variable`, + Paths: []string{"env[0].name"}, + }, + }, { + name: "disallowed envvarsource", + c: corev1.Container{ + 
Image: "foo", + Env: []corev1.EnvVar{{ + Name: "Foo", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "/v1", + }, + }, + }}, + }, + want: apis.ErrDisallowedFields("env[0].valueFrom.fieldRef"), + }, { + name: "invalid liveness tcp probe (has port)", + c: corev1.Container{ + Image: "foo", + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromString("http"), + }, + }, + }, + }, + want: apis.ErrDisallowedFields("livenessProbe.tcpSocket.port"), + }, { + name: "disallowed container fields", + c: corev1.Container{ + Image: "foo", + Name: "fail", + Stdin: true, + StdinOnce: true, + TTY: true, + Lifecycle: &corev1.Lifecycle{}, + VolumeDevices: []corev1.VolumeDevice{{ + Name: "disallowed", + DevicePath: "/", + }}, + }, + want: apis.ErrDisallowedFields("lifecycle").Also( + apis.ErrDisallowedFields("stdin")).Also( + apis.ErrDisallowedFields("stdinOnce")).Also( + apis.ErrDisallowedFields("tty")).Also( + apis.ErrDisallowedFields("volumeDevices")), + }, { + name: "has numerous problems", + c: corev1.Container{ + Lifecycle: &corev1.Lifecycle{}, + }, + want: apis.ErrDisallowedFields("lifecycle").Also( + apis.ErrMissingField("image")), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := ValidateContainer(test.c, test.volumes) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("ValidateContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestVolumeValidation(t *testing.T) { + tests := []struct { + name string + v corev1.Volume + want *apis.FieldError + }{{ + name: "just name", + v: corev1.Volume{ + Name: "foo", + }, + want: apis.ErrMissingOneOf("secret", "configMap", "projected"), + }, { + name: "secret volume", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, + }, { + name: "configMap volume", + v: corev1.Volume{ + 
Name: "foo", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "foo"}, + }, + }, + }, + }, { + name: "emptyDir volume", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + want: apis.ErrMissingOneOf("secret", "configMap", "projected").Also( + apis.ErrDisallowedFields("emptyDir")), + }, { + name: "no volume source", + v: corev1.Volume{ + Name: "foo", + }, + want: apis.ErrMissingOneOf("secret", "configMap", "projected"), + }, { + name: "multiple volume source", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "foo"}, + }, + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + Items: []corev1.KeyToPath{{ + Key: "foo", + Path: "bar/baz", + }}, + }, + }}, + }, + }, + }, + want: apis.ErrMultipleOneOf("configMap", "projected"), + }, { + name: "multiple project volume single source", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + }, + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "bar", + }, + Items: []corev1.KeyToPath{{ + Key: "foo", + Path: "bar/baz", + }}, + }, + }}, + }, + }, + }, + want: apis.ErrMultipleOneOf("projected[0].configMap", "projected[0].secret"), + }, { + name: "multiple project volume one-per-source", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: 
&corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + }, + }, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "bar", + }, + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "multiple project volume one-per-source (no names)", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{}, + }, { + ConfigMap: &corev1.ConfigMapProjection{}, + }}, + }, + }, + }, + want: apis.ErrMissingField("projected[0].secret.name", "projected[1].configMap.name"), + }, { + name: "no project volume source", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{}}, + }, + }, + }, + want: apis.ErrMissingOneOf("projected[0].configMap", "projected[0].secret"), + }, { + name: "no name", + v: corev1.Volume{ + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, + want: apis.ErrMissingField("name"), + }, { + name: "bad name", + v: corev1.Volume{ + Name: "@@@", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, + want: apis.ErrInvalidValue("@@@", "name"), + }, { + name: "secret missing keyToPath values", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + Items: []corev1.KeyToPath{{}}, + }, + }, + }, + want: apis.ErrMissingField("items[0].key").Also(apis.ErrMissingField("items[0].path")), + }, { + name: "configMap missing keyToPath values", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + Items: []corev1.KeyToPath{{}}, + }, + }, 
+ }, + want: apis.ErrMissingField("items[0].key").Also(apis.ErrMissingField("items[0].path")), + }, { + name: "projection missing keyToPath values", + v: corev1.Volume{ + Name: "foo", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + Items: []corev1.KeyToPath{{}}, + }}, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "foo", + }, + Items: []corev1.KeyToPath{{}}, + }}, + }, + }, + }, + }, + want: apis.ErrMissingField("projected[0].secret.items[0].key").Also( + apis.ErrMissingField("projected[0].secret.items[0].path")).Also( + apis.ErrMissingField("projected[1].configMap.items[0].key")).Also( + apis.ErrMissingField("projected[1].configMap.items[0].path")), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := validateVolume(test.v) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("validateVolume (-want, +got) = %v", diff) + } + }) + } +} + +func TestObjectReferenceValidation(t *testing.T) { + tests := []struct { + name string + r *corev1.ObjectReference + want *apis.FieldError + }{{ + name: "nil", + }, { + name: "no api version", + r: &corev1.ObjectReference{ + Kind: "Bar", + Name: "foo", + }, + want: apis.ErrMissingField("apiVersion"), + }, { + name: "bad api version", + r: &corev1.ObjectReference{ + APIVersion: "/v1alpha1", + Kind: "Bar", + Name: "foo", + }, + want: apis.ErrInvalidValue("prefix part must be non-empty", "apiVersion"), + }, { + name: "no kind", + r: &corev1.ObjectReference{ + APIVersion: "foo/v1alpha1", + Name: "foo", + }, + want: apis.ErrMissingField("kind"), + }, { + name: "bad kind", + r: &corev1.ObjectReference{ + APIVersion: "foo/v1alpha1", + Kind: "Bad Kind", + Name: "foo", + }, + want: apis.ErrInvalidValue("a valid C identifier must start with 
alphabetic character or '_', followed by a string of alphanumeric characters or '_' (e.g. 'my_name', or 'MY_NAME', or 'MyName', regex used for validation is '[A-Za-z_][A-Za-z0-9_]*')", "kind"), + }, { + name: "no namespace", + r: &corev1.ObjectReference{ + APIVersion: "foo.group/v1alpha1", + Kind: "Bar", + Name: "the-bar-0001", + }, + want: nil, + }, { + name: "no name", + r: &corev1.ObjectReference{ + APIVersion: "foo.group/v1alpha1", + Kind: "Bar", + }, + want: apis.ErrMissingField("name"), + }, { + name: "bad name", + r: &corev1.ObjectReference{ + APIVersion: "foo.group/v1alpha1", + Kind: "Bar", + Name: "bad name", + }, + want: apis.ErrInvalidValue("a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')", "name"), + }, { + name: "disallowed fields", + r: &corev1.ObjectReference{ + APIVersion: "foo.group/v1alpha1", + Kind: "Bar", + Name: "bar0001", + + // None of these are allowed. 
+ Namespace: "foo", + FieldPath: "some.field.path", + ResourceVersion: "234234", + UID: "deadbeefcafebabe", + }, + want: apis.ErrDisallowedFields("namespace", "fieldPath", "resourceVersion", "uid"), + }, { + name: "all good", + r: &corev1.ObjectReference{ + APIVersion: "foo.group/v1alpha1", + Kind: "Bar", + Name: "bar0001", + }, + want: nil, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := ValidateNamespacedObjectReference(test.r) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("ValidateNamespacedObjectReference (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation.go new file mode 100644 index 0000000000..034af7c231 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serving + +import ( + "context" + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/config" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" +) + +var ( + allowedAnnotations = map[string]struct{}{ + UpdaterAnnotation: {}, + CreatorAnnotation: {}, + RevisionLastPinnedAnnotationKey: {}, + GroupNamePrefix + "forceUpgrade": {}, + } +) + +// ValidateObjectMetadata validates that `metadata` stanza of the +// resources is correct. +func ValidateObjectMetadata(meta metav1.Object) *apis.FieldError { + return apis.ValidateObjectMetadata(meta). + Also(autoscaling.ValidateAnnotations(meta.GetAnnotations()). + Also(validateKnativeAnnotations(meta.GetAnnotations())). + ViaField("annotations")) +} + +func validateKnativeAnnotations(annotations map[string]string) (errs *apis.FieldError) { + for key := range annotations { + if _, ok := allowedAnnotations[key]; !ok && strings.HasPrefix(key, GroupNamePrefix) { + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} + +// ValidateQueueSidecarAnnotation validates QueueSideCarResourcePercentageAnnotation +func ValidateQueueSidecarAnnotation(annotations map[string]string) *apis.FieldError { + if len(annotations) == 0 { + return nil + } + v, ok := annotations[QueueSideCarResourcePercentageAnnotation] + if !ok { + return nil + } + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return apis.ErrInvalidValue(v, apis.CurrentField).ViaKey(QueueSideCarResourcePercentageAnnotation) + } + if value <= 0.1 || value > 100 { + return apis.ErrOutOfBoundsValue(value, 0.1, 100.0, QueueSideCarResourcePercentageAnnotation) + } + return nil +} + +// ValidateTimeoutSeconds validates timeout by comparing MaxRevisionTimeoutSeconds +func ValidateTimeoutSeconds(ctx 
context.Context, timeoutSeconds int64) *apis.FieldError { + if timeoutSeconds != 0 { + cfg := config.FromContextOrDefaults(ctx) + if timeoutSeconds > cfg.Defaults.MaxRevisionTimeoutSeconds || timeoutSeconds < 0 { + return apis.ErrOutOfBoundsValue(timeoutSeconds, 0, + cfg.Defaults.MaxRevisionTimeoutSeconds, + "timeoutSeconds") + } + } + return nil +} + +// ValidateContainerConcurrency function validates the ContainerConcurrency field +// TODO(#5007): Move this to autoscaling. +func ValidateContainerConcurrency(containerConcurrency *int64) *apis.FieldError { + if containerConcurrency != nil { + if *containerConcurrency < 0 || *containerConcurrency > config.DefaultMaxRevisionContainerConcurrency { + return apis.ErrOutOfBoundsValue( + *containerConcurrency, 0, config.DefaultMaxRevisionContainerConcurrency, apis.CurrentField) + } + } + return nil +} + +// ValidateClusterVisibilityLabel function validates the visibility label on a Route +func ValidateClusterVisibilityLabel(label string) (errs *apis.FieldError) { + if label != routeconfig.VisibilityClusterLocal { + errs = apis.ErrInvalidValue(label, routeconfig.VisibilityLabelKey) + } + return +} + +// SetUserInfo sets creator and updater annotations +func SetUserInfo(ctx context.Context, oldSpec, newSpec, resource interface{}) { + if ui := apis.GetUserInfo(ctx); ui != nil { + objectMetaAccessor, ok := resource.(metav1.ObjectMetaAccessor) + if !ok { + return + } + ans := objectMetaAccessor.GetObjectMeta().GetAnnotations() + if ans == nil { + ans = map[string]string{} + objectMetaAccessor.GetObjectMeta().SetAnnotations(ans) + } + + if apis.IsInUpdate(ctx) { + if equality.Semantic.DeepEqual(oldSpec, newSpec) { + return + } + ans[UpdaterAnnotation] = ui.Username + } else { + ans[CreatorAnnotation] = ui.Username + ans[UpdaterAnnotation] = ui.Username + } + } +} + +// ValidateRevisionName validates name and generateName for the revisionTemplate +func ValidateRevisionName(ctx context.Context, name, generateName string) 
*apis.FieldError { + if generateName != "" { + if msgs := validation.NameIsDNS1035Label(generateName, true); len(msgs) > 0 { + return apis.ErrInvalidValue( + fmt.Sprintf("not a DNS 1035 label prefix: %v", msgs), + "metadata.generateName") + } + } + if name != "" { + if msgs := validation.NameIsDNS1035Label(name, false); len(msgs) > 0 { + return apis.ErrInvalidValue( + fmt.Sprintf("not a DNS 1035 label: %v", msgs), + "metadata.name") + } + om := apis.ParentMeta(ctx) + prefix := om.Name + "-" + if om.Name != "" { + // Even if there is GenerateName, allow the use + // of Name post-creation. + } else if om.GenerateName != "" { + // We disallow bringing your own name when the parent + // resource uses generateName (at creation). + return apis.ErrDisallowedFields("metadata.name") + } + + if !strings.HasPrefix(name, prefix) { + return apis.ErrInvalidValue( + fmt.Sprintf("%q must have prefix %q", name, prefix), + "metadata.name") + } + } + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation_test.go new file mode 100644 index 0000000000..83386083a2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/metadata_validation_test.go @@ -0,0 +1,495 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serving + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" +) + +func TestValidateObjectMetadata(t *testing.T) { + cases := []struct { + name string + objectMeta metav1.Object + expectErr error + }{{ + name: "invalid name - dots", + objectMeta: &metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + expectErr: &apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"name"}, + }, + }, { + name: "invalid name - too long", + objectMeta: &metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + expectErr: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"name"}, + }, + }, { + name: "invalid name - trailing dash", + objectMeta: &metav1.ObjectMeta{ + Name: "some-name-", + }, + expectErr: &apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"name"}, + }, + }, { + name: "valid generateName", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "valid generateName - trailing dash", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name-", + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "invalid generateName - dots", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "do.not.use.dots", + }, + expectErr: &apis.FieldError{ + Message: "not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"generateName"}, + }, + }, { + name: "invalid generateName - too long", + objectMeta: &metav1.ObjectMeta{ + GenerateName: strings.Repeat("a", 64), + }, + expectErr: &apis.FieldError{ + Message: "not a DNS 1035 label prefix: [must be no more than 63 characters]", + Paths: []string{"generateName"}, + }, + }, { + name: "missing name and generateName", + objectMeta: &metav1.ObjectMeta{}, + expectErr: &apis.FieldError{ + Message: "name or generateName is required", + Paths: []string{"name"}, + }, + }, { + name: "valid forceUpgrade annotation label", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: map[string]string{ + "serving.knative.dev/forceUpgrade": "true", + }, + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "valid creator annotation label", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: map[string]string{ + CreatorAnnotation: "svc-creator", + }, + }, + + expectErr: (*apis.FieldError)(nil), + }, { + name: "valid lastModifier annotation label", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: 
map[string]string{ + UpdaterAnnotation: "svc-modifier", + }, + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "valid lastPinned annotation label", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: map[string]string{ + RevisionLastPinnedAnnotationKey: "pinned-val", + }, + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "invalid knative prefix annotation", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: map[string]string{ + "serving.knative.dev/testAnnotation": "value", + }, + }, + expectErr: (&apis.FieldError{Message: "", Paths: []string(nil), Details: ""}).Also( + (&apis.FieldError{Message: "", Paths: []string(nil), Details: ""}).Also( + (&apis.FieldError{Message: "", Paths: []string(nil), Details: ""}).Also( + apis.ErrInvalidKeyName("serving.knative.dev/testAnnotation", "annotations"), + ))), + }, { + name: "valid non-knative prefix annotation label", + objectMeta: &metav1.ObjectMeta{ + GenerateName: "some-name", + Annotations: map[string]string{ + "testAnnotation": "testValue", + }, + }, + expectErr: (*apis.FieldError)(nil), + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := ValidateObjectMetadata(c.objectMeta) + + if !reflect.DeepEqual(c.expectErr, err) { + t.Errorf("Expected: '%#v', Got: '%#v'", c.expectErr, err) + } + }) + } +} + +func TestValidateQueueSidecarAnnotation(t *testing.T) { + cases := []struct { + name string + annotation map[string]string + expectErr error + }{{ + name: "Queue sidecar resource percentage annotation more than 100", + annotation: map[string]string{ + QueueSideCarResourcePercentageAnnotation: "200", + }, + expectErr: &apis.FieldError{ + Message: "expected 0.1 <= 200 <= 100", + Paths: []string{QueueSideCarResourcePercentageAnnotation}, + }, + }, { + name: "Invalid queue sidecar resource percentage annotation", + annotation: map[string]string{ + QueueSideCarResourcePercentageAnnotation: "", + }, + expectErr: &apis.FieldError{ + 
Message: "invalid value: ", + Paths: []string{fmt.Sprintf("[%s]", QueueSideCarResourcePercentageAnnotation)}, + }, + }, { + name: "empty annotation", + annotation: map[string]string{}, + expectErr: (*apis.FieldError)(nil), + }, { + name: "different annotation other than QueueSideCarResourcePercentageAnnotation", + annotation: map[string]string{ + CreatorAnnotation: "", + }, + expectErr: (*apis.FieldError)(nil), + }, { + name: "valid value for Queue sidecar resource percentage annotation", + annotation: map[string]string{ + QueueSideCarResourcePercentageAnnotation: "100", + }, + expectErr: (*apis.FieldError)(nil), + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := ValidateQueueSidecarAnnotation(c.annotation) + if !reflect.DeepEqual(c.expectErr, err) { + t.Errorf("Expected: '%#v', Got: '%#v'", c.expectErr, err) + } + }) + } +} + +func TestValidateTimeoutSecond(t *testing.T) { + cases := []struct { + name string + timeout *int64 + expectErr error + }{{ + name: "exceed max timeout", + timeout: ptr.Int64(6000), + expectErr: apis.ErrOutOfBoundsValue( + 6000, 0, config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }, { + name: "valid timeout value", + timeout: ptr.Int64(100), + expectErr: (*apis.FieldError)(nil), + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + err := ValidateTimeoutSeconds(ctx, *c.timeout) + if !reflect.DeepEqual(c.expectErr, err) { + t.Errorf("Expected: '%#v', Got: '%#v'", c.expectErr, err) + } + }) + } +} + +func TestValidateContainerConcurrency(t *testing.T) { + cases := []struct { + name string + containerConcurrency *int64 + expectErr error + }{{ + name: "empty containerConcurrency", + containerConcurrency: nil, + expectErr: (*apis.FieldError)(nil), + }, { + name: "invalid containerConcurrency value", + containerConcurrency: ptr.Int64(2000), + expectErr: apis.ErrOutOfBoundsValue( + 2000, 0, config.DefaultMaxRevisionContainerConcurrency, 
apis.CurrentField), + }, { + name: "valid containerConcurrency value", + containerConcurrency: ptr.Int64(10), + expectErr: (*apis.FieldError)(nil), + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := ValidateContainerConcurrency(c.containerConcurrency) + if !reflect.DeepEqual(c.expectErr, err) { + t.Errorf("Expected: '%#v', Got: '%#v'", c.expectErr, err) + } + }) + } +} + +func TestValidateClusterVisibilityLabel(t *testing.T) { + tests := []struct { + name string + label string + expectErr error + }{{ + name: "empty label", + label: "", + expectErr: apis.ErrInvalidValue("", routeconfig.VisibilityLabelKey), + }, { + name: "valid label", + label: routeconfig.VisibilityClusterLocal, + expectErr: (*apis.FieldError)(nil), + }, { + name: "invalid label", + label: "not-cluster-local", + expectErr: apis.ErrInvalidValue("not-cluster-local", routeconfig.VisibilityLabelKey), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := ValidateClusterVisibilityLabel(test.label) + if !reflect.DeepEqual(test.expectErr, err) { + t.Errorf("ValidateClusterVisibilityLabel(%s) = %#v, Want: '%#v'", test.label, err, test.expectErr) + } + }) + } + +} + +type withPod struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec corev1.PodSpec `json:"spec,omitempty"` +} + +func getSpec(image string) corev1.PodSpec { + return corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: image, + }}, + } +} + +func TestAnnotationCreate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + ) + tests := []struct { + name string + user string + this *withPod + want map[string]string + }{{ + name: "create annotation", + user: u1, + this: &withPod{ + Spec: getSpec("foo"), + }, + want: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u1, + }, + }, { + name: "create annotation should override user provided annotations", + user: u1, + this: &withPod{ + ObjectMeta: 
metav1.ObjectMeta{ + Annotations: map[string]string{ + CreatorAnnotation: u2, + UpdaterAnnotation: u2, + }, + }, + Spec: getSpec("foo"), + }, + want: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u1, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + SetUserInfo(ctx, nil, test.this.Spec, test.this) + if !reflect.DeepEqual(test.this.Annotations, test.want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", test.this.Annotations, test.want, cmp.Diff(test.this.Annotations, test.want)) + } + }) + } +} + +func TestAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + ) + tests := []struct { + name string + user string + prev *withPod + this *withPod + want map[string]string + }{{ + name: "update annotation without spec changes", + user: u2, + this: &withPod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u1, + }, + }, + Spec: getSpec("foo"), + }, + prev: &withPod{ + Spec: getSpec("foo"), + }, + want: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u1, + }, + }, { + name: "update annotation with spec changes", + user: u2, + this: &withPod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u1, + }, + }, + Spec: getSpec("bar"), + }, + prev: &withPod{ + Spec: getSpec("foo"), + }, + want: map[string]string{ + CreatorAnnotation: u1, + UpdaterAnnotation: u2, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + } + SetUserInfo(ctx, test.prev.Spec, test.this.Spec, test.this) + if !reflect.DeepEqual(test.this.Annotations, test.want) { + 
t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", test.this.Annotations, test.want, cmp.Diff(test.this.Annotations, test.want)) + } + }) + } +} + +func TestValidateRevisionName(t *testing.T) { + cases := []struct { + name string + revName string + revGenerateName string + objectMeta metav1.ObjectMeta + expectErr error + }{{ + name: "invalid revision generateName - dots", + revGenerateName: "foo.bar", + expectErr: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.generateName"), + }, { + name: "invalid revision name - dots", + revName: "foo.bar", + expectErr: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.name"), + }, { + name: "invalid name (not prefixed)", + objectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + revName: "foo", + expectErr: apis.ErrInvalidValue(`"foo" must have prefix "bar-"`, + "metadata.name"), + }, { + name: "invalid name (with generateName)", + objectMeta: metav1.ObjectMeta{ + GenerateName: "foo-bar-", + }, + revName: "foo-bar-foo", + expectErr: apis.ErrDisallowedFields("metadata.name"), + }, { + name: "valid name", + objectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + revName: "valid-name", + expectErr: (*apis.FieldError)(nil), + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinParent(ctx, c.objectMeta) + if err := ValidateRevisionName(ctx, c.revName, c.revGenerateName); !reflect.DeepEqual(c.expectErr, err) { + t.Errorf("Expected: '%#v', Got: '%#v'", c.expectErr, err) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/register.go b/test/vendor/knative.dev/serving/pkg/apis/serving/register.go similarity index 86% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/register.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/register.go index c11e02f890..a73b45e0c5 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/register.go @@ -17,8 +17,12 @@ limitations under the License. package serving const ( + // GroupName is the group name for knative labels and annotations GroupName = "serving.knative.dev" + // GroupNamePrefix is the prefix for label key and annotation key + GroupNamePrefix = GroupName + "/" + // ConfigurationLabelKey is the label key attached to a Revision indicating by // which Configuration it is created. 
ConfigurationLabelKey = GroupName + "/configuration" @@ -29,11 +33,13 @@ const ( // RouteLabelKey is the label key attached to a Configuration indicating by // which Route it is configured as traffic target. - // The key can also be attached to ClusterIngress resources to indicate + // The key can also be attached to Ingress resources to indicate // which Route triggered their creation. + // The key is also attached to k8s Service resources to indicate which Route + // triggered their creation. RouteLabelKey = GroupName + "/route" - // RouteNamespaceLabelKey is the label key attached to a ClusterIngress + // RouteNamespaceLabelKey is the label key attached to a Ingress // by a Route to indicate which namespace the Route was created in. RouteNamespaceLabelKey = GroupName + "/routeNamespace" diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go new file mode 100644 index 0000000000..05d012ce55 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertUp implements apis.Convertible +func (source *Configuration) ConvertUp(ctx context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertDown implements apis.Convertible +func (sink *Configuration) ConvertDown(ctx context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion_test.go new file mode 100644 index 0000000000..054cf5a73e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" +) + +func TestConfigurationConversionBadType(t *testing.T) { + good, bad := &Configuration{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults.go similarity index 76% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults.go index 1583abe03e..6e2527693b 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults.go @@ -14,18 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" ) // SetDefaults implements apis.Defaultable func (c *Configuration) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, c.ObjectMeta) c.Spec.SetDefaults(apis.WithinSpec(ctx)) + if c.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Configuration).Spec, c.Spec, c) + } else { + serving.SetUserInfo(ctx, nil, c.Spec, c) + } + } } // SetDefaults implements apis.Defaultable diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults_test.go new file mode 100644 index 0000000000..e8e713894a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_defaults_test.go @@ -0,0 +1,247 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" +) + +func TestConfigurationDefaulting(t *testing.T) { + tests := []struct { + name string + in *Configuration + want *Configuration + }{{ + name: "empty", + in: &Configuration{}, + want: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }, { + name: "run latest", + in: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }, { + name: "run latest with some default overrides", + in: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + 
Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if !cmp.Equal(got, test.want, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} + +func TestConfigurationUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Configuration) *Configuration { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Configuration + prev *Configuration + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Configuration{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Configuration{}, + prev: &Configuration{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Configuration{}), + prev: withUserAnns(u1, u1, &Configuration{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + prev: &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }), + prev: withUserAnns(u1, u2, &Configuration{ + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle.go index 6efb612d30..93057dacf5 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) var configurationCondSet = apis.NewLivingConditionSet() diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle_test.go new file mode 100644 index 0000000000..f897a6ae3f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_lifecycle_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestConfigurationDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Configuration{}, test.t) + if err != nil { + t.Errorf("VerifyType(Configuration, %T) = %v", test.t, err) + } + }) + } +} + +func TestConfigurationGetGroupVersionKind(t *testing.T) { + r := &Configuration{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1", + Kind: "Configuration", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestConfigurationIsReady(t *testing.T) { + tests := []struct { + name string + cs *ConfigurationStatus + expected bool + }{{ + name: "Ready undefined", + cs: &ConfigurationStatus{}, + expected: false, + }, { + name: "Ready=False", + cs: &ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + expected: false, + }, { + name: "Ready=Unknown", + cs: &ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + expected: false, + }, { + name: "Ready=True", + cs: &ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + expected: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ready := test.cs.IsReady() + if ready != test.expected { + t.Errorf("IsReady() = %t; expected %t", ready, test.expected) + } + }) + } +} diff --git 
a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_types.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_types.go index b33fe00189..dc18e1829d 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_types.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -86,7 +86,7 @@ type ConfigurationStatusFields struct { // ConfigurationStatus communicates the observed state of the Configuration (from the controller). 
type ConfigurationStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` ConfigurationStatusFields `json:",inline"` } diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation.go similarity index 55% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation.go index ccadcda9be..da69563049 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation.go @@ -14,13 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( "context" + "strings" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" ) // Validate makes sure that Configuration is properly configured. @@ -30,7 +33,8 @@ func (c *Configuration) Validate(ctx context.Context) (errs *apis.FieldError) { // have changed (i.e. due to config-defaults changes), we elide the metadata and // spec validation. 
if !apis.IsInStatusUpdate(ctx) { - errs = errs.Also(serving.ValidateObjectMetadata(c.GetObjectMeta()).ViaField("metadata")) + errs = errs.Also(serving.ValidateObjectMetadata(c.GetObjectMeta()).Also( + c.validateLabels().ViaField("labels")).ViaField("metadata")) ctx = apis.WithinParent(ctx, c.ObjectMeta) errs = errs.Also(c.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } @@ -39,7 +43,12 @@ func (c *Configuration) Validate(ctx context.Context) (errs *apis.FieldError) { if apis.IsInUpdate(ctx) { original := apis.GetBaseline(ctx).(*Configuration) - + // Don't validate annotations(creator and lastModifier) when configuration owned by service + // validate only when configuration created independently. + if c.OwnerReferences == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, c.Spec, original.GetAnnotations(), + c.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } err := c.Spec.Template.VerifyNameChange(ctx, original.Spec.Template) errs = errs.Also(err.ViaField("spec.template")) } @@ -61,3 +70,29 @@ func (cs *ConfigurationStatus) Validate(ctx context.Context) *apis.FieldError { func (csf *ConfigurationStatusFields) Validate(ctx context.Context) *apis.FieldError { return nil } + +// validateLabels function validates configuration labels +func (c *Configuration) validateLabels() (errs *apis.FieldError) { + for key, val := range c.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(validateClusterVisibilityLabel(val)) + case key == serving.RouteLabelKey: + case key == serving.ServiceLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ServiceLabelKey, "Service", c.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} + +// verifyLabelOwnerRef function verifies the owner references of resource with label key has val value. 
+func verifyLabelOwnerRef(val, label, resource string, ownerRefs []metav1.OwnerReference) (errs *apis.FieldError) { + for _, ref := range ownerRefs { + if ref.Kind == resource && val == ref.Name { + return + } + } + return errs.Also(apis.ErrMissingField(label)) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation_test.go new file mode 100644 index 0000000000..0901201d01 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_validation_test.go @@ -0,0 +1,887 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" +) + +func TestConfigurationValidation(t *testing.T) { + tests := []struct { + name string + c *Configuration + want *apis.FieldError + }{{ + name: "valid", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid container concurrency", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + ContainerConcurrency: ptr.Int64(-10), + }, + }, + }, + }, + want: apis.ErrOutOfBoundsValue( + -10, 0, config.DefaultMaxRevisionContainerConcurrency, + "spec.template.spec.containerConcurrency"), + }, { + name: "valid BYO name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: 
&apis.FieldError{ + Message: "name or generateName is required", + Paths: []string{"metadata.name"}, + }, + }, { + name: "valid BYO name (with generateName)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "byo-name-", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.template.metadata.name"), + }, { + name: "invalid BYO name (not prefixed)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue(`"foo" must have prefix "byo-name-"`, + "spec.template.metadata.name"), + }, { + name: "invalid name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo.bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.template.metadata.name"), + }, { + name: "invalid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "foo.bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.template.metadata.generateName"), + }, { + name: "valid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "valid-generatename", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: nil, + }} + + // TODO(dangerd): PodSpec validation failures. 
+ + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.c.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestConfigurationLabelValidation(t *testing.T) { + validConfigSpec := ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + } + tests := []struct { + name string + c *Configuration + want *apis.FieldError + }{{ + name: "valid visibility name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "invalid visibility name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-value", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrInvalidValue("bad-value", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid route name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "valid knative service name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "invalid knative service name without matching owner references", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "absent-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name with multiple owner ref", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "NewSerice", + Name: "test-new-svc", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validConfigSpec, + }, + }, { + name: "invalid knative service name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "absent-svc", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "Mismatch knative service label and owner ref", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative label", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := 
test.c.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} +func TestImmutableConfigurationFields(t *testing.T) { + tests := []struct { + name string + new *Configuration + old *Configuration + want *apis.FieldError + }{{ + name: "without byo-name", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name (no change)", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: 
corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "bad byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.template.metadata.name"}, + Details: "{*v1.RevisionTemplateSpec}.Spec.PodSpec.Containers[0].Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + got := test.new.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v\nwant: %v\ngot: %v", + diff, test.want, got) + } + }) + } +} + +func TestConfigurationSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + config *Configuration + subresource string + want *apis.FieldError + }{{ 
+ name: "status update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "non-status sub resource update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.template.spec.timeoutSeconds"), + }} + + for _, test := 
range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinSubResourceUpdate(ctx, test.config, test.subresource) + got := test.config.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getConfigurationSpec(image string) ConfigurationSpec { + return ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + } +} + +func TestConfigurationAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Configuration + this *Configuration + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update creator annotation with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + 
serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier annotation without spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier annotation with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update without spec changes as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/contexts.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts.go similarity index 96% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/contexts.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts.go index 9cc23937ad..2b1076cf33 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/contexts.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import "context" @@ -40,13 +40,13 @@ func HasDefaultConfigurationName(ctx context.Context) bool { type lemonadeKey struct{} // WithUpgradeViaDefaulting notes on the context that we want defaulting to rewrite -// from v1alpha1 to v1beta1. +// from v1alpha1 to v1. func WithUpgradeViaDefaulting(ctx context.Context) context.Context { return context.WithValue(ctx, lemonadeKey{}, struct{}{}) } // IsUpgradeViaDefaulting checks whether we should be "defaulting" from v1alpha1 to -// the v1beta1 subset. +// the v1 subset. func IsUpgradeViaDefaulting(ctx context.Context) bool { return ctx.Value(lemonadeKey{}) != nil } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts_test.go new file mode 100644 index 0000000000..3ad7f5f45c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/contexts_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" +) + +func TestContexts(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + ctx context.Context + check func(context.Context) bool + want bool + }{{ + name: "has default config name", + ctx: WithDefaultConfigurationName(ctx), + check: HasDefaultConfigurationName, + want: true, + }, { + name: "doesn't have default config name", + ctx: ctx, + check: HasDefaultConfigurationName, + want: false, + }, { + name: "are upgrading via defaulting", + ctx: WithUpgradeViaDefaulting(ctx), + check: IsUpgradeViaDefaulting, + want: true, + }, { + name: "aren't upgrading via defaulting", + ctx: ctx, + check: IsUpgradeViaDefaulting, + want: false, + }} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := tc.check(tc.ctx) + if tc.want != got { + t.Errorf("check() = %v, wanted %v", tc.want, got) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/doc.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/doc.go new file mode 100644 index 0000000000..fab9a24e65 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains the Serving v1 API types. 
+ +// +k8s:deepcopy-gen=package +// +groupName=serving.knative.dev +package v1 diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register.go new file mode 100644 index 0000000000..479507d511 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "knative.dev/serving/pkg/apis/serving" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: serving.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder registers the addKnownTypes function. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Revision{}, + &RevisionList{}, + &Configuration{}, + &ConfigurationList{}, + &Route{}, + &RouteList{}, + &Service{}, + &ServiceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register_test.go new file mode 100644 index 0000000000..f8add9b885 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/register_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" +) + +func TestRegisterHelpers(t *testing.T) { + if got, want := Kind("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Kind(Revision) = %v, want %v", got.String(), want) + } + + if got, want := Resource("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Resource(Revision) = %v, want %v", got.String(), want) + } + + if got, want := SchemeGroupVersion.String(), "serving.knative.dev/v1"; got != want { + t.Errorf("SchemeGroupVersion() = %v, want %v", got, want) + } + + scheme := runtime.NewScheme() + if err := addKnownTypes(scheme); err != nil { + t.Errorf("addKnownTypes() = %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go new file mode 100644 index 0000000000..89ed8e5a04 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertUp implements apis.Convertible +func (source *Revision) ConvertUp(ctx context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertDown implements apis.Convertible +func (sink *Revision) ConvertDown(ctx context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion_test.go new file mode 100644 index 0000000000..71e4b75d90 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" +) + +func TestRevisionConversionBadType(t *testing.T) { + good, bad := &Revision{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go new file mode 100644 index 0000000000..01df5b6262 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" +) + +// SetDefaults implements apis.Defaultable +func (r *Revision) SetDefaults(ctx context.Context) { + r.Spec.SetDefaults(apis.WithinSpec(ctx)) +} + +// SetDefaults implements apis.Defaultable +func (rts *RevisionTemplateSpec) SetDefaults(ctx context.Context) { + rts.Spec.SetDefaults(apis.WithinSpec(ctx)) +} + +// SetDefaults implements apis.Defaultable +func (rs *RevisionSpec) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + + // Default TimeoutSeconds based on our configmap + if rs.TimeoutSeconds == nil || *rs.TimeoutSeconds == 0 { + rs.TimeoutSeconds = ptr.Int64(cfg.Defaults.RevisionTimeoutSeconds) + } + + // Default ContainerConcurrency based on our configmap + if rs.ContainerConcurrency == nil { + rs.ContainerConcurrency = ptr.Int64(cfg.Defaults.ContainerConcurrency) + } + + for idx := range rs.PodSpec.Containers { + if rs.PodSpec.Containers[idx].Name == "" { + rs.PodSpec.Containers[idx].Name = cfg.Defaults.UserContainerName(ctx) + } + + if rs.PodSpec.Containers[idx].Resources.Requests == nil { + rs.PodSpec.Containers[idx].Resources.Requests = corev1.ResourceList{} + } + if _, ok := rs.PodSpec.Containers[idx].Resources.Requests[corev1.ResourceCPU]; !ok { + if rsrc := cfg.Defaults.RevisionCPURequest; rsrc != nil { + rs.PodSpec.Containers[idx].Resources.Requests[corev1.ResourceCPU] = *rsrc + } + } + if _, ok := rs.PodSpec.Containers[idx].Resources.Requests[corev1.ResourceMemory]; !ok { + if rsrc := cfg.Defaults.RevisionMemoryRequest; rsrc != nil { + rs.PodSpec.Containers[idx].Resources.Requests[corev1.ResourceMemory] = *rsrc + } + } + + if rs.PodSpec.Containers[idx].Resources.Limits == nil { + rs.PodSpec.Containers[idx].Resources.Limits = corev1.ResourceList{} + } + if _, ok := rs.PodSpec.Containers[idx].Resources.Limits[corev1.ResourceCPU]; !ok { + if rsrc := 
cfg.Defaults.RevisionCPULimit; rsrc != nil { + rs.PodSpec.Containers[idx].Resources.Limits[corev1.ResourceCPU] = *rsrc + } + } + if _, ok := rs.PodSpec.Containers[idx].Resources.Limits[corev1.ResourceMemory]; !ok { + if rsrc := cfg.Defaults.RevisionMemoryLimit; rsrc != nil { + rs.PodSpec.Containers[idx].Resources.Limits[corev1.ResourceMemory] = *rsrc + } + } + if rs.PodSpec.Containers[idx].ReadinessProbe == nil { + rs.PodSpec.Containers[idx].ReadinessProbe = &corev1.Probe{} + } + if rs.PodSpec.Containers[idx].ReadinessProbe.TCPSocket == nil && + rs.PodSpec.Containers[idx].ReadinessProbe.HTTPGet == nil && + rs.PodSpec.Containers[idx].ReadinessProbe.Exec == nil { + rs.PodSpec.Containers[idx].ReadinessProbe.TCPSocket = &corev1.TCPSocketAction{} + } + + if rs.PodSpec.Containers[idx].ReadinessProbe.SuccessThreshold == 0 { + rs.PodSpec.Containers[idx].ReadinessProbe.SuccessThreshold = 1 + } + + vms := rs.PodSpec.Containers[idx].VolumeMounts + for i := range vms { + vms[i].ReadOnly = true + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults_test.go new file mode 100644 index 0000000000..74a3af95e3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_defaults_test.go @@ -0,0 +1,342 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/config" +) + +var ( + defaultResources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + Limits: corev1.ResourceList{}, + } + defaultProbe = &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + } + ignoreUnexportedResources = cmpopts.IgnoreUnexported(resource.Quantity{}) +) + +func TestRevisionDefaulting(t *testing.T) { + logger := logtesting.TestLogger(t) + tests := []struct { + name string + in *Revision + want *Revision + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Revision{}, + want: &Revision{Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }}, + }, { + name: "with context", + in: &Revision{Spec: RevisionSpec{PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}}}, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logger) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "123", + }, + }) + + return s.ToContext(ctx) + }, + want: &Revision{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(123), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "readonly volumes", + in: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: 
"foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + }}, + }}, + }, + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + ReadOnly: true, + }}, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, { + name: "timeout sets to default when 0 is specified", + in: &Revision{Spec: RevisionSpec{PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}, TimeoutSeconds: ptr.Int64(0)}}, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logger) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "456", + }, + }) + + return s.ToContext(ctx) + }, + want: &Revision{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(456), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "no overwrite", + in: &Revision{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.2", + }, + }, + }, + }}, + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Resources: defaultResources, + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 1, + 
Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.2", + }, + }, + }, + }}, + }, + }, + }, + }, { + name: "no overwrite exec", + in: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"echo", "hi"}, + }, + }, + }, + }}, + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"echo", "hi"}, + }, + }, + }, + }}, + }, + }, + }, + }, { + name: "partially initialized", + in: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "with resources from context", + in: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}, + }, + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logger) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-cpu-request": "100m", + "revision-memory-request": "200M", + "revision-cpu-limit": "300m", + "revision-memory-limit": "400M", + }, + }) + + return s.ToContext(ctx) + }, + want: 
&Revision{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200M"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("300m"), + corev1.ResourceMemory: resource.MustParse("400M"), + }, + }, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "multiple containers", + in: &Revision{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "busybox", + }, { + Name: "helloworld", + }}, + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, { + Name: "helloworld", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if !cmp.Equal(test.want, got, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle.go new file mode 100644 index 0000000000..a178edf353 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle.go @@ -0,0 +1,55 @@ +/* 
+Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/config" +) + +const ( + // DefaultUserPort is the system default port value exposed on the user-container. + DefaultUserPort = 8080 +) + +var revisionCondSet = apis.NewLivingConditionSet() + +// GetGroupVersionKind returns the GroupVersionKind. +func (r *Revision) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Revision") +} + +// IsReady returns if the revision is ready to serve the requested configuration. +func (rs *RevisionStatus) IsReady() bool { + return revisionCondSet.Manage(rs).IsHappy() +} + +// GetContainerConcurrency returns the container concurrency. If +// container concurrency is not set, the default value will be returned. +// We use the original default (0) here for backwards compatibility. +// Previous versions of Knative equated unspecified and zero, so to avoid +// changing the value used by Revisions with unspecified values when a different +// default is configured, we use the original default instead of the configured +// default to remain safe across upgrades. 
+func (rs *RevisionSpec) GetContainerConcurrency() int64 { + if rs.ContainerConcurrency == nil { + return config.DefaultContainerConcurrency + } + return *rs.ContainerConcurrency +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle_test.go new file mode 100644 index 0000000000..598a4a97d7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_lifecycle_test.go @@ -0,0 +1,139 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" +) + +func TestRevisionDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Revision{}, test.t) + if err != nil { + t.Errorf("VerifyType(Revision, %T) = %v", test.t, err) + } + }) + } +} + +func TestRevisionGetGroupVersionKind(t *testing.T) { + r := &Revision{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1", + Kind: "Revision", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestGetContainerConcurrency(t *testing.T) { + tests := []struct { + name string + rs *RevisionSpec + expected int64 + }{{ + name: "nil concurrency", + rs: &RevisionSpec{}, + expected: config.DefaultContainerConcurrency, + }, { + name: "concurrency 42", + rs: &RevisionSpec{ContainerConcurrency: ptr.Int64(42)}, + expected: 42, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cc := test.rs.GetContainerConcurrency() + if cc != test.expected { + t.Errorf("GetContainerConcurrency() = %d, expected:%d", cc, test.expected) + } + }) + } + +} + +func TestRevisionIsReady(t *testing.T) { + tests := []struct { + name string + rs *RevisionStatus + expected bool + }{{ + name: "Ready undefined", + rs: &RevisionStatus{}, + expected: false, + }, { + name: "Ready=False", + rs: &RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + expected: false, + }, { + name: "Ready=Unknown", + rs: &RevisionStatus{ + Status: duckv1.Status{ + 
Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + expected: false, + }, { + name: "Ready=True", + rs: &RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + expected: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ready := test.rs.IsReady() + if ready != test.expected { + t.Errorf("IsReady() = %t; expected %t", ready, test.expected) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go similarity index 86% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go index a6ddfe0fc7..d4e5deba91 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_types.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -67,25 +67,16 @@ type RevisionTemplateSpec struct { Spec RevisionSpec `json:"spec,omitempty"` } -// RevisionContainerConcurrencyType is an integer expressing the maximum number of -// in-flight (concurrent) requests. -type RevisionContainerConcurrencyType int64 - -const ( - // RevisionContainerConcurrencyMax is the maximum configurable - // container concurrency. 
- RevisionContainerConcurrencyMax RevisionContainerConcurrencyType = 1000 -) - // RevisionSpec holds the desired state of the Revision (from the client). type RevisionSpec struct { corev1.PodSpec `json:",inline"` // ContainerConcurrency specifies the maximum allowed in-flight (concurrent) // requests per container of the Revision. Defaults to `0` which means - // unlimited concurrency. + // concurrency to the application is not limited, and the system decides the + // target concurrency for the autoscaler. // +optional - ContainerConcurrency RevisionContainerConcurrencyType `json:"containerConcurrency,omitempty"` + ContainerConcurrency *int64 `json:"containerConcurrency,omitempty"` // TimeoutSeconds holds the max duration the instance is allowed for // responding to a request. If unspecified, a system default will @@ -102,7 +93,7 @@ const ( // RevisionStatus communicates the observed state of the Revision (from the controller). type RevisionStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // ServiceName holds the name of a core Kubernetes Service resource that // load balances over the pods backing this Revision. diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go similarity index 65% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go index 75fd3ff821..d83b3760cb 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go @@ -14,23 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" - "fmt" "strings" - "github.com/knative/serving/pkg/apis/config" - - "github.com/knative/pkg/apis" - "github.com/knative/pkg/kmp" - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/serving" ) // Validate ensures Revision is properly configured. func (r *Revision) Validate(ctx context.Context) *apis.FieldError { - errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).ViaField("metadata") + errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).Also( + r.ValidateLabels().ViaField("labels")).ViaField("metadata") errs = errs.Also(r.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) if apis.IsInUpdate(ctx) { @@ -58,24 +57,12 @@ func (r *Revision) Validate(ctx context.Context) *apis.FieldError { // Validate implements apis.Validatable func (rts *RevisionTemplateSpec) Validate(ctx context.Context) *apis.FieldError { errs := rts.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec") + errs = errs.Also(autoscaling.ValidateAnnotations(rts.GetAnnotations()).ViaField("metadata.annotations")) // If the RevisionTemplateSpec has a name specified, then check that // it follows the requirements on the name. 
- if rts.Name != "" { - var prefix string - if om := apis.ParentMeta(ctx); om.Name == "" { - prefix = om.GenerateName - } else { - prefix = om.Name + "-" - } - - if !strings.HasPrefix(rts.Name, prefix) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("%q must have prefix %q", rts.Name, prefix), - "metadata.name")) - } - } - + errs = errs.Also(serving.ValidateRevisionName(ctx, rts.Name, rts.GenerateName)) + errs = errs.Also(serving.ValidateQueueSidecarAnnotation(rts.Annotations).ViaField("metadata.annotations")) return errs } @@ -100,7 +87,7 @@ func (current *RevisionTemplateSpec) VerifyNameChange(ctx context.Context, og Re } else if diff != "" { return &apis.FieldError{ Message: "Saw the following changes without a name change (-old +new)", - Paths: []string{apis.CurrentField}, + Paths: []string{"metadata.name"}, Details: diff, } } @@ -109,32 +96,34 @@ func (current *RevisionTemplateSpec) VerifyNameChange(ctx context.Context, og Re // Validate implements apis.Validatable func (rs *RevisionSpec) Validate(ctx context.Context) *apis.FieldError { - err := rs.ContainerConcurrency.Validate(ctx).ViaField("containerConcurrency") - - err = err.Also(serving.ValidatePodSpec(rs.PodSpec)) + errs := serving.ValidatePodSpec(rs.PodSpec) if rs.TimeoutSeconds != nil { - ts := *rs.TimeoutSeconds - cfg := config.FromContextOrDefaults(ctx) - if ts < 0 || ts > cfg.Defaults.MaxRevisionTimeoutSeconds { - err = err.Also(apis.ErrOutOfBoundsValue( - ts, 0, cfg.Defaults.MaxRevisionTimeoutSeconds, "timeoutSeconds")) - } + errs = errs.Also(serving.ValidateTimeoutSeconds(ctx, *rs.TimeoutSeconds)) } - return err -} - -// Validate implements apis.Validatable. 
-func (cc RevisionContainerConcurrencyType) Validate(ctx context.Context) *apis.FieldError { - if cc < 0 || cc > RevisionContainerConcurrencyMax { - return apis.ErrOutOfBoundsValue( - cc, 0, RevisionContainerConcurrencyMax, apis.CurrentField) + if rs.ContainerConcurrency != nil { + errs = errs.Also(serving.ValidateContainerConcurrency(rs.ContainerConcurrency).ViaField("containerConcurrency")) } - return nil + + return errs } // Validate implements apis.Validatable func (rs *RevisionStatus) Validate(ctx context.Context) *apis.FieldError { return nil } + +// ValidateLabels function validates service labels +func (r *Revision) ValidateLabels() (errs *apis.FieldError) { + for key, val := range r.GetLabels() { + switch { + case key == serving.RouteLabelKey || key == serving.ServiceLabelKey || key == serving.ConfigurationGenerationLabelKey: + case key == serving.ConfigurationLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ConfigurationLabelKey, "Configuration", r.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, "")) + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation_test.go new file mode 100644 index 0000000000..21aa7e2d1b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation_test.go @@ -0,0 +1,906 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "knative.dev/pkg/apis" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestRevisionValidation(t *testing.T) { + tests := []struct { + name string + r *Revision + want *apis.FieldError + }{{ + name: "valid", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid container concurrency", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + ContainerConcurrency: ptr.Int64(-10), + }, + }, + want: apis.ErrOutOfBoundsValue( + -10, 0, config.DefaultMaxRevisionContainerConcurrency, + "spec.containerConcurrency"), + }} + + // TODO(dangerd): PodSpec validation failures. 
+ + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRevisionLabelAnnotationValidation(t *testing.T) { + validRevisionSpec := RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + } + tests := []struct { + name string + r *Revision + want *apis.FieldError + }{{ + name: "valid route name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "valid knative service name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "valid knative service name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationGenerationLabelKey: "1234", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "invalid knative configuration name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "absent-cfg", + }, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "valid knative configuration name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "test-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "invalid knative 
configuration name without owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "diff-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "invalid knative configuration name with multiple owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "NewConfiguration", + Name: "test-new-cfg", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "test-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + }, { + name: "Mismatch knative configuration label and owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "invalid knative label", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %s", 
cmp.Diff(want, got)) + } + }) + } +} + +func TestContainerConcurrencyValidation(t *testing.T) { + tests := []struct { + name string + cc int64 + want *apis.FieldError + }{{ + name: "single", + cc: 1, + want: nil, + }, { + name: "unlimited", + cc: 0, + want: nil, + }, { + name: "ten", + cc: 10, + want: nil, + }, { + name: "invalid container concurrency (too small)", + cc: -1, + want: apis.ErrOutOfBoundsValue(-1, 0, config.DefaultMaxRevisionContainerConcurrency, + apis.CurrentField), + }, { + name: "invalid container concurrency (too large)", + cc: config.DefaultMaxRevisionContainerConcurrency + 1, + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionContainerConcurrency+1, + 0, config.DefaultMaxRevisionContainerConcurrency, apis.CurrentField), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := serving.ValidateContainerConcurrency(&test.cc) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} + +func TestRevisionSpecValidation(t *testing.T) { + tests := []struct { + name string + rs *RevisionSpec + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "valid", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + want: nil, + }, { + name: "with volume (ok)", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + }, + want: nil, + }, { + name: "with volume name collision", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: 
[]corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, { + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{}, + }, + }}, + }, + }, + want: (&apis.FieldError{ + Message: fmt.Sprintf(`duplicate volume name "the-name"`), + Paths: []string{"name"}, + }).ViaFieldIndex("volumes", 1), + }, { + name: "bad pod spec", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "steve", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }}, + }, + }, + want: apis.ErrDisallowedFields("containers[0].lifecycle"), + }, { + name: "missing container", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{}, + }, + }, + want: apis.ErrMissingField("containers"), + }, { + name: "too many containers", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }, { + Image: "helloworld", + }}, + }, + }, + want: apis.ErrMultipleOneOf("containers"), + }, { + name: "exceed max timeout", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(6000), + }, + want: apis.ErrOutOfBoundsValue( + 6000, 0, config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }, { + name: "exceed custom max timeout", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + 
"max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: apis.ErrOutOfBoundsValue(100, 0, 50, "timeoutSeconds"), + }, { + name: "negative timeout", + rs: &RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(-30), + }, + want: apis.ErrOutOfBoundsValue( + -30, 0, config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.rs.Validate(ctx) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} + +func TestImmutableFields(t *testing.T) { + tests := []struct { + name string + new *Revision + old *Revision + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "good (no change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + // Test the case where max-revision-timeout is changed to a value + // that is less than an existing revision's timeout value. + // Existing revision should keep operating normally. 
+ name: "good (max revision timeout change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + "max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: nil, + }, { + name: "bad (resources image change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + }, + }, + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("100m"), + }, + }, + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Resources.Requests["cpu"]: + -: resource.Quantity: "{i:{value:100 scale:-3} d:{Dec:} s:100m Format:DecimalSI}" + +: resource.Quantity: "{i:{value:50 scale:-3} d:{Dec:} s:50m Format:DecimalSI}" +`, + }, + }, { + name: "bad (container image change)", + new: &Revision{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Image: + -: "busybox" + +: "helloworld" +`, + }, + }, { + name: "bad (concurrency model change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + ContainerConcurrency: ptr.Int64(1), + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + ContainerConcurrency: ptr.Int64(2), + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `*{v1.RevisionSpec}.ContainerConcurrency: + -: "2" + +: "1" +`, + }, + }, { + name: "bad (new field added)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + ServiceAccountName: "foobar", + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +`, + }, + }, { + name: "bad (multiple 
changes)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "foobar", + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Image: + -: "busybox" + +: "helloworld" +{v1.RevisionSpec}.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +`, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithinUpdate(context.Background(), test.old) + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.new.Validate(ctx) + if got, want := got.Error(), test.want.Error(); got != want { + t.Errorf("Validate got: %s, want: %s, diff:(-want, +got)=\n%v", got, want, cmp.Diff(got, want)) + } + }) + } +} + +func TestRevisionTemplateSpecValidation(t *testing.T) { + tests := []struct { + name string + rts *RevisionTemplateSpec + want *apis.FieldError + }{{ + name: "valid", + rts: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + name: "empty spec", + rts: &RevisionTemplateSpec{}, + want: apis.ErrMissingField("spec.containers"), + }, { + name: "nested spec error", + rts: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "kevin", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.containers[0].lifecycle"), + }, { + name: "has revision template name", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users 
bring their own revision name. + Name: "parent-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + name: "valid name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // When user provides empty string in the name field it will behave like no name provided. + Name: "", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision name. + Name: "parent-@foo-bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.name"), + }, { + name: "invalid generate name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision generate name. + GenerateName: "parent-@foo-bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.generateName"), + }, { + name: "invalid metadata.annotations for scale", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.MinScaleAnnotationKey: "5", + autoscaling.MaxScaleAnnotationKey: "", + }, + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 1 <= <= 2147483647", + Paths: []string{autoscaling.MaxScaleAnnotationKey}, + }).ViaField("annotations").ViaField("metadata"), + }, { + name: "Queue sidecar resource percentage annotation more than 100", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "200", + }, + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 0.1 <= 200 <= 100", + Paths: []string{serving.QueueSideCarResourcePercentageAnnotation}, + }).ViaField("metadata.annotations"), + }, { + name: "Invalid queue sidecar resource percentage annotation", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "50mx", + }, + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "invalid value: 50mx", + Paths: []string{fmt.Sprintf("[%s]", serving.QueueSideCarResourcePercentageAnnotation)}, + }).ViaField("metadata.annotations"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithinParent(context.Background(), metav1.ObjectMeta{ + Name: "parent", + }) + + got := test.rts.Validate(ctx) + if got, want := 
got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go new file mode 100644 index 0000000000..7ef3cb1ae5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertUp implements apis.Convertible +func (source *Route) ConvertUp(ctx context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertDown implements apis.Convertible +func (sink *Route) ConvertDown(ctx context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion_test.go new file mode 100644 index 0000000000..c5e98dfe31 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" +) + +func TestRouteConversionBadType(t *testing.T) { + good, bad := &Route{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults.go similarity index 75% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults.go index fd1627f769..55bcc825f5 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults.go @@ -14,25 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/ptr" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" ) // SetDefaults implements apis.Defaultable func (r *Route) SetDefaults(ctx context.Context) { r.Spec.SetDefaults(apis.WithinSpec(ctx)) + if r.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Route).Spec, r.Spec, r) + } else { + serving.SetUserInfo(ctx, nil, r.Spec, r) + } + } } // SetDefaults implements apis.Defaultable func (rs *RouteSpec) SetDefaults(ctx context.Context) { if len(rs.Traffic) == 0 && HasDefaultConfigurationName(ctx) { rs.Traffic = []TrafficTarget{{ - Percent: 100, + Percent: ptr.Int64(100), LatestRevision: ptr.Bool(true), }} } @@ -45,7 +53,6 @@ func (rs *RouteSpec) SetDefaults(ctx context.Context) { // SetDefaults implements apis.Defaultable func (tt *TrafficTarget) SetDefaults(ctx context.Context) { if tt.LatestRevision == nil { - sense := (tt.RevisionName == "") - tt.LatestRevision = &sense + tt.LatestRevision = ptr.Bool(tt.RevisionName == "") } } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults_test.go new file mode 100644 index 0000000000..7773c305ea --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_defaults_test.go @@ -0,0 +1,262 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/serving" +) + +func TestRouteDefaulting(t *testing.T) { + tests := []struct { + name string + in *Route + want *Route + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Route{}, + want: &Route{}, + }, { + name: "empty w/ default configuration", + in: &Route{}, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + wc: WithDefaultConfigurationName, + }, { + // Make sure it keeps a 'nil' as a 'nil' and not 'zero' + name: "implied zero percent", + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: nil, + LatestRevision: ptr.Bool(false), + }}, + }, + }, + wc: WithDefaultConfigurationName, + }, { + // Just to make sure it doesn't convert a 'zero' into a 'nil' + name: "explicit zero percent", + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: ptr.Int64(0), + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: ptr.Int64(0), + LatestRevision: ptr.Bool(false), + }}, + }, + }, + wc: WithDefaultConfigurationName, + }, { + name: "latest revision defaulting", + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + 
Percent: ptr.Int64(12), + }, { + RevisionName: "bar", + Percent: ptr.Int64(34), + }, { + ConfigurationName: "baz", + Percent: ptr.Int64(54), + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(12), + LatestRevision: ptr.Bool(false), + }, { + RevisionName: "bar", + Percent: ptr.Int64(34), + LatestRevision: ptr.Bool(false), + }, { + ConfigurationName: "baz", + Percent: ptr.Int64(54), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if !cmp.Equal(test.want, got) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got)) + } + }) + } +} + +func TestRouteUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Route) *Route { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Route + prev *Route + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Route{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Route{}, + prev: &Route{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Route{}), + prev: withUserAnns(u1, u1, &Route{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + ConfigurationName: "new", + }}, + }, + }, + prev: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + ConfigurationName: "old", + }}, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + ConfigurationName: "new", + }}, + }, + }), + prev: withUserAnns(u1, u2, &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + ConfigurationName: "old", + }}, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle.go index 
97229ccd99..7b4094b12f 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) var routeCondSet = apis.NewLivingConditionSet() diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle_test.go new file mode 100644 index 0000000000..356cdcdf27 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_lifecycle_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestRouteDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Route{}, test.t) + if err != nil { + t.Errorf("VerifyType(Route, %T) = %v", test.t, err) + } + }) + } +} + +func TestRouteGetGroupVersionKind(t *testing.T) { + r := &Route{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1", + Kind: "Route", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestRouteIsReady(t *testing.T) { + tests := []struct { + name string + rs *RouteStatus + expected bool + }{{ + name: "Ready undefined", + rs: &RouteStatus{}, + expected: false, + }, { + name: "Ready=False", + rs: &RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + expected: false, + }, { + name: "Ready=Unknown", + rs: &RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + expected: false, + }, { + name: "Ready=True", + rs: &RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + expected: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ready := test.rs.IsReady() + if ready != test.expected { + t.Errorf("IsReady() = %t; expected %t", ready, test.expected) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_types.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_types.go similarity index 88% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_types.go index f1e4e9076c..6a8eed1bb0 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_types.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -65,7 +65,6 @@ type TrafficTarget struct { // Tag is optionally used to expose a dedicated url for referencing // this target exclusively. // +optional - // TODO(mattmoor): Discuss alternative naming options. Tag string `json:"tag,omitempty"` // RevisionName of a specific revision to which to send this portion of @@ -89,10 +88,16 @@ type TrafficTarget struct { // +optional LatestRevision *bool `json:"latestRevision,omitempty"` - // Percent specifies percent of the traffic to this Revision or Configuration. - // This defaults to zero if unspecified. + // Percent indicates that percentage based routing should be used and + // the value indicates the percent of traffic that is be routed to this + // Revision or Configuration. `0` (zero) mean no traffic, `100` means all + // traffic. 
+ // When percentage based routing is being used the follow rules apply: + // - the sum of all percent values must equal 100 + // - when not specified, the implied value for `percent` is zero for + // that particular Revision or Configuration // +optional - Percent int `json:"percent"` + Percent *int64 `json:"percent,omitempty"` // URL displays the URL for accessing named traffic targets. URL is displayed in // status, and is disallowed on spec. URL must contain a scheme (e.g. http://) and @@ -126,7 +131,7 @@ type RouteStatusFields struct { // Address holds the information needed for a Route to be the target of an event. // +optional - Address *duckv1beta1.Addressable `json:"address,omitempty"` + Address *duckv1.Addressable `json:"address,omitempty"` // Traffic holds the configured traffic distribution. // These entries will always contain RevisionName references. @@ -138,7 +143,7 @@ type RouteStatusFields struct { // RouteStatus communicates the observed state of the Route (from the controller). type RouteStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` RouteStatusFields `json:",inline"` } diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation.go similarity index 66% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation.go index 48e062afed..ec7843dac7 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation.go @@ -14,22 +14,35 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" "fmt" + "strings" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" ) // Validate makes sure that Route is properly configured. func (r *Route) Validate(ctx context.Context) *apis.FieldError { - errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).ViaField("metadata") + errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).Also( + r.validateLabels().ViaField("labels")).ViaField("metadata") errs = errs.Also(r.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) errs = errs.Also(r.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Route) + // Don't validate annotations(creator and lastModifier) when route owned by service + // validate only when route created independently. + if r.OwnerReferences == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, r.Spec, original.GetAnnotations(), + r.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } + } return errs } @@ -39,10 +52,18 @@ func validateTrafficList(ctx context.Context, traffic []TrafficTarget) *apis.Fie // Track the targets of named TrafficTarget entries (to detect duplicates). trafficMap := make(map[string]int) - sum := 0 + sum := int64(0) for i, tt := range traffic { errs = errs.Also(tt.Validate(ctx).ViaIndex(i)) + if tt.Percent != nil { + sum += *tt.Percent + } + + if tt.Tag == "" { + continue + } + if idx, ok := trafficMap[tt.Tag]; ok { // We want only single definition of the route, even if it points // to the same config or revision. 
@@ -56,7 +77,6 @@ func validateTrafficList(ctx context.Context, traffic []TrafficTarget) *apis.Fie } else { trafficMap[tt.Tag] = i } - sum += tt.Percent } if sum != 100 { @@ -75,20 +95,15 @@ func (rs *RouteSpec) Validate(ctx context.Context) *apis.FieldError { // Validate verifies that TrafficTarget is properly configured. func (tt *TrafficTarget) Validate(ctx context.Context) *apis.FieldError { - var errs *apis.FieldError + errs := tt.validateLatestRevision(ctx) + errs = tt.validateRevisionAndConfiguration(ctx, errs) + errs = tt.validateTrafficPercentage(errs) + return tt.validateURL(ctx, errs) +} +func (tt *TrafficTarget) validateRevisionAndConfiguration(ctx context.Context, errs *apis.FieldError) *apis.FieldError { // We only validate the sense of latestRevision in the context of a Spec, // and only when it is specified. - if apis.IsInSpec(ctx) && tt.LatestRevision != nil { - lr := *tt.LatestRevision - pinned := tt.RevisionName != "" - if pinned == lr { - // The senses for whether to pin to a particular revision or - // float forward to the latest revision must match. - errs = errs.Also(apis.ErrInvalidValue(lr, "latestRevision")) - } - } - switch { // When we have a default configurationName, we don't // allow one to be specified. @@ -127,13 +142,32 @@ func (tt *TrafficTarget) Validate(ctx context.Context) *apis.FieldError { errs = errs.Also(apis.ErrMissingOneOf( "revisionName", "configurationName")) } + return errs +} +func (tt *TrafficTarget) validateTrafficPercentage(errs *apis.FieldError) *apis.FieldError { // Check that the traffic Percentage is within bounds. 
- if tt.Percent < 0 || tt.Percent > 100 { + if tt.Percent != nil && (*tt.Percent < 0 || *tt.Percent > 100) { errs = errs.Also(apis.ErrOutOfBoundsValue( - tt.Percent, 0, 100, "percent")) + *tt.Percent, 0, 100, "percent")) + } + return errs +} + +func (tt *TrafficTarget) validateLatestRevision(ctx context.Context) *apis.FieldError { + if apis.IsInSpec(ctx) && tt.LatestRevision != nil { + lr := *tt.LatestRevision + pinned := tt.RevisionName != "" + if pinned == lr { + // The senses for whether to pin to a particular revision or + // float forward to the latest revision must match. + return apis.ErrGeneric(fmt.Sprintf("may not set revisionName %q when latestRevision is %t", tt.RevisionName, lr), "latestRevision") + } } + return nil +} +func (tt *TrafficTarget) validateURL(ctx context.Context, errs *apis.FieldError) *apis.FieldError { // Check that we set the URL appropriately. if tt.URL.String() != "" { // URL is not allowed in traffic under spec. @@ -151,16 +185,15 @@ func (tt *TrafficTarget) Validate(ctx context.Context) *apis.FieldError { errs = errs.Also(apis.ErrMissingField("url")) } } - return errs } -// Validate implements apis.Validatable +// Validate implements apis.Validatable. func (rs *RouteStatus) Validate(ctx context.Context) *apis.FieldError { return rs.RouteStatusFields.Validate(ctx) } -// Validate implements apis.Validatable +// Validate implements apis.Validatable. func (rsf *RouteStatusFields) Validate(ctx context.Context) *apis.FieldError { // TODO(mattmoor): Validate other status fields. @@ -169,3 +202,25 @@ func (rsf *RouteStatusFields) Validate(ctx context.Context) *apis.FieldError { } return nil } + +func validateClusterVisibilityLabel(label string) (errs *apis.FieldError) { + if label != config.VisibilityClusterLocal { + errs = apis.ErrInvalidValue(label, config.VisibilityLabelKey) + } + return +} + +// validateLabels function validates route labels. 
+func (r *Route) validateLabels() (errs *apis.FieldError) { + for key, val := range r.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(validateClusterVisibilityLabel(val)) + case key == serving.ServiceLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ServiceLabelKey, "Service", r.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation_test.go new file mode 100644 index 0000000000..3f85639d79 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/route_validation_test.go @@ -0,0 +1,830 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" +) + +func TestTrafficTargetValidation(t *testing.T) { + tests := []struct { + name string + tt *TrafficTarget + want *apis.FieldError + wc func(context.Context) context.Context + }{{ + name: "valid with revisionName", + tt: &TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with revisionName and name (spec)", + tt: &TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with revisionName and name (status)", + tt: &TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + URL: &apis.URL{ + Scheme: "http", + Host: "foo.bar.com", + }, + }, + wc: apis.WithinStatus, + want: nil, + }, { + name: "invalid with revisionName and name (status)", + tt: &TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinStatus, + want: apis.ErrMissingField("url"), + }, { + name: "invalid with bad revisionName", + tt: &TrafficTarget{ + RevisionName: "b ar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: apis.ErrInvalidKeyName( + "b ar", "revisionName", "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')"), + }, { + name: "valid with revisionName and latestRevision", + tt: &TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with revisionName and latestRevision (spec)", + tt: &TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: apis.ErrGeneric(`may not set revisionName "bar" when latestRevision is true`, "latestRevision"), + }, { + name: "valid with revisionName and latestRevision (status)", + tt: &TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(12), + }, + wc: apis.WithinStatus, + want: nil, + }, { + name: "valid with configurationName", + tt: &TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with configurationName and name (spec)", + tt: &TrafficTarget{ + Tag: "foo", + ConfigurationName: "bar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with bad configurationName", + tt: &TrafficTarget{ + ConfigurationName: "b ar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: apis.ErrInvalidKeyName( + "b ar", "configurationName", "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')"), + }, { + name: "valid with configurationName and latestRevision", + tt: &TrafficTarget{ + ConfigurationName: "blah", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with configurationName and latestRevision", + tt: &TrafficTarget{ + ConfigurationName: "blah", + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: apis.ErrGeneric(`may not set revisionName "" when latestRevision is false`, "latestRevision"), + }, { + name: "invalid with configurationName and default configurationName", + tt: &TrafficTarget{ + ConfigurationName: "blah", + Percent: ptr.Int64(37), + }, + wc: WithDefaultConfigurationName, + want: apis.ErrDisallowedFields("configurationName"), + }, { + name: "valid with only default configurationName", + tt: &TrafficTarget{ + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: nil, + }, { + name: "valid with default configurationName and latestRevision", + tt: &TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: nil, + }, { + name: "invalid with default configurationName and latestRevision", + tt: &TrafficTarget{ + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: apis.ErrGeneric(`may not set revisionName "" when latestRevision is false`, "latestRevision"), + }, { + name: "invalid without revisionName in status", + tt: &TrafficTarget{ + ConfigurationName: "blah", + Percent: ptr.Int64(37), + }, + wc: apis.WithinStatus, + want: 
apis.ErrMissingField("revisionName"), + }, { + name: "valid with revisionName and default configurationName", + tt: &TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: WithDefaultConfigurationName, + want: nil, + }, { + name: "valid with no percent", + tt: &TrafficTarget{ + ConfigurationName: "booga", + }, + want: nil, + }, { + name: "valid with nil percent", + tt: &TrafficTarget{ + ConfigurationName: "booga", + Percent: nil, + }, + want: nil, + }, { + name: "valid with zero percent", + tt: &TrafficTarget{ + ConfigurationName: "booga", + Percent: ptr.Int64(0), + }, + want: nil, + }, { + name: "valid with no name", + tt: &TrafficTarget{ + ConfigurationName: "booga", + Percent: ptr.Int64(100), + }, + want: nil, + }, { + name: "invalid with both", + tt: &TrafficTarget{ + RevisionName: "foo", + ConfigurationName: "bar", + }, + want: &apis.FieldError{ + Message: "expected exactly one, got both", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid with neither", + tt: &TrafficTarget{ + Percent: ptr.Int64(100), + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid percent too low", + tt: &TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(-5), + }, + want: apis.ErrOutOfBoundsValue("-5", "0", "100", "percent"), + }, { + name: "invalid percent too high", + tt: &TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(101), + }, + want: apis.ErrOutOfBoundsValue("101", "0", "100", "percent"), + }, { + name: "disallowed url set", + tt: &TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(100), + URL: &apis.URL{ + Host: "should.not.be.set", + }, + }, + wc: apis.WithinSpec, + want: apis.ErrDisallowedFields("url"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got := 
test.tt.Validate(ctx) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRouteValidation(t *testing.T) { + tests := []struct { + name string + r *Route + want *apis.FieldError + }{{ + name: "valid", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + Status: RouteStatus{ + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + URL: &apis.URL{ + Scheme: "http", + Host: "bar.blah.com", + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "valid split", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "prod", + RevisionName: "foo", + Percent: ptr.Int64(90), + }, { + Tag: "experiment", + ConfigurationName: "bar", + Percent: ptr.Int64(10), + }}, + }, + }, + want: nil, + }, { + name: "valid split without tags", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }, { + RevisionName: "bar", + Percent: ptr.Int64(10), + }}, + }, + }, + want: nil, + }, { + name: "missing url in status", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + Status: RouteStatus{ + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "missing field(s)", + Paths: []string{ + "status.traffic[0].url", + }, + }, + }, { + name: "invalid traffic entry (missing oneof)", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{ + "spec.traffic[0].configurationName", + "spec.traffic[0].revisionName", + }, + }, + }, { + name: "invalid traffic entry (multiple names)", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(50), + }, { + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(50), + }}, + }, + }, + want: &apis.FieldError{ + Message: `Multiple definitions for "foo"`, + Paths: []string{ + "spec.traffic[0].tag", + "spec.traffic[1].tag", + }, + }, + }, { + name: "invalid name - dots", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid name - dots and spec percent is not 100", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }}, + }, + }, + want: (&apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }).Also(&apis.FieldError{ + Message: "Traffic targets sum to 90, want 100", + Paths: []string{"spec.traffic"}, + }), + }, { + name: "invalid name - too long", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"metadata.name"}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRouteLabelValidation(t *testing.T) { + validRouteSpec := RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + } + tests := []struct { + name string + r *Route + want *apis.FieldError + }{{ + name: "valid visibility name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: validRouteSpec, + }, + want: nil, + }, { + name: "invalid visibility name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-value", + }, + }, + Spec: validRouteSpec, + }, + want: apis.ErrInvalidValue("bad-value", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid knative service name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", 
+ Name: "test-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: nil, + }, { + name: "invalid knative service name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "absent-svc", + }, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "Mismatch knative service label and owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name without correct owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "absent-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name with multiple owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "NewSerice", + Name: "test-new-svc", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validRouteSpec, + }, + }, { + name: "invalid knative label", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: validRouteSpec, + }, + want: 
apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func getRouteSpec(confName string) RouteSpec { + return RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + ConfigurationName: confName, + }}, + } +} + +func TestRouteAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Route + this *Route + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update creator annotation with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: 
[]string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier annotation without spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier annotation with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update without spec changes as route owned by service", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as route owned by service", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: 
map[string]string{ + serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go new file mode 100644 index 0000000000..efb6605f33 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertUp implements apis.Convertible +func (source *Service) ConvertUp(ctx context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertDown implements apis.Convertible +func (sink *Service) ConvertDown(ctx context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion_test.go new file mode 100644 index 0000000000..0158818ec7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" +) + +func TestServiceConversionBadType(t *testing.T) { + good, bad := &Service{}, &Revision{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go similarity index 62% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go index 1826400629..095ca9c456 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go @@ -14,15 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" - "k8s.io/apimachinery/pkg/api/equality" - - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" ) // SetDefaults implements apis.Defaultable @@ -30,23 +28,10 @@ func (s *Service) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, s.ObjectMeta) s.Spec.SetDefaults(apis.WithinSpec(ctx)) - if ui := apis.GetUserInfo(ctx); ui != nil { - ans := s.GetAnnotations() - if ans == nil { - ans = map[string]string{} - defer s.SetAnnotations(ans) - } - - if apis.IsInUpdate(ctx) { - old := apis.GetBaseline(ctx).(*Service) - if equality.Semantic.DeepEqual(old.Spec, s.Spec) { - return - } - ans[serving.UpdaterAnnotation] = ui.Username - } else { - ans[serving.CreatorAnnotation] = ui.Username - ans[serving.UpdaterAnnotation] = ui.Username - } + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Service).Spec, s.Spec, s) + } else { + serving.SetUserInfo(ctx, nil, s.Spec, s) } } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults_test.go new file mode 100644 index 0000000000..d42c34d1fe --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults_test.go @@ -0,0 +1,351 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" +) + +func TestServiceDefaulting(t *testing.T) { + tests := []struct { + name string + in *Service + want *Service + }{{ + name: "empty", + in: &Service{}, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "run latest", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "run latest with some default overrides", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + 
Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "byo traffic block", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "current", + RevisionName: "foo", + Percent: ptr.Int64(90), + }, { + Tag: "candidate", + RevisionName: "bar", + Percent: ptr.Int64(10), + }, { + Tag: "latest", + }}, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "current", + RevisionName: "foo", + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, { + Tag: "candidate", + RevisionName: "bar", + Percent: 
ptr.Int64(10), + LatestRevision: ptr.Bool(false), + }, { + Tag: "latest", + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if !cmp.Equal(got, test.want, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} + +func TestAnnotateUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + + withUserAnns := func(u1, u2 string, s *Service) *Service { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + + tests := []struct { + name string + user string + this *Service + prev *Service + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Service{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Service{}, + prev: &Service{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Service{}), + prev: withUserAnns(u1, u1, &Service{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + }, + prev: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + }), + prev: withUserAnns(u1, u2, &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git 
a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle.go index 4794216db5..f842245478 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) var serviceCondSet = apis.NewLivingConditionSet() diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle_test.go new file mode 100644 index 0000000000..9fe1ee1596 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_lifecycle_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestServiceDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Service{}, test.t) + if err != nil { + t.Errorf("VerifyType(Service, %T) = %v", test.t, err) + } + }) + } +} + +func TestServiceGetGroupVersionKind(t *testing.T) { + r := &Service{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1", + Kind: "Service", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestServiceIsReady(t *testing.T) { + tests := []struct { + name string + ss *ServiceStatus + expected bool + }{{ + name: "Ready undefined", + ss: &ServiceStatus{}, + expected: false, + }, { + name: "Ready=False", + ss: &ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + expected: false, + }, { + name: "Ready=Unknown", + ss: &ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + expected: false, + }, { + name: "Ready=True", + ss: &ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + expected: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ready := test.ss.IsReady() + if ready != test.expected { + t.Errorf("IsReady() = %t; expected %t", ready, test.expected) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_types.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_types.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_types.go index 95a680a922..0fa860b600 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_types.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -91,7 +91,7 @@ const ( // ServiceStatus represents the Status stanza of the Service resource. type ServiceStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // In addition to inlining ConfigurationSpec, we also inline the fields // specific to ConfigurationStatus. diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation.go similarity index 72% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation.go index e1e49f6b57..bbe5477bc8 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation.go @@ -14,13 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( "context" + "strings" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" ) // Validate makes sure that Service is properly configured. @@ -30,7 +32,8 @@ func (s *Service) Validate(ctx context.Context) (errs *apis.FieldError) { // have changed (i.e. due to config-defaults changes), we elide the metadata and // spec validation. if !apis.IsInStatusUpdate(ctx) { - errs = errs.Also(serving.ValidateObjectMetadata(s.GetObjectMeta()).ViaField("metadata")) + errs = errs.Also(serving.ValidateObjectMetadata(s.GetObjectMeta()).Also( + s.validateLabels().ViaField("labels")).ViaField("metadata")) ctx = apis.WithinParent(ctx, s.ObjectMeta) errs = errs.Also(s.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } @@ -39,12 +42,12 @@ func (s *Service) Validate(ctx context.Context) (errs *apis.FieldError) { if apis.IsInUpdate(ctx) { original := apis.GetBaseline(ctx).(*Service) - + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, s.Spec, original.GetAnnotations(), + s.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) err := s.Spec.ConfigurationSpec.Template.VerifyNameChange(ctx, original.Spec.ConfigurationSpec.Template) errs = errs.Also(err.ViaField("spec.template")) } - return errs } @@ -61,3 +64,16 @@ func (ss *ServiceStatus) Validate(ctx context.Context) *apis.FieldError { return ss.ConfigurationStatusFields.Validate(ctx).Also( ss.RouteStatusFields.Validate(ctx)) } + +// validateLabels function validates service labels +func (s *Service) validateLabels() (errs *apis.FieldError) { + for key, val := range s.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(validateClusterVisibilityLabel(val)) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + 
return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation_test.go new file mode 100644 index 0000000000..f1f26767ce --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/service_validation_test.go @@ -0,0 +1,787 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + + "knative.dev/pkg/apis" +) + +func TestServiceValidation(t *testing.T) { + goodConfigSpec := ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + } + goodRouteSpec := RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + } + + tests := []struct { + name string + r *Service + want *apis.FieldError + }{{ + name: "valid run latest", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + 
Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "valid visibility label", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: nil, + }, { + name: "invalid knative label", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + "serving.knative.dev/name": "some-value", + }, + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/name", "metadata.labels"), + }, { + name: "valid non knative label", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + "serving.name": "some-name", + }, + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: nil, + }, { + name: "invalid visibility label value", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-label", + }, + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: apis.ErrInvalidValue("bad-label", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid release", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + Tag: "current", + LatestRevision: ptr.Bool(false), + RevisionName: "valid-00001", + Percent: ptr.Int64(98), + }, { + Tag: "candidate", + LatestRevision: ptr.Bool(false), + RevisionName: "valid-00002", + Percent: ptr.Int64(2), + }, { + Tag: "latest", + LatestRevision: ptr.Bool(true), + Percent: nil, + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid configurationName", + r: 
&Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + ConfigurationName: "valid", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.traffic[0].configurationName"), + }, { + name: "invalid latestRevision", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "valid", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrGeneric(`may not set revisionName "valid" when latestRevision is true`, "spec.traffic[0].latestRevision"), + }, { + name: "invalid container concurrency", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + ContainerConcurrency: ptr.Int64(-10), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrOutOfBoundsValue( + -10, 0, config.DefaultMaxRevisionContainerConcurrency, + "spec.template.spec.containerConcurrency"), + }} + + // TODO(dangerd): PodSpec validation failures. + // TODO(mattmoor): BYO revision name. 
+ + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestImmutableServiceFields(t *testing.T) { + tests := []struct { + name string + new *Service + old *Service + want *apis.FieldError + }{{ + name: "without byo-name", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (name change)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "byo-name-foo", // Used it! 
+ Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "byo-name-bar", // Used it! + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (with delta)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "byo-name-bar", // Leave old. + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + RevisionName: "byo-name-bar", // Used it! 
+ Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "bad byo-name", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.template.metadata.name"}, + Details: "{*v1.RevisionTemplateSpec}.Spec.PodSpec.Containers[0].Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + got := test.new.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v\nwant: %v\ngot: %v", + diff, test.want, got) + } + }) + } +} + +func TestServiceSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + service *Service + subresource string + want *apis.FieldError + }{{ + name: "status update with valid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid status", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + Status: ServiceStatus{ + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), URL: &apis.URL{ + Scheme: "http", + Host: "foo.bar.com", + }, + }}, + }, + }, + }, + subresource: "status", + want: &apis.FieldError{ + Message: "Traffic 
targets sum to 50, want 100", + Paths: []string{"status.traffic"}, + }, + }, { + name: "non-status sub resource update with valid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.template.spec.timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.service) + ctx = apis.WithinSubResourceUpdate(ctx, test.service, test.subresource) + if diff := cmp.Diff(test.want.Error(), test.service.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getServiceSpec(image string) ServiceSpec { + return ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: 
RevisionTemplateSpec{ + Spec: RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + } +} + +func TestServiceAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Service + this *Service + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier without spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier with spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + 
serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getServiceSpec("helloworld:bar"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go similarity index 96% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go index da1db5915a..bfff38f197 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,12 +18,12 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1beta1 +package v1 import ( - apis "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -58,7 +58,7 @@ func (in *Configuration) DeepCopyObject() runtime.Object { func (in *ConfigurationList) DeepCopyInto(out *ConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Configuration, len(*in)) @@ -170,7 +170,7 @@ func (in *Revision) DeepCopyObject() runtime.Object { func (in *RevisionList) DeepCopyInto(out *RevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Revision, len(*in)) @@ -203,6 +203,11 @@ func (in *RevisionList) DeepCopyObject() runtime.Object { func (in *RevisionSpec) DeepCopyInto(out *RevisionSpec) { *out = *in in.PodSpec.DeepCopyInto(&out.PodSpec) + if in.ContainerConcurrency != nil { + in, out := &in.ContainerConcurrency, &out.ContainerConcurrency + *out = new(int64) + **out = **in + } if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds *out = new(int64) @@ -288,7 +293,7 @@ func (in *Route) DeepCopyObject() runtime.Object { func (in *RouteList) DeepCopyInto(out *RouteList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Route, len(*in)) @@ -368,7 +373,7 @@ func (in *RouteStatusFields) DeepCopyInto(out *RouteStatusFields) { } if in.Address != nil { in, out := &in.Address, &out.Address - *out = new(duckv1beta1.Addressable) + *out = new(duckv1.Addressable) (*in).DeepCopyInto(*out) } if in.Traffic != nil { @@ -423,7 +428,7 @@ func (in *Service) DeepCopyObject() runtime.Object { func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = 
make([]Service, len(*in)) @@ -497,6 +502,11 @@ func (in *TrafficTarget) DeepCopyInto(out *TrafficTarget) { *out = new(bool) **out = **in } + if in.Percent != nil { + in, out := &in.Percent, &out.Percent + *out = new(int64) + **out = **in + } if in.URL != nil { in, out := &in.URL, &out.URL *out = new(apis.URL) diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/README.md b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/README.md new file mode 100644 index 0000000000..e143c5cd54 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/README.md @@ -0,0 +1,9 @@ +# Knative Serving API v1alpha1 + +This is the implementation of the Knative Serving API, which is specified in +[`docs/spec/spec.md`](/docs/spec/spec.md) and verified via +[the conformance tests](/test/conformance). + +**Updates to this implementation should include a corresponding change to +[the spec](/docs/spec/spec.md) and [the conformance tests](/test/conformance).** +([#780](https://github.com/knative/serving/issues/780)) diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go similarity index 81% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go index 067d470d2e..a7d804a293 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion.go @@ -20,8 +20,9 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/pkg/apis" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" ) // ConvertUp implements apis.Convertible @@ -33,13 
+34,19 @@ func (source *Configuration) ConvertUp(ctx context.Context, obj apis.Convertible return err } return source.Status.ConvertUp(ctx, &sink.Status) + case *v1.Configuration: + sink.ObjectMeta = source.ObjectMeta + if err := source.Spec.ConvertUp(ctx, &sink.Spec); err != nil { + return err + } + return source.Status.ConvertUp(ctx, &sink.Status) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertUp helps implement apis.Convertible -func (source *ConfigurationSpec) ConvertUp(ctx context.Context, sink *v1beta1.ConfigurationSpec) error { +func (source *ConfigurationSpec) ConvertUp(ctx context.Context, sink *v1.ConfigurationSpec) error { if source.DeprecatedBuild != nil { return ConvertErrorf("build", "build cannot be migrated forward.") } @@ -56,14 +63,14 @@ func (source *ConfigurationSpec) ConvertUp(ctx context.Context, sink *v1beta1.Co } // ConvertUp helps implement apis.Convertible -func (source *ConfigurationStatus) ConvertUp(ctx context.Context, sink *v1beta1.ConfigurationStatus) error { +func (source *ConfigurationStatus) ConvertUp(ctx context.Context, sink *v1.ConfigurationStatus) error { source.Status.ConvertTo(ctx, &sink.Status) return source.ConfigurationStatusFields.ConvertUp(ctx, &sink.ConfigurationStatusFields) } // ConvertUp helps implement apis.Convertible -func (source *ConfigurationStatusFields) ConvertUp(ctx context.Context, sink *v1beta1.ConfigurationStatusFields) error { +func (source *ConfigurationStatusFields) ConvertUp(ctx context.Context, sink *v1.ConfigurationStatusFields) error { sink.LatestReadyRevisionName = source.LatestReadyRevisionName sink.LatestCreatedRevisionName = source.LatestCreatedRevisionName return nil @@ -78,26 +85,32 @@ func (sink *Configuration) ConvertDown(ctx context.Context, obj apis.Convertible return err } return sink.Status.ConvertDown(ctx, source.Status) + case *v1.Configuration: + sink.ObjectMeta = source.ObjectMeta + if err := sink.Spec.ConvertDown(ctx, source.Spec); err != nil { + return 
err + } + return sink.Status.ConvertDown(ctx, source.Status) default: return fmt.Errorf("unknown version, got: %T", source) } } // ConvertDown helps implement apis.Convertible -func (sink *ConfigurationSpec) ConvertDown(ctx context.Context, source v1beta1.ConfigurationSpec) error { +func (sink *ConfigurationSpec) ConvertDown(ctx context.Context, source v1.ConfigurationSpec) error { sink.Template = &RevisionTemplateSpec{} return sink.Template.ConvertDown(ctx, source.Template) } // ConvertDown helps implement apis.Convertible -func (sink *ConfigurationStatus) ConvertDown(ctx context.Context, source v1beta1.ConfigurationStatus) error { +func (sink *ConfigurationStatus) ConvertDown(ctx context.Context, source v1.ConfigurationStatus) error { source.Status.ConvertTo(ctx, &sink.Status) return sink.ConfigurationStatusFields.ConvertDown(ctx, source.ConfigurationStatusFields) } // ConvertDown helps implement apis.Convertible -func (sink *ConfigurationStatusFields) ConvertDown(ctx context.Context, source v1beta1.ConfigurationStatusFields) error { +func (sink *ConfigurationStatusFields) ConvertDown(ctx context.Context, source v1.ConfigurationStatusFields) error { sink.LatestReadyRevisionName = source.LatestReadyRevisionName sink.LatestCreatedRevisionName = source.LatestCreatedRevisionName return nil diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion_test.go new file mode 100644 index 0000000000..a6108efcdc --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_conversion_test.go @@ -0,0 +1,237 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +func TestConfigurationConversionBadType(t *testing.T) { + good, bad := &Configuration{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} + +func TestConfigurationConversionTemplateError(t *testing.T) { + tests := []struct { + name string + cs *ConfigurationSpec + }{{ + name: "multiple of", + cs: &ConfigurationSpec{ + Template: &RevisionTemplateSpec{}, + DeprecatedRevisionTemplate: &RevisionTemplateSpec{}, + }, + }, { + name: "missing", + cs: &ConfigurationSpec{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := &v1.ConfigurationSpec{} + if err := test.cs.ConvertUp(context.Background(), result); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", result) + } + }) + } +} + +func TestConfigurationConversion(t *testing.T) { + versions := []apis.Convertible{&v1.Configuration{}, &v1beta1.Configuration{}} + + tests := []struct { + name string + in *Configuration + badField string + }{{ + name: "simple configuration", + 
in: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "foo", + }}, + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + Status: ConfigurationStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestReadyRevisionName: "foo-00002", + LatestCreatedRevisionName: "foo-00009", + }, + }, + }, + }, { + name: "cannot convert build", + badField: "build", + in: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ConfigurationSpec{ + DeprecatedBuild: &runtime.RawExtension{ + Object: &Revision{}, + }, + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + Status: ConfigurationStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestReadyRevisionName: "foo-00002", + LatestCreatedRevisionName: "foo-00009", + }, + }, + }, + }} + + toDeprecated := func(in *Configuration) *Configuration { + out := in.DeepCopy() + 
out.Spec.Template.Spec.DeprecatedContainer = &out.Spec.Template.Spec.Containers[0] + out.Spec.Template.Spec.Containers = nil + out.Spec.DeprecatedRevisionTemplate = out.Spec.Template + out.Spec.Template = nil + return out + } + + for _, test := range tests { + for _, version := range versions { + t.Run(test.name, func(t *testing.T) { + ver := version + if err := test.in.ConvertUp(context.Background(), ver); err != nil { + if test.badField != "" { + cce, ok := err.(*CannotConvertError) + if ok && cce.Field == test.badField { + return + } + } + t.Errorf("ConvertUp() = %v", err) + } else if test.badField != "" { + t.Errorf("ConvertUp() = %#v, wanted bad field %q", ver, + test.badField) + return + } + got := &Configuration{} + if err := got.ConvertDown(context.Background(), ver); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + + // A variant of the test that uses `revisionTemplate:` and `container:`, + // but end up with what we have above anyways. 
+ t.Run(test.name+" (deprecated)", func(t *testing.T) { + ver := version + start := toDeprecated(test.in) + if err := start.ConvertUp(context.Background(), ver); err != nil { + if test.badField != "" { + cce, ok := err.(*CannotConvertError) + if ok && cce.Field == test.badField { + return + } + } + t.Errorf("ConvertUp() = %v", err) + } else if test.badField != "" { + t.Errorf("CovnertUp() = %#v, wanted bad field %q", ver, + test.badField) + return + } + got := &Configuration{} + if err := got.ConvertDown(context.Background(), ver); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go similarity index 63% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go index 9f8706bc30..56ef58c062 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults.go @@ -19,26 +19,34 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) func (c *Configuration) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, c.ObjectMeta) c.Spec.SetDefaults(apis.WithinSpec(ctx)) + if c.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Configuration).Spec, c.Spec, c) + } else { + serving.SetUserInfo(ctx, nil, c.Spec, c) + } + } } func (cs *ConfigurationSpec) 
SetDefaults(ctx context.Context) { - if v1beta1.IsUpgradeViaDefaulting(ctx) { - beta := v1beta1.ConfigurationSpec{} - if cs.ConvertUp(ctx, &beta) == nil { + if v1.IsUpgradeViaDefaulting(ctx) { + v := v1.ConfigurationSpec{} + if cs.ConvertUp(ctx, &v) == nil { alpha := ConfigurationSpec{} - if alpha.ConvertDown(ctx, beta) == nil { + if alpha.ConvertDown(ctx, v) == nil { *cs = alpha } } } - cs.GetTemplate().Spec.SetDefaults(ctx) + cs.GetTemplate().SetDefaults(ctx) } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults_test.go new file mode 100644 index 0000000000..13fe1b46b3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_defaults_test.go @@ -0,0 +1,319 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +var ( + defaultResources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + Limits: corev1.ResourceList{}, + } + ignoreUnexportedResources = cmpopts.IgnoreUnexported(resource.Quantity{}) +) + +func TestConfigurationDefaulting(t *testing.T) { + tests := []struct { + name string + in *Configuration + want *Configuration + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Configuration{}, + want: &Configuration{}, + }, { + name: "shell", + in: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, { + name: "lemonade", + wc: v1.WithUpgradeViaDefaulting, + in: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + 
TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + }, + }, { + name: "shell podspec", + in: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{}}, + }, + }, + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + }, + }, { + name: "no overwrite values", + in: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + DeprecatedContainer: &corev1.Container{ + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + want: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }} + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if diff := cmp.Diff(test.want, got, ignoreUnexportedResources); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } +} + +func TestConfigurationUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Configuration) *Configuration { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Configuration + prev *Configuration + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Configuration{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Configuration{}, + prev: &Configuration{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Configuration{}), + prev: withUserAnns(u1, u1, &Configuration{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{}, + }, + }, + prev: &Configuration{ + Spec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{}, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Configuration{ + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + }), + prev: withUserAnns(u1, u2, &Configuration{ + Spec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + }, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git 
a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go similarity index 93% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go index 56c3dc3b5a..25d532de58 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle.go @@ -17,10 +17,10 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) var confCondSet = apis.NewLivingConditionSet() @@ -88,7 +88,9 @@ func (cs *ConfigurationStatus) SetLatestCreatedRevisionName(name string) { func (cs *ConfigurationStatus) SetLatestReadyRevisionName(name string) { cs.LatestReadyRevisionName = name - confCondSet.Manage(cs).MarkTrue(ConfigurationConditionReady) + if cs.LatestReadyRevisionName == cs.LatestCreatedRevisionName { + confCondSet.Manage(cs).MarkTrue(ConfigurationConditionReady) + } } func (cs *ConfigurationStatus) MarkLatestCreatedFailed(name, message string) { @@ -112,6 +114,6 @@ func (cs *ConfigurationStatus) MarkLatestReadyDeleted() { "Revision %q was deleted.", cs.LatestReadyRevisionName) } -func (cs *ConfigurationStatus) duck() *duckv1beta1.Status { +func (cs *ConfigurationStatus) duck() *duckv1.Status { return &cs.Status } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle_test.go new file mode 100644 index 0000000000..abd2d51962 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_lifecycle_test.go @@ -0,0 +1,338 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" +) + +func TestConfigurationDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Configuration{}, test.t) + if err != nil { + t.Errorf("VerifyType(Configuration, %T) = %v", test.t, err) + } + }) + } +} + +func TestConfigurationIsReady(t *testing.T) { + cases := []struct { + name string + status ConfigurationStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: ConfigurationStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: 
corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }, { + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status false should not be ready", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }, { + Type: ConfigurationConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestLatestReadyRevisionNameUpToDate(t *testing.T) { + cases := []struct { + name string + status ConfigurationStatus + isUpdateToDate bool + }{{ + name: "Not ready status should not be up-to-date", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionFalse, 
+ }}, + }, + }, + isUpdateToDate: false, + }, { + name: "Missing LatestReadyRevisionName should not be up-to-date", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestCreatedRevisionName: "rev-1", + }, + }, + isUpdateToDate: false, + }, { + name: "Different revision names should not be up-to-date", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestCreatedRevisionName: "rev-2", + LatestReadyRevisionName: "rev-1", + }, + }, + isUpdateToDate: false, + }, { + name: "Same revision names and ready status should be up-to-date", + status: ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestCreatedRevisionName: "rev-1", + LatestReadyRevisionName: "rev-1", + }, + }, + isUpdateToDate: true, + }} + + for _, tc := range cases { + if e, a := tc.isUpdateToDate, tc.status.IsLatestReadyRevisionNameUpToDate(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + } +} + +func TestTypicalFlow(t *testing.T) { + r := &ConfigurationStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + r.SetLatestCreatedRevisionName("foo") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestReadyRevisionName("foo") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + + // Verify a second call to SetLatestCreatedRevisionName doesn't change the status from Ready + // e.g. on a subsequent reconciliation. 
+ r.SetLatestCreatedRevisionName("foo") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestCreatedRevisionName("bar") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestReadyRevisionName("bar") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + + r.MarkResourceNotConvertible(ConvertErrorf("build", "something something not allowed.").(*CannotConvertError)) + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + apitestv1.CheckConditionFailed(r.duck(), ConditionTypeConvertible, t) +} + +func TestFailingFirstRevisionWithRecovery(t *testing.T) { + r := &ConfigurationStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + // Our first attempt to create the revision fails + const want = "transient API server failure" + r.MarkRevisionCreationFailed(want) + apitestv1.CheckConditionFailed(r.duck(), ConfigurationConditionReady, t) + if c := r.GetCondition(ConfigurationConditionReady); !strings.Contains(c.Message, want) { + t.Errorf("MarkRevisionCreationFailed = %v, want substring %v", c.Message, want) + } + + r.SetLatestCreatedRevisionName("foo") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + // Then we create it, but it fails to come up. + const want2 = "the message" + r.MarkLatestCreatedFailed("foo", want2) + apitestv1.CheckConditionFailed(r.duck(), ConfigurationConditionReady, t) + if c := r.GetCondition(ConfigurationConditionReady); !strings.Contains(c.Message, want2) { + t.Errorf("MarkLatestCreatedFailed = %v, want substring %v", c.Message, want2) + } + + // When a new revision comes along the Ready condition becomes Unknown. + r.SetLatestCreatedRevisionName("bar") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + // When the new revision becomes ready, then Ready becomes true as well. 
+ r.SetLatestReadyRevisionName("bar") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) +} + +func TestFailingSecondRevision(t *testing.T) { + r := &ConfigurationStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestCreatedRevisionName("foo") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestReadyRevisionName("foo") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestCreatedRevisionName("bar") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + // When the second revision fails, the Configuration becomes Failed. + const want = "the message" + r.MarkLatestCreatedFailed("bar", want) + apitestv1.CheckConditionFailed(r.duck(), ConfigurationConditionReady, t) + if c := r.GetCondition(ConfigurationConditionReady); !strings.Contains(c.Message, want) { + t.Errorf("MarkLatestCreatedFailed = %v, want substring %v", c.Message, want) + } +} + +func TestLatestRevisionDeletedThenFixed(t *testing.T) { + r := &ConfigurationStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestCreatedRevisionName("foo") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestReadyRevisionName("foo") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) + + // When the latest revision is deleted, the Configuration became Failed. + const want = "was deleted" + r.MarkLatestReadyDeleted() + apitestv1.CheckConditionFailed(r.duck(), ConfigurationConditionReady, t) + if cnd := r.GetCondition(ConfigurationConditionReady); cnd == nil || !strings.Contains(cnd.Message, want) { + t.Errorf("MarkLatestReadyDeleted = %v, want substring %v", cnd.Message, want) + } + + // But creating new revision 'bar' and making it Ready will fix things. 
+ r.SetLatestCreatedRevisionName("bar") + apitestv1.CheckConditionOngoing(r.duck(), ConfigurationConditionReady, t) + + r.SetLatestReadyRevisionName("bar") + apitestv1.CheckConditionSucceeded(r.duck(), ConfigurationConditionReady, t) +} + +func TestConfigurationGetGroupVersionKind(t *testing.T) { + c := &Configuration{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1alpha1", + Kind: "Configuration", + } + if got := c.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_types.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_types.go index 1ee2ddb0ec..0bc29f921f 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_types.go @@ -17,11 +17,11 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -98,7 +98,7 @@ const ( ConfigurationConditionReady = apis.ConditionReady ) -// ConfigurationStatusFields holds all of the non-duckv1beta1.Status status fields of a Route. +// ConfigurationStatusFields holds all of the non-duckv1.Status status fields of a Route. // These are defined outline so that we can also inline them into Service, and more easily // copy them. 
type ConfigurationStatusFields struct { @@ -115,7 +115,7 @@ type ConfigurationStatusFields struct { // ConfigurationStatus communicates the observed state of the Configuration (from the controller). type ConfigurationStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` ConfigurationStatusFields `json:",inline"` } diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation.go similarity index 85% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation.go index 14d92b2bd7..2704da20b8 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/configuration_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation.go @@ -21,8 +21,8 @@ import ( "k8s.io/apimachinery/pkg/api/equality" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" ) // Validate makes sure that Configuration is properly configured. @@ -39,7 +39,12 @@ func (c *Configuration) Validate(ctx context.Context) (errs *apis.FieldError) { if apis.IsInUpdate(ctx) { original := apis.GetBaseline(ctx).(*Configuration) - + // Don't validate annotations(creator and lastModifier) when configuration owned by service + // validate only when configuration created independently. 
+ if c.GetOwnerReferences() == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, c.Spec, original.GetAnnotations(), + c.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } err := c.Spec.GetTemplate().VerifyNameChange(ctx, original.Spec.GetTemplate()) errs = errs.Also(err.ViaField("spec.revisionTemplate")) diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation_test.go new file mode 100644 index 0000000000..073a538fe2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/configuration_validation_test.go @@ -0,0 +1,860 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "strings" + "testing" + + "knative.dev/pkg/ptr" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestConfigurationSpecValidation(t *testing.T) { + tests := []struct { + name string + c *ConfigurationSpec + want *apis.FieldError + }{{ + name: "valid", + c: &ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + want: nil, + }, { + name: "valid podspec", + c: &ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "propagate revision failures", + c: &ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "stuart", + Image: "hellworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("revisionTemplate.spec.container.lifecycle"), + }, { + name: "build is not allowed", + c: &ConfigurationSpec{ + DeprecatedBuild: &runtime.RawExtension{}, + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + want: apis.ErrDisallowedFields("build"), + }, { + name: "no revision template", + c: &ConfigurationSpec{ + DeprecatedBuild: &runtime.RawExtension{}, + }, + want: apis.ErrMissingOneOf("revisionTemplate", "template"), + }, { + name: "too many revision templates", + c: &ConfigurationSpec{ + 
DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + want: apis.ErrMultipleOneOf("revisionTemplate", "template"), + }, { + name: "just template", + c: &ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "just template (don't allow deprecated fields)", + c: &ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedConcurrencyModel: "Multi", + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + want: apis.ErrDisallowedFields( + "template.spec.concurrencyModel", "template.spec.container"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.c.Validate(context.Background()).Error()); diff != "" { + t.Errorf("validateContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestConfigurationValidation(t *testing.T) { + tests := []struct { + name string + c *Configuration + want *apis.FieldError + }{{ + name: "valid", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "propagate revision failures", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "stuart", + Image: "hellworld", + 
Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.revisionTemplate.spec.container.lifecycle"), + }, { + name: "propagate revision failures (template)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.template.spec.container"), + }, { + name: "empty spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + }, + want: apis.ErrMissingField("spec"), + }, { + name: "invalid name - dots", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + }, + want: (&apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }).Also(apis.ErrMissingField("spec")), + }, { + name: "invalid name - too long", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + }, + want: (&apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"metadata.name"}, + }).Also(apis.ErrMissingField("spec")), + }, { + name: "valid BYO name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "name or generateName is required", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid BYO name (with generateName)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "byo-name-", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.revisionTemplate.metadata.name"), + }, { + name: "invalid BYO name (not prefixed)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + 
Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue(`"foo" must have prefix "byo-name-"`, + "spec.revisionTemplate.metadata.name"), + }, { + name: "invalid name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo@bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.revisionTemplate.metadata.name"), + }, { + name: "invalid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "foo@bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.revisionTemplate.metadata.generateName"), + }, { + name: "valid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "valid-generatename", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: nil, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.c.Validate(context.Background()).Error()); diff != "" { + t.Errorf("validateContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestImmutableConfigurationFields(t *testing.T) { + tests := []struct { + name string + new *Configuration + old *Configuration + want *apis.FieldError + }{{ + name: "without byo-name", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name (no change)", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "bad byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.revisionTemplate.metadata.name"}, + Details: 
"{*v1alpha1.RevisionTemplateSpec}.Spec.DeprecatedContainer.Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + if diff := cmp.Diff(test.want.Error(), test.new.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestConfigurationSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + config *Configuration + subresource string + want *apis.FieldError + }{{ + name: "status update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "non-status sub resource update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + 
}, + }, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.revisionTemplate.spec.timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinSubResourceUpdate(ctx, test.config, test.subresource) + if diff := cmp.Diff(test.want.Error(), test.config.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getConfigurationSpec(image string) ConfigurationSpec { + return ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{Containers: []corev1.Container{{ + Image: image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + }, + } +} + +func TestConfigurationAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Configuration + this *Configuration + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: &apis.FieldError{Message: "annotation value is immutable", + Paths: []string{"metadata.annotations." + serving.CreatorAnnotation}}, + }, { + name: "update creator annotation with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: &apis.FieldError{Message: "annotation value is immutable", + Paths: []string{"metadata.annotations." + serving.CreatorAnnotation}}, + }, { + name: "update lastModifier without spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, "metadata.annotations."+serving.UpdaterAnnotation), + }, { + name: "update lastModifier with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", 
+ Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1alpha1", + Kind: serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1alpha1", + Kind: serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/conversion_error.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/conversion_error.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error.go index 96a40def77..ddad8e3b22 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/conversion_error.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error.go @@ -19,7 +19,7 @@ package v1alpha1 import ( "fmt" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) const ( diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error_test.go new file mode 100644 index 0000000000..becb4f2441 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/conversion_error_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import "testing" + +func TestConvertError(t *testing.T) { + ce := ConvertErrorf("field", "foo %v %v %v", "bar", true, 42) + + if got, want := ce.Error(), "foo bar true 42"; got != want { + t.Errorf("Error() = %s, wanted %s", got, want) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/doc.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/doc.go diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/register.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/register.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register.go index 0bad032a10..7f07381570 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register_test.go new file mode 100644 index 0000000000..cca560a8b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/register_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" +) + +func TestRegisterHelpers(t *testing.T) { + if got, want := Kind("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Kind(Revision) = %v, want %v", got.String(), want) + } + + if got, want := Resource("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Resource(Revision) = %v, want %v", got.String(), want) + } + + if got, want := SchemeGroupVersion.String(), "serving.knative.dev/v1alpha1"; got != want { + t.Errorf("SchemeGroupVersion() = %v, want %v", got, want) + } + + scheme := runtime.NewScheme() + if err := addKnownTypes(scheme); err != nil { + t.Errorf("addKnownTypes() = %v", err) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion.go similarity index 87% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion.go index 6a8f9db8ec..b234f4a286 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion.go @@ -20,10 +20,11 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/ptr" - "github.com/knative/serving/pkg/apis/serving/v1beta1" corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" 
+ v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" ) // ConvertUp implements apis.Convertible @@ -39,18 +40,19 @@ func (source *Revision) ConvertUp(ctx context.Context, obj apis.Convertible) err } // ConvertUp helps implement apis.Convertible -func (source *RevisionTemplateSpec) ConvertUp(ctx context.Context, sink *v1beta1.RevisionTemplateSpec) error { +func (source *RevisionTemplateSpec) ConvertUp(ctx context.Context, sink *v1.RevisionTemplateSpec) error { sink.ObjectMeta = source.ObjectMeta return source.Spec.ConvertUp(ctx, &sink.Spec) } // ConvertUp helps implement apis.Convertible -func (source *RevisionSpec) ConvertUp(ctx context.Context, sink *v1beta1.RevisionSpec) error { - sink.ContainerConcurrency = v1beta1.RevisionContainerConcurrencyType( - source.ContainerConcurrency) +func (source *RevisionSpec) ConvertUp(ctx context.Context, sink *v1.RevisionSpec) error { if source.TimeoutSeconds != nil { sink.TimeoutSeconds = ptr.Int64(*source.TimeoutSeconds) } + if source.ContainerConcurrency != nil { + sink.ContainerConcurrency = ptr.Int64(*source.ContainerConcurrency) + } switch { case source.DeprecatedContainer != nil && len(source.Containers) > 0: return apis.ErrMultipleOneOf("container", "containers") @@ -59,6 +61,7 @@ func (source *RevisionSpec) ConvertUp(ctx context.Context, sink *v1beta1.Revisio ServiceAccountName: source.ServiceAccountName, Containers: []corev1.Container{*source.DeprecatedContainer}, Volumes: source.Volumes, + ImagePullSecrets: source.ImagePullSecrets, } case len(source.Containers) == 1: sink.PodSpec = source.PodSpec @@ -75,7 +78,7 @@ func (source *RevisionSpec) ConvertUp(ctx context.Context, sink *v1beta1.Revisio } // ConvertUp helps implement apis.Convertible -func (source *RevisionStatus) ConvertUp(ctx context.Context, sink *v1beta1.RevisionStatus) { +func (source *RevisionStatus) ConvertUp(ctx context.Context, sink *v1.RevisionStatus) { source.Status.ConvertTo(ctx, &sink.Status) 
sink.ServiceName = source.ServiceName @@ -96,19 +99,19 @@ func (sink *Revision) ConvertDown(ctx context.Context, obj apis.Convertible) err } // ConvertDown helps implement apis.Convertible -func (sink *RevisionTemplateSpec) ConvertDown(ctx context.Context, source v1beta1.RevisionTemplateSpec) error { +func (sink *RevisionTemplateSpec) ConvertDown(ctx context.Context, source v1.RevisionTemplateSpec) error { sink.ObjectMeta = source.ObjectMeta return sink.Spec.ConvertDown(ctx, source.Spec) } // ConvertDown helps implement apis.Convertible -func (sink *RevisionSpec) ConvertDown(ctx context.Context, source v1beta1.RevisionSpec) error { +func (sink *RevisionSpec) ConvertDown(ctx context.Context, source v1.RevisionSpec) error { sink.RevisionSpec = *source.DeepCopy() return nil } // ConvertDown helps implement apis.Convertible -func (sink *RevisionStatus) ConvertDown(ctx context.Context, source v1beta1.RevisionStatus) { +func (sink *RevisionStatus) ConvertDown(ctx context.Context, source v1.RevisionStatus) { source.Status.ConvertTo(ctx, &sink.Status) sink.ServiceName = source.ServiceName diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion_test.go new file mode 100644 index 0000000000..e2d43101d9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_conversion_test.go @@ -0,0 +1,293 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +func TestRevisionConversionBadType(t *testing.T) { + good, bad := &Revision{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} + +func TestRevisionConversion(t *testing.T) { + tests := []struct { + name string + in *Revision + badField string + }{{ + name: "good roundtrip w/ lots of parts", + in: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + Status: RevisionStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ServiceName: "foo-bar", + LogURL: "http://logger.io", + }, + }, + }, { + name: "bad roundtrip w/ build ref", + badField: "buildRef", + in: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: 
"blah", + Generation: 1, + }, + Spec: RevisionSpec{ + DeprecatedBuildRef: &corev1.ObjectReference{ + APIVersion: "build.knative.dev/v1alpha1", + Kind: "Build", + Name: "foo", + }, + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + Status: RevisionStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + }, + }, + }} + + toDeprecated := func(in *Revision) *Revision { + out := in.DeepCopy() + out.Spec.DeprecatedContainer = &out.Spec.Containers[0] + out.Spec.Containers = nil + return out + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + beta := &v1beta1.Revision{} + if err := test.in.ConvertUp(context.Background(), beta); err != nil { + if test.badField != "" { + cce, ok := err.(*CannotConvertError) + if ok && cce.Field == test.badField { + return + } + } + t.Errorf("ConvertUp() = %v", err) + } else if test.badField != "" { + t.Errorf("CovnertUp() = %#v, wanted bad field %q", beta, + test.badField) + return + } + got := &Revision{} + if err := got.ConvertDown(context.Background(), beta); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + + // A variant of the test that uses `container:`, + // but end up with what we have above anyways. 
+ t.Run(test.name+" (deprecated)", func(t *testing.T) { + start := toDeprecated(test.in) + beta := &v1beta1.Revision{} + if err := start.ConvertUp(context.Background(), beta); err != nil { + if test.badField != "" { + cce, ok := err.(*CannotConvertError) + if ok && cce.Field == test.badField { + return + } + } + t.Errorf("ConvertUp() = %v", err) + } else if test.badField != "" { + t.Errorf("CovnertUp() = %#v, wanted bad field %q", beta, + test.badField) + return + } + got := &Revision{} + if err := got.ConvertDown(context.Background(), beta); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } +} + +func TestRevisionConversionError(t *testing.T) { + tests := []struct { + name string + in *Revision + want *apis.FieldError + }{{ + name: "multiple containers in podspec", + in: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + }, { + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + Status: RevisionStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ServiceName: "foo-bar", + LogURL: "http://logger.io", + }, + }, + want: apis.ErrMultipleOneOf("containers"), + }, { + name: "no containers in podspec", + in: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + Status: RevisionStatus{ + Status: 
duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ServiceName: "foo-bar", + LogURL: "http://logger.io", + }, + }, + want: apis.ErrMissingOneOf("container", "containers"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + beta := &v1beta1.Revision{} + got := test.in.ConvertUp(context.Background(), beta) + if got == nil { + t.Errorf("ConvertUp() = %#v, wanted %v", beta, test.want) + } + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults.go similarity index 69% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults.go index 9ac6f63695..887487fa95 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults.go @@ -19,31 +19,36 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" corev1 "k8s.io/api/core/v1" - - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) func (r *Revision) SetDefaults(ctx context.Context) { r.Spec.SetDefaults(apis.WithinSpec(ctx)) } +// SetDefaults implements apis.Defaultable +func (rts *RevisionTemplateSpec) SetDefaults(ctx context.Context) { + rts.Spec.SetDefaults(apis.WithinSpec(ctx)) +} + func (rs *RevisionSpec) SetDefaults(ctx context.Context) { - if v1beta1.IsUpgradeViaDefaulting(ctx) { - beta := v1beta1.RevisionSpec{} - if rs.ConvertUp(ctx, &beta) == nil { + if v1.IsUpgradeViaDefaulting(ctx) { + v1 := v1.RevisionSpec{} + if 
rs.ConvertUp(ctx, &v1) == nil { alpha := RevisionSpec{} - if alpha.ConvertDown(ctx, beta) == nil { + if alpha.ConvertDown(ctx, v1) == nil { *rs = alpha } } } // When ConcurrencyModel is specified but ContainerConcurrency - // is not (0), use the ConcurrencyModel value. - if rs.DeprecatedConcurrencyModel == RevisionRequestConcurrencyModelSingle && rs.ContainerConcurrency == 0 { - rs.ContainerConcurrency = 1 + // is not (`nil`), use the ConcurrencyModel value. + if rs.DeprecatedConcurrencyModel == DeprecatedRevisionRequestConcurrencyModelSingle && rs.ContainerConcurrency == nil { + rs.ContainerConcurrency = ptr.Int64(1) } // When the PodSpec has no containers, move the single Container diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults_test.go new file mode 100644 index 0000000000..f905243c7c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_defaults_test.go @@ -0,0 +1,312 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/config" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +var defaultProbe = &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, +} + +func TestRevisionDefaulting(t *testing.T) { + tests := []struct { + name string + in *Revision + want *Revision + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Revision{}, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, { + name: "shell", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, { + name: "with context", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }}, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "123", + }, + }) + + return s.ToContext(ctx) + }, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + 
ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(123), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, { + name: "readonly volumes", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + }}, + }, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + ReadOnly: true, + }}, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + }, { + name: "lemonade", + wc: v1.WithUpgradeViaDefaulting, + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + }}, + }, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + ReadOnly: true, + }}, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + }, { + name: "lemonade (no overwrite)", + wc: v1.WithUpgradeViaDefaulting, + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "bar", + }, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: 
ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "foo", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "bar", + }, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "foo", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, { + name: "no overwrite", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, { + name: "partially initialized", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(123), + }, + }, + }, + want: &Revision{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(123), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, { + name: "fall back to concurrency model", + in: &Revision{ + Spec: RevisionSpec{ + DeprecatedConcurrencyModel: "Single", + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: nil, // unspecified + }, + }, + }, + want: &Revision{ + Spec: 
RevisionSpec{ + DeprecatedConcurrencyModel: "Single", + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if diff := cmp.Diff(test.want, got, ignoreUnexportedResources); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go similarity index 56% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go index b54212249b..f8ca021bdc 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle.go @@ -21,12 +21,15 @@ import ( "strconv" "time" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - net "github.com/knative/serving/pkg/apis/networking" - "github.com/knative/serving/pkg/apis/serving" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/config" + net "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" ) const ( @@ -41,7 +44,7 @@ const ( // QueueAdminPortName specifies the port name for // health 
check and lifecycle hooks for queue-proxy. - QueueAdminPortName string = "queueadm-port" + QueueAdminPortName string = "http-queueadm" // AutoscalingQueueMetricsPortName specifies the port name to use for metrics // emitted by queue-proxy for autoscaler. @@ -49,11 +52,7 @@ const ( // UserQueueMetricsPortName specifies the port name to use for metrics // emitted by queue-proxy for end user. - UserQueueMetricsPortName = "user-metrics" - - // ServiceQueueMetricsPortName is the name of the port that serves metrics - // on the Kubernetes service. - ServiceQueueMetricsPortName = "metrics" + UserQueueMetricsPortName = "http-usermetric" ) var revCondSet = apis.NewLivingConditionSet( @@ -79,6 +78,20 @@ func (rs *RevisionSpec) GetContainer() *corev1.Container { return &corev1.Container{} } +// GetContainerConcurrency returns the container concurrency. If +// container concurrency is not set, the default value will be returned. +// We use the original default (0) here for backwards compatibility. +// Previous versions of Knative equated unspecified and zero, so to avoid +// changing the value used by Revisions with unspecified values when a different +// default is configured, we use the original default instead of the configured +// default to remain safe across upgrades. +func (rs *RevisionSpec) GetContainerConcurrency() int64 { + if rs.ContainerConcurrency == nil { + return config.DefaultContainerConcurrency + } + return *rs.ContainerConcurrency +} + func (r *Revision) DeprecatedBuildRef() *corev1.ObjectReference { if r.Spec.DeprecatedBuildRef != nil { buildRef := r.Spec.DeprecatedBuildRef.DeepCopy() @@ -144,60 +157,94 @@ func (rs *RevisionStatus) MarkResourceNotConvertible(err *CannotConvertError) { }) } -// MarkResourceNotOwned changes the "ResourcesAvailable" condition to false to reflect that the -// resource of the given kind and name has already been created, and we do not own it. 
-func (rs *RevisionStatus) MarkResourceNotOwned(kind, name string) { - revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, "NotOwned", - fmt.Sprintf("There is an existing %s %q that we do not own.", kind, name)) -} +const ( + // NotOwned defines the reason for marking revision availability status as + // false due to resource ownership issues. + NotOwned = "NotOwned" + + // Deploying defines the reason for marking revision availability status as + // unknown if the revision is still deploying. + Deploying = "Deploying" -func (rs *RevisionStatus) MarkDeploying(reason string) { - revCondSet.Manage(rs).MarkUnknown(RevisionConditionResourcesAvailable, reason, "") - revCondSet.Manage(rs).MarkUnknown(RevisionConditionContainerHealthy, reason, "") + // ProgressDeadlineExceeded defines the reason for marking revision availability + // status as false if progress has exceeded the deadline. + ProgressDeadlineExceeded = "ProgressDeadlineExceeded" + + // ContainerMissing defines the reason for marking container healthiness status + // as false if the a container image for the revision is missing. 
+ ContainerMissing = "ContainerMissing" +) + +// MarkResourcesAvailableTrue marks ResourcesAvailable status on revision as True +func (rs *RevisionStatus) MarkResourcesAvailableTrue() { + revCondSet.Manage(rs).MarkTrue(RevisionConditionResourcesAvailable) } -func (rs *RevisionStatus) MarkServiceTimeout() { - revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, "ServiceTimeout", - "Timed out waiting for a service endpoint to become ready") +// MarkResourcesAvailableFalse marks ResourcesAvailable status on revision as False +func (rs *RevisionStatus) MarkResourcesAvailableFalse(reason, message string) { + revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, reason, message) } -func (rs *RevisionStatus) MarkProgressDeadlineExceeded(message string) { - revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, "ProgressDeadlineExceeded", message) +// MarkResourcesAvailableUnknown marks ResourcesAvailable status on revision as Unknown +func (rs *RevisionStatus) MarkResourcesAvailableUnknown(reason, message string) { + revCondSet.Manage(rs).MarkUnknown(RevisionConditionResourcesAvailable, reason, message) } -func (rs *RevisionStatus) MarkContainerHealthy() { +// MarkContainerHealthyTrue marks ContainerHealthy status on revision as True +func (rs *RevisionStatus) MarkContainerHealthyTrue() { revCondSet.Manage(rs).MarkTrue(RevisionConditionContainerHealthy) } -func (rs *RevisionStatus) MarkContainerExiting(exitCode int32, message string) { - exitCodeString := fmt.Sprintf("ExitCode%d", exitCode) - revCondSet.Manage(rs).MarkFalse(RevisionConditionContainerHealthy, exitCodeString, RevisionContainerExitingMessage(message)) +// MarkContainerHealthyFalse marks ContainerHealthy status on revision as False +func (rs *RevisionStatus) MarkContainerHealthyFalse(reason, message string) { + revCondSet.Manage(rs).MarkFalse(RevisionConditionContainerHealthy, reason, message) } -func (rs *RevisionStatus) MarkResourcesAvailable() { - 
revCondSet.Manage(rs).MarkTrue(RevisionConditionResourcesAvailable) +// MarkContainerHealthyUnknown marks ContainerHealthy status on revision as Unknown +func (rs *RevisionStatus) MarkContainerHealthyUnknown(reason, message string) { + revCondSet.Manage(rs).MarkUnknown(RevisionConditionContainerHealthy, reason, message) } -// MarkResourcesUnavailable changes "ResourcesAvailable" condition to false to reflect that the -// resources of the given kind and name cannot be created. -func (rs *RevisionStatus) MarkResourcesUnavailable(reason, message string) { - revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, reason, message) +// MarkActiveTrue marks Active status on revision as True +func (rs *RevisionStatus) MarkActiveTrue() { + revCondSet.Manage(rs).MarkTrue(RevisionConditionActive) } -func (rs *RevisionStatus) MarkActive() { - revCondSet.Manage(rs).MarkTrue(RevisionConditionActive) +// MarkActiveFalse marks Active status on revision as False +func (rs *RevisionStatus) MarkActiveFalse(reason, message string) { + revCondSet.Manage(rs).MarkFalse(RevisionConditionActive, reason, message) } -func (rs *RevisionStatus) MarkActivating(reason, message string) { +// MarkActiveUnknown marks Active status on revision as Unknown +func (rs *RevisionStatus) MarkActiveUnknown(reason, message string) { revCondSet.Manage(rs).MarkUnknown(RevisionConditionActive, reason, message) } -func (rs *RevisionStatus) MarkInactive(reason, message string) { - revCondSet.Manage(rs).MarkFalse(RevisionConditionActive, reason, message) -} +// PropagateAutoscalerStatus propagates autoscaler's status to the revision's status. +func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *av1alpha1.PodAutoscalerStatus) { + // Propagate the service name from the PA. + rs.ServiceName = ps.ServiceName + + // Reflect the PA status in our own. 
+ cond := ps.GetCondition(av1alpha1.PodAutoscalerConditionReady) + if cond == nil { + rs.MarkActiveUnknown("Deploying", "") + return + } -func (rs *RevisionStatus) MarkContainerMissing(message string) { - revCondSet.Manage(rs).MarkFalse(RevisionConditionContainerHealthy, "ContainerMissing", message) + switch cond.Status { + case corev1.ConditionUnknown: + rs.MarkActiveUnknown(cond.Reason, cond.Message) + case corev1.ConditionFalse: + rs.MarkActiveFalse(cond.Reason, cond.Message) + case corev1.ConditionTrue: + rs.MarkActiveTrue() + + // Precondition for PA being active is SKS being active and + // that entices that |service.endpoints| > 0. + rs.MarkResourcesAvailableTrue() + rs.MarkContainerHealthyTrue() + } } // RevisionContainerMissingMessage constructs the status message if a given image @@ -212,6 +259,17 @@ func RevisionContainerExitingMessage(message string) string { return fmt.Sprintf("Container failed with: %s", message) } +// ResourceNotOwnedMessage constructs the status message if ownership on the +// resource is not right. +func ResourceNotOwnedMessage(kind, name string) string { + return fmt.Sprintf("There is an existing %s %q that we do not own.", kind, name) +} + +// ExitCodeReason constructs the status message from an exit code +func ExitCodeReason(exitCode int32) string { + return fmt.Sprintf("ExitCode%d", exitCode) +} + const ( AnnotationParseErrorTypeMissing = "Missing" AnnotationParseErrorTypeInvalid = "Invalid" @@ -272,6 +330,30 @@ func (r *Revision) GetLastPinned() (time.Time, error) { return time.Unix(secs, 0), nil } -func (rs *RevisionStatus) duck() *duckv1beta1.Status { +// IsReachable returns whether or not the revision can be reached by a route. +func (r *Revision) IsReachable() bool { + return r.ObjectMeta.Labels[serving.RouteLabelKey] != "" +} + +func (rs *RevisionStatus) duck() *duckv1.Status { return &rs.Status } + +// PropagateDeploymentStatus takes the Deployment status and applies its values +// to the Revision status. 
+func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentStatus) { + ds := serving.TransformDeploymentStatus(original) + cond := ds.GetCondition(serving.DeploymentConditionReady) + if cond == nil { + return + } + + switch cond.Status { + case corev1.ConditionUnknown: + revCondSet.Manage(rs).MarkUnknown(RevisionConditionResourcesAvailable, cond.Reason, cond.Message) + case corev1.ConditionTrue: + revCondSet.Manage(rs).MarkTrue(RevisionConditionResourcesAvailable) + case corev1.ConditionFalse: + revCondSet.Manage(rs).MarkFalse(RevisionConditionResourcesAvailable, cond.Reason, cond.Message) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go new file mode 100644 index 0000000000..c97398ce06 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_lifecycle_test.go @@ -0,0 +1,843 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + apitestv1 "knative.dev/pkg/apis/testing/v1" + "knative.dev/pkg/ptr" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + net "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRevisionDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Revision{}, test.t) + if err != nil { + t.Errorf("VerifyType(Revision, %T) = %v", test.t, err) + } + }) + } +} + +func TestIsActivationRequired(t *testing.T) { + cases := []struct { + name string + status RevisionStatus + isActivationRequired bool + }{{ + name: "empty status should not be inactive", + status: RevisionStatus{}, + isActivationRequired: false, + }, { + name: "Ready status should not be inactive", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isActivationRequired: false, + }, { + name: "Inactive status should be inactive", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionActive, + Status: corev1.ConditionFalse, + }}, + }, + }, + isActivationRequired: true, + }, { + name: "Updating status should be inactive", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: 
corev1.ConditionUnknown, + Reason: "Updating", + }, { + Type: RevisionConditionActive, + Status: corev1.ConditionUnknown, + Reason: "Updating", + }}, + }, + }, + isActivationRequired: true, + }, { + name: "NotReady status without reason should not be inactive", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isActivationRequired: false, + }, { + name: "Ready/Unknown status without reason should not be inactive", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isActivationRequired: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.isActivationRequired, tc.status.IsActivationRequired(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestIsReady(t *testing.T) { + cases := []struct { + name string + status RevisionStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: RevisionStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionResourcesAvailable, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: 
RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionResourcesAvailable, + Status: corev1.ConditionTrue, + }, { + Type: RevisionConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status false should not be ready", + status: RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionResourcesAvailable, + Status: corev1.ConditionTrue, + }, { + Type: RevisionConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestGetSetCondition(t *testing.T) { + rs := &RevisionStatus{} + if a := rs.GetCondition(RevisionConditionReady); a != nil { + t.Errorf("empty RevisionStatus returned %v when expected nil", a) + } + + rc := &apis.Condition{ + Type: RevisionConditionResourcesAvailable, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + } + + rs.MarkResourcesAvailableTrue() + + if diff := cmp.Diff(rc, rs.GetCondition(RevisionConditionResourcesAvailable), cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime")); diff != "" { + t.Errorf("GetCondition refs diff (-want +got): %v", diff) + } + if a := rs.GetCondition(RevisionConditionReady); a != nil { + t.Errorf("GetCondition expected nil got: %v", a) + } +} + 
+func TestTypicalFlowWithProgressDeadlineExceeded(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + const want = "the error message" + r.MarkResourcesAvailableFalse(ProgressDeadlineExceeded, want) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionReady, t) + if got := r.GetCondition(RevisionConditionResourcesAvailable); got == nil || got.Message != want { + t.Errorf("MarkProgressDeadlineExceeded = %v, want %v", got, want) + } + if got := r.GetCondition(RevisionConditionReady); got == nil || got.Message != want { + t.Errorf("MarkProgressDeadlineExceeded = %v, want %v", got, want) + } +} + +func TestTypicalFlowWithContainerMissing(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + const want = "something about the container being not found" + r.MarkContainerHealthyFalse(ContainerMissing, want) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) + if got := r.GetCondition(RevisionConditionContainerHealthy); got == nil || got.Message != want { + t.Errorf("MarkContainerMissing = %v, want %v", got, want) + } else if got.Reason != "ContainerMissing" { + t.Errorf("MarkContainerMissing = %v, want %v", got, "ContainerMissing") + } + if got := r.GetCondition(RevisionConditionReady); got == nil || 
got.Message != want { + t.Errorf("MarkContainerMissing = %v, want %v", got, want) + } else if got.Reason != "ContainerMissing" { + t.Errorf("MarkContainerMissing = %v, want %v", got, "ContainerMissing") + } +} + +func TestTypicalFlowWithSuspendResume(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + // Enter a Ready state. + r.MarkActiveTrue() + r.MarkContainerHealthyTrue() + r.MarkResourcesAvailableTrue() + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + + // From a Ready state, make the revision inactive to simulate scale to zero. + const want = "Deactivated" + r.MarkActiveFalse(want, "Reserve") + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionActive, t) + if got := r.GetCondition(RevisionConditionActive); got == nil || got.Reason != want { + t.Errorf("MarkInactive = %v, want %v", got, want) + } + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + + // From an Inactive state, start to activate the revision. 
+ const want2 = "Activating" + r.MarkActiveUnknown(want2, "blah blah blah") + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionActive, t) + if got := r.GetCondition(RevisionConditionActive); got == nil || got.Reason != want2 { + t.Errorf("MarkInactive = %v, want %v", got, want2) + } + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + + // From the activating state, simulate the transition back to readiness. + r.MarkActiveTrue() + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) +} + +func TestRevisionNotOwnedStuff(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + const want = "NotOwned" + r.MarkResourcesAvailableFalse(NotOwned, "mark") + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionReady, t) + if got := r.GetCondition(RevisionConditionResourcesAvailable); got == nil || got.Reason != want { + t.Errorf("MarkResourceNotOwned = %v, want %v", got, want) + } + if got := r.GetCondition(RevisionConditionReady); got == nil || got.Reason != want { + t.Errorf("MarkResourceNotOwned = %v, want %v", got, want) + } +} + +func TestRevisionResourcesUnavailable(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionResourcesAvailable, t) + 
apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + const wantReason, wantMessage = "unschedulable", "insufficient energy" + r.MarkResourcesAvailableFalse(wantReason, wantMessage) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionReady, t) + if got := r.GetCondition(RevisionConditionResourcesAvailable); got == nil || got.Reason != wantReason { + t.Errorf("RevisionConditionResourcesAvailable = %v, want %v", got, wantReason) + } + if got := r.GetCondition(RevisionConditionResourcesAvailable); got == nil || got.Message != wantMessage { + t.Errorf("RevisionConditionResourcesAvailable = %v, want %v", got, wantMessage) + } +} + +func TestRevisionGetGroupVersionKind(t *testing.T) { + r := &Revision{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1alpha1", + Kind: "Revision", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestRevisionBuildRefFromName(t *testing.T) { + r := &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo-space", + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedBuildName: "bar-build", + }, + } + got := *r.DeprecatedBuildRef() + want := corev1.ObjectReference{ + APIVersion: "build.knative.dev/v1alpha1", + Kind: "Build", + Namespace: "foo-space", + Name: "bar-build", + } + if got != want { + t.Errorf("got: %#v, want: %#v", got, want) + } +} + +func TestRevisionBuildRef(t *testing.T) { + buildRef := corev1.ObjectReference{ + APIVersion: "testing.build.knative.dev/v1alpha1", + Kind: "Build", + Namespace: "foo-space", + Name: "foo-build", + } + r := &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo-space", + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedBuildName: "bar", + DeprecatedBuildRef: &buildRef, + }, + } + got := 
*r.DeprecatedBuildRef() + want := buildRef + if got != want { + t.Errorf("got: %#v, want: %#v", got, want) + } +} + +func TestRevisionBuildRefNil(t *testing.T) { + r := &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo-space", + Name: "foo", + }, + } + got := r.DeprecatedBuildRef() + + var want *corev1.ObjectReference + if got != want { + t.Errorf("got: %#v, want: %#v", got, want) + } +} + +func TestRevisionGetProtocol(t *testing.T) { + containerWithPortName := func(name string) corev1.Container { + return corev1.Container{Ports: []corev1.ContainerPort{{Name: name}}} + } + + tests := []struct { + name string + container corev1.Container + protocol net.ProtocolType + }{{ + name: "undefined", + container: corev1.Container{}, + protocol: net.ProtocolHTTP1, + }, { + name: "http1", + container: containerWithPortName("http1"), + protocol: net.ProtocolHTTP1, + }, { + name: "h2c", + container: containerWithPortName("h2c"), + protocol: net.ProtocolH2C, + }, { + name: "unknown", + container: containerWithPortName("whatever"), + protocol: net.ProtocolHTTP1, + }, { + name: "empty", + container: containerWithPortName(""), + protocol: net.ProtocolHTTP1, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Revision{ + Spec: RevisionSpec{ + DeprecatedContainer: &tt.container, + }, + } + + got := r.GetProtocol() + want := tt.protocol + + if got != want { + t.Errorf("got: %#v, want: %#v", got, want) + } + }) + } +} + +func TestRevisionGetLastPinned(t *testing.T) { + cases := []struct { + name string + annotations map[string]string + expectTime time.Time + setLastPinnedTime time.Time + expectErr error + }{{ + name: "Nil annotations", + annotations: nil, + expectErr: LastPinnedParseError{ + Type: AnnotationParseErrorTypeMissing, + }, + }, { + name: "Empty map annotations", + annotations: map[string]string{}, + expectErr: LastPinnedParseError{ + Type: AnnotationParseErrorTypeMissing, + }, + }, { + name: "Empty map annotations - with set time", + 
annotations: map[string]string{}, + setLastPinnedTime: time.Unix(1000, 0), + expectTime: time.Unix(1000, 0), + }, { + name: "Invalid time", + annotations: map[string]string{serving.RevisionLastPinnedAnnotationKey: "abcd"}, + expectErr: LastPinnedParseError{ + Type: AnnotationParseErrorTypeInvalid, + Value: "abcd", + }, + }, { + name: "Valid time", + annotations: map[string]string{serving.RevisionLastPinnedAnnotationKey: "10000"}, + expectTime: time.Unix(10000, 0), + }, { + name: "Valid time empty annotations", + annotations: nil, + setLastPinnedTime: time.Unix(1000, 0), + expectTime: time.Unix(1000, 0), + expectErr: nil, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + rev := Revision{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: tc.annotations, + }, + } + + if tc.setLastPinnedTime != (time.Time{}) { + rev.SetLastPinned(tc.setLastPinnedTime) + } + + pt, err := rev.GetLastPinned() + failErr := func() { + t.Fatalf("Expected error %v got %v", tc.expectErr, err) + } + + if tc.expectErr == nil { + if err != nil { + failErr() + } + } else { + if tc.expectErr.Error() != err.Error() { + failErr() + } + } + + if tc.expectTime != pt { + t.Fatalf("Expected pin time %v got %v", tc.expectTime, pt) + } + }) + } +} + +func TestRevisionIsReachable(t *testing.T) { + tests := []struct { + name string + labels map[string]string + want bool + }{{ + name: "has route annotation", + labels: map[string]string{serving.RouteLabelKey: "the-route"}, + want: true, + }, { + name: "empty route annotation", + labels: map[string]string{serving.RouteLabelKey: ""}, + want: false, + }, { + name: "no route annotation", + labels: nil, + want: false, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rev := Revision{ObjectMeta: metav1.ObjectMeta{Labels: tt.labels}} + + got := rev.IsReachable() + + if got != tt.want { + t.Errorf("got: %t, want: %t", got, tt.want) + } + }) + } +} + +func TestPropagateDeploymentStatus(t *testing.T) { + rev := 
&RevisionStatus{} + rev.InitializeConditions() + + // We start out ongoing. + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionResourcesAvailable, t) + + // Empty deployment conditions shouldn't affect our readiness. + rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{}, + }) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionResourcesAvailable, t) + + // Deployment failures should be propagated and not affect ContainerHealthy. + rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionUnknown, + }}, + }) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionContainerHealthy, t) + + // Marking container healthy doesn't affect deployment status. + rev.MarkContainerHealthyTrue() + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) + + // We can recover from deployment failures. 
+ rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }}, + }) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) + + // We can go unknown. + rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionUnknown, + }}, + }) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionOngoing(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) + + // ReplicaFailure=True translates into Ready=False. + rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionTrue, + }}, + }) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) + + // ReplicaFailure=True trumps Progressing=Unknown. 
+ rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionUnknown, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionTrue, + }}, + }) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionFailed(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) + + // ReplicaFailure=False + Progressing=True yields Ready. + rev.PropagateDeploymentStatus(&appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }, { + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionFalse, + }}, + }) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionResourcesAvailable, t) + apitestv1.CheckConditionSucceeded(rev.duck(), RevisionConditionContainerHealthy, t) +} + +func TestPropagateAutoscalerStatus(t *testing.T) { + r := &RevisionStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionReady, t) + + // PodAutoscaler has no active condition, so we are just coming up. + r.PropagateAutoscalerStatus(&av1alpha1.PodAutoscalerStatus{ + Status: duckv1.Status{}, + }) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionActive, t) + + // PodAutoscaler becomes ready, making us active. 
+ r.PropagateAutoscalerStatus(&av1alpha1.PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.PodAutoscalerConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + + // PodAutoscaler flipping back to Unknown causes Active become ongoing immediately. + r.PropagateAutoscalerStatus(&av1alpha1.PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.PodAutoscalerConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }) + apitestv1.CheckConditionOngoing(r.duck(), RevisionConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + + // PodAutoscaler becoming unready makes Active false, but doesn't affect readiness. + r.PropagateAutoscalerStatus(&av1alpha1.PodAutoscalerStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.PodAutoscalerConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(r.duck(), RevisionConditionActive, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionReady, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionContainerHealthy, t) + apitestv1.CheckConditionSucceeded(r.duck(), RevisionConditionResourcesAvailable, t) +} + +func TestGetContainerConcurrency(t *testing.T) { + cases := []struct { + name string + status RevisionSpec + want int64 + }{{ + name: "empty revisionSpec should return default value", + status: RevisionSpec{}, + want: 0, + }, { + name: "get containerConcurrency by passing value", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(10), + }, + }, + want: 10, + }} + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if want, got := tc.want, tc.status.GetContainerConcurrency(); want != got { + 
t.Errorf("got: %v want: %v", got, want) + } + }) + } +} + +func TestGetContainer(t *testing.T) { + cases := []struct { + name string + status RevisionSpec + want *corev1.Container + }{{ + name: "empty revisionSpec should return default value", + status: RevisionSpec{}, + want: &corev1.Container{}, + }, { + name: "get deprecatedContainer info", + status: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "deprecatedContainer", + Image: "foo", + }, + }, + want: &corev1.Container{ + Name: "deprecatedContainer", + Image: "foo", + }, + }, { + name: "get first container info even after passing multiple", + status: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "firstContainer", + Image: "firstImage", + }, { + Name: "secondContainer", + Image: "secondImage", + }}, + }, + }, + }, + want: &corev1.Container{ + Name: "firstContainer", + Image: "firstImage", + }, + }} + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if want, got := tc.want, tc.status.GetContainer(); !equality.Semantic.DeepEqual(want, got) { + t.Errorf("got: %v want: %v", got, want) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_types.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_types.go index 3e58602dc6..e7a90c6571 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_types.go @@ -17,13 +17,13 @@ limitations under the License. 
package v1alpha1 import ( - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) // +genclient @@ -91,25 +91,26 @@ const ( DeprecatedRevisionServingStateRetired DeprecatedRevisionServingStateType = "Retired" ) -// RevisionRequestConcurrencyModelType is an enumeration of the +// DeprecatedRevisionRequestConcurrencyModelType is an enumeration of the // concurrency models supported by a Revision. -// DEPRECATED in favor of RevisionContainerConcurrencyType. -type RevisionRequestConcurrencyModelType string +// DEPRECATED in favor of an integer based ContainerConcurrency setting. +// TODO(vagababov): retire completely in 0.9. +type DeprecatedRevisionRequestConcurrencyModelType string const ( - // RevisionRequestConcurrencyModelSingle guarantees that only one + // DeprecatedRevisionRequestConcurrencyModelSingle guarantees that only one // request will be handled at a time (concurrently) per instance // of Revision Container. - RevisionRequestConcurrencyModelSingle RevisionRequestConcurrencyModelType = "Single" - // RevisionRequestConcurencyModelMulti allows more than one request to + DeprecatedRevisionRequestConcurrencyModelSingle DeprecatedRevisionRequestConcurrencyModelType = "Single" + // DeprecatedRevisionRequestConcurencyModelMulti allows more than one request to // be handled at a time (concurrently) per instance of Revision // Container. - RevisionRequestConcurrencyModelMulti RevisionRequestConcurrencyModelType = "Multi" + DeprecatedRevisionRequestConcurrencyModelMulti DeprecatedRevisionRequestConcurrencyModelType = "Multi" ) // RevisionSpec holds the desired state of the Revision (from the client). 
type RevisionSpec struct { - v1beta1.RevisionSpec `json:",inline"` + v1.RevisionSpec `json:",inline"` // DeprecatedGeneration was used prior in Kubernetes versions <1.11 // when metadata.generation was not being incremented by the api server @@ -134,7 +135,7 @@ type RevisionSpec struct { // Revision. Defaults to Multi. // Deprecated in favor of ContainerConcurrency. // +optional - DeprecatedConcurrencyModel RevisionRequestConcurrencyModelType `json:"concurrencyModel,omitempty"` + DeprecatedConcurrencyModel DeprecatedRevisionRequestConcurrencyModelType `json:"concurrencyModel,omitempty"` // DeprecatedBuildName optionally holds the name of the Build responsible for // producing the container image for its Revision. @@ -172,7 +173,7 @@ const ( // RevisionStatus communicates the observed state of the Revision (from the controller). type RevisionStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` // ServiceName holds the name of a core Kubernetes Service resource that // load balances over the pods backing this Revision. 
diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation.go similarity index 65% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation.go index f19536a946..713c56770b 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/revision_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation.go @@ -18,16 +18,12 @@ package v1alpha1 import ( "context" - "fmt" - "strconv" - "strings" - "github.com/knative/serving/pkg/apis/config" - - "github.com/knative/pkg/apis" - "github.com/knative/pkg/kmp" - "github.com/knative/serving/pkg/apis/serving" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/serving" ) func (r *Revision) checkImmutableFields(ctx context.Context, original *Revision) *apis.FieldError { @@ -62,29 +58,11 @@ func (r *Revision) Validate(ctx context.Context) *apis.FieldError { // Validate ensures RevisionTemplateSpec is properly configured. func (rt *RevisionTemplateSpec) Validate(ctx context.Context) *apis.FieldError { errs := rt.Spec.Validate(ctx).ViaField("spec") - + errs = errs.Also(autoscaling.ValidateAnnotations(rt.GetAnnotations()).ViaField("metadata.annotations")) // If the DeprecatedRevisionTemplate has a name specified, then check that // it follows the requirements on the name. - if rt.Name != "" { - om := apis.ParentMeta(ctx) - prefix := om.Name + "-" - if om.Name != "" { - // Even if there is GenerateName, allow the use - // of Name post-creation. - } else if om.GenerateName != "" { - // We disallow bringing your own name when the parent - // resource uses generateName (at creation). 
- return apis.ErrDisallowedFields("metadata.name") - } - - if !strings.HasPrefix(rt.Name, prefix) { - errs = errs.Also(apis.ErrInvalidValue( - fmt.Sprintf("%q must have prefix %q", rt.Name, prefix), - "metadata.name")) - } - } - - errs = errs.Also(validateAnnotations(rt.Annotations)) + errs = errs.Also(serving.ValidateRevisionName(ctx, rt.Name, rt.GenerateName)) + errs = errs.Also(serving.ValidateQueueSidecarAnnotation(rt.Annotations).ViaField("metadata.annotations")) return errs } @@ -109,7 +87,7 @@ func (current *RevisionTemplateSpec) VerifyNameChange(ctx context.Context, og *R } else if diff != "" { return &apis.FieldError{ Message: "Saw the following changes without a name change (-old +new)", - Paths: []string{apis.CurrentField}, + Paths: []string{"metadata.name"}, Details: diff, } } @@ -147,52 +125,17 @@ func (rs *RevisionSpec) Validate(ctx context.Context) *apis.FieldError { if err := rs.DeprecatedConcurrencyModel.Validate(ctx).ViaField("concurrencyModel"); err != nil { errs = errs.Also(err) } else { - errs = errs.Also(rs.ContainerConcurrency.Validate(ctx).ViaField("containerConcurrency")) + if rs.ContainerConcurrency != nil { + errs = errs.Also(serving.ValidateContainerConcurrency(rs.ContainerConcurrency).ViaField("containerConcurrency")) + } } if rs.TimeoutSeconds != nil { - errs = errs.Also(validateTimeoutSeconds(ctx, *rs.TimeoutSeconds)) + errs = errs.Also(serving.ValidateTimeoutSeconds(ctx, *rs.TimeoutSeconds)) } return errs } -func validateAnnotations(annotations map[string]string) *apis.FieldError { - return validatePercentageAnnotationKey(annotations, serving.QueueSideCarResourcePercentageAnnotation) -} - -func validatePercentageAnnotationKey(annotations map[string]string, resourcePercentageAnnotationKey string) *apis.FieldError { - if len(annotations) == 0 { - return nil - } - - v, ok := annotations[resourcePercentageAnnotationKey] - if !ok { - return nil - } - value, err := strconv.ParseFloat(v, 32) - if err != nil { - return 
apis.ErrInvalidValue(v, apis.CurrentField).ViaKey(resourcePercentageAnnotationKey) - } - - if value <= float64(0.1) || value > float64(100) { - return apis.ErrOutOfBoundsValue(value, 0.1, 100.0, resourcePercentageAnnotationKey) - } - - return nil -} - -func validateTimeoutSeconds(ctx context.Context, timeoutSeconds int64) *apis.FieldError { - if timeoutSeconds != 0 { - cfg := config.FromContextOrDefaults(ctx) - if timeoutSeconds > cfg.Defaults.MaxRevisionTimeoutSeconds || timeoutSeconds < 0 { - return apis.ErrOutOfBoundsValue(timeoutSeconds, 0, - cfg.Defaults.MaxRevisionTimeoutSeconds, - "timeoutSeconds") - } - } - return nil -} - // Validate ensures DeprecatedRevisionServingStateType is properly configured. func (ss DeprecatedRevisionServingStateType) Validate(ctx context.Context) *apis.FieldError { switch ss { @@ -207,11 +150,11 @@ func (ss DeprecatedRevisionServingStateType) Validate(ctx context.Context) *apis } // Validate ensures RevisionRequestConcurrencyModelType is properly configured. 
-func (cm RevisionRequestConcurrencyModelType) Validate(ctx context.Context) *apis.FieldError { +func (cm DeprecatedRevisionRequestConcurrencyModelType) Validate(ctx context.Context) *apis.FieldError { switch cm { - case RevisionRequestConcurrencyModelType(""), - RevisionRequestConcurrencyModelMulti, - RevisionRequestConcurrencyModelSingle: + case DeprecatedRevisionRequestConcurrencyModelType(""), + DeprecatedRevisionRequestConcurrencyModelMulti, + DeprecatedRevisionRequestConcurrencyModelSingle: return nil default: return apis.ErrInvalidValue(cm, apis.CurrentField) diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation_test.go new file mode 100644 index 0000000000..7362075688 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/revision_validation_test.go @@ -0,0 +1,887 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/config" + net "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestConcurrencyModelValidation(t *testing.T) { + tests := []struct { + name string + cm DeprecatedRevisionRequestConcurrencyModelType + want *apis.FieldError + }{{ + name: "single", + cm: DeprecatedRevisionRequestConcurrencyModelSingle, + want: nil, + }, { + name: "multi", + cm: DeprecatedRevisionRequestConcurrencyModelMulti, + want: nil, + }, { + name: "empty", + cm: "", + want: nil, + }, { + name: "bogus", + cm: "bogus", + want: apis.ErrInvalidValue("bogus", apis.CurrentField), + }, { + name: "balderdash", + cm: "balderdash", + want: apis.ErrInvalidValue("balderdash", apis.CurrentField), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cm.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestServingStateType(t *testing.T) { + tests := []struct { + name string + cm DeprecatedRevisionServingStateType + want *apis.FieldError + }{{ + name: "active", + cm: DeprecatedRevisionServingStateActive, + want: nil, + }, { + name: "reserve", + cm: DeprecatedRevisionServingStateReserve, + want: nil, + }, { + name: "retired", + cm: DeprecatedRevisionServingStateRetired, + want: nil, + }, { + name: "empty", + cm: "", + want: nil, + }, { + name: "bogus", + cm: "bogus", + want: apis.ErrInvalidValue("bogus", apis.CurrentField), + }, { + name: "balderdash", + cm: 
"balderdash", + want: apis.ErrInvalidValue("balderdash", apis.CurrentField), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.cm.Validate(context.Background()) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestRevisionSpecValidation(t *testing.T) { + tests := []struct { + name string + rs *RevisionSpec + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "valid", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + want: nil, + }, { + name: "invalid deprecated fields", + wc: apis.DisallowDeprecated, + rs: &RevisionSpec{ + DeprecatedGeneration: 123, + DeprecatedServingState: "Active", + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + DeprecatedConcurrencyModel: "Multi", + DeprecatedBuildName: "banana", + }, + want: apis.ErrDisallowedFields("buildName", "concurrencyModel", "container", + "generation", "servingState"), + }, { + name: "missing container", + rs: &RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + }, + }, + want: apis.ErrMissingOneOf("container", "containers"), + }, { + name: "more container", + rs: &RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + DeprecatedContainer: &corev1.Container{ + Name: "deprecatedContainer", + }, + }, + want: apis.ErrMultipleOneOf("container", "containers"), + }, { + name: "with ContainerConcurrency", + rs: &RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + ContainerConcurrency: ptr.Int64(10), + }, + }, + want: nil, + }, 
{ + name: "with volume (ok)", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "with volume name collision", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, { + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{}, + }, + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: fmt.Sprintf(`duplicate volume name "the-name"`), + Paths: []string{"name"}, + }).ViaFieldIndex("volumes", 1), + }, { + name: "has build ref (disallowed)", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + DeprecatedBuildRef: &corev1.ObjectReference{}, + }, + want: apis.ErrDisallowedFields("buildRef"), + }, { + name: "bad concurrency model", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + DeprecatedConcurrencyModel: "bogus", + }, + want: apis.ErrInvalidValue("bogus", "concurrencyModel"), + }, { + name: "bad container spec", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "steve", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + want: apis.ErrDisallowedFields("container.lifecycle"), + }, { + name: "exceed max timeout", + rs: &RevisionSpec{ + 
DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(6000), + }, + }, + want: apis.ErrOutOfBoundsValue(6000, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }, { + name: "exceed custom max timeout", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(100), + }, + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + "max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: apis.ErrOutOfBoundsValue(100, 0, 50, "timeoutSeconds"), + }, { + name: "provided zero timeout (ok)", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(0), + }, + }, + want: nil, + }, { + name: "negative timeout", + rs: &RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(-30), + }, + }, + want: apis.ErrOutOfBoundsValue(-30, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.rs.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestRevisionTemplateSpecValidation(t *testing.T) { + tests := []struct { + name string + rts *RevisionTemplateSpec + want *apis.FieldError + }{{ + name: "valid", + rts: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + 
Image: "helloworld", + }, + }, + }, + want: nil, + }, { + name: "empty spec", + rts: &RevisionTemplateSpec{}, + want: apis.ErrMissingField("spec"), + }, { + name: "nested spec error", + rts: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "kevin", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.container.lifecycle"), + }, { + name: "has revision template name", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision name. + Name: "parent-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: nil, + }, { + name: "valid name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // When user provides empty string in the name field it will behave like no name provided. + Name: "", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: nil, + }, { + name: "invalid name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision name. + Name: "parent-@foo-bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.name"), + }, { + name: "invalid generate name for revision template", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision generate name. 
+ GenerateName: "parent-@foo-bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "metadata.generateName"), + }, { + name: "Queue sidecar resource percentage annotation more than 100", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "200", + }, + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 0.1 <= 200 <= 100", + Paths: []string{serving.QueueSideCarResourcePercentageAnnotation}, + }).ViaField("metadata.annotations"), + }, { + name: "Invalid queue sidecar resource percentage annotation", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "50mx", + }, + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: (&apis.FieldError{ + Message: "invalid value: 50mx", + Paths: []string{fmt.Sprintf("[%s]", serving.QueueSideCarResourcePercentageAnnotation)}, + }).ViaField("metadata.annotations"), + }, { + name: "invalid metadata.annotations for scale", + rts: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.MinScaleAnnotationKey: "5", + autoscaling.MaxScaleAnnotationKey: "", + }, + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 1 <= <= 2147483647", + Paths: []string{autoscaling.MaxScaleAnnotationKey}, + 
}).ViaField("annotations").ViaField("metadata"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithinParent(context.Background(), metav1.ObjectMeta{ + Name: "parent", + }) + + got := test.rts.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestRevisionValidation(t *testing.T) { + tests := []struct { + name string + r *Revision + want *apis.FieldError + }{{ + name: "valid", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: nil, + }, { + name: "empty spec", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + }, + want: apis.ErrMissingField("spec"), + }, { + name: "nested spec error", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "kevin", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.container.lifecycle"), + }, { + name: "invalid name - dots and too long", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a" + strings.Repeat(".", 62) + "a", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid metadata.annotations - scale bounds", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "scale-bounds", + Annotations: map[string]string{ + autoscaling.MinScaleAnnotationKey: "5", + autoscaling.MaxScaleAnnotationKey: "2", + }, + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: (&apis.FieldError{ + Message: "maxScale=2 is less than minScale=5", + Paths: []string{autoscaling.MaxScaleAnnotationKey, autoscaling.MinScaleAnnotationKey}, + }).ViaField("annotations").ViaField("metadata"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if got, want := got.Error(), test.want.Error(); got != want { + t.Errorf("Validate got:\n%s, want:\n%s, diff:(-want, +got)=\n%v", got, want, cmp.Diff(got, want)) + } + }) + } +} + +func TestImmutableFields(t *testing.T) { + tests := []struct { + name string + new *Revision + old *Revision + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "good (no change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: nil, + }, { + // Test the case where max-revision-timeout is changed to a value + // that is less than an existing revision's timeout value. + // Existing revision should keep operating normally. 
+ name: "good (max revision timeout change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(100), + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(100), + }, + }, + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + "max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: nil, + }, { + name: "bad (resources image change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + }, + }, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("100m"), + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1alpha1.RevisionSpec}.DeprecatedContainer.Resources.Requests["cpu"]: + -: resource.Quantity: "{i:{value:100 scale:-3} d:{Dec:} s:100m Format:DecimalSI}" + +: resource.Quantity: "{i:{value:50 scale:-3} d:{Dec:} s:50m Format:DecimalSI}" +`, + }, + }, { + name: "bad (container image change)", + new: &Revision{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1alpha1.RevisionSpec}.DeprecatedContainer.Image: + -: "busybox" + +: "helloworld" +`, + }, + }, { + name: "bad (concurrency model change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + DeprecatedConcurrencyModel: "Multi", + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + DeprecatedConcurrencyModel: "Single", + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1alpha1.RevisionSpec}.DeprecatedConcurrencyModel: + -: "Single" + +: "Multi" +`, + }, + }, { + name: "bad (new field added)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "foobar", + }, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1alpha1.RevisionSpec}.RevisionSpec.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +`, + }, + }, { + name: "bad (multiple changes)", + new: &Revision{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "foobar", + }, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1alpha1.RevisionSpec}.RevisionSpec.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +{v1alpha1.RevisionSpec}.DeprecatedContainer.Image: + -: "busybox" + +: "helloworld" +`, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.new.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestRevisionProtocolType(t *testing.T) { + tests := []struct { + p net.ProtocolType + want *apis.FieldError + }{{ + net.ProtocolH2C, nil, + }, { + net.ProtocolHTTP1, nil, + }, { + net.ProtocolType(""), apis.ErrMissingField(apis.CurrentField), + }, { + net.ProtocolType("token-ring"), apis.ErrInvalidValue("token-ring", apis.CurrentField), + }} + for _, test := range tests { + e := test.p.Validate(context.Background()) + if got, want := e.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Got = %v, want: %v, diff: %s", got, want, cmp.Diff(got, want)) + } + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion.go similarity index 82% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_conversion.go rename to 
test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion.go index 35d6c2faee..f7fa0ba319 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion.go @@ -20,9 +20,11 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" ) // ConvertUp implements apis.Convertible @@ -38,8 +40,8 @@ func (source *Route) ConvertUp(ctx context.Context, obj apis.Convertible) error } // ConvertUp helps implement apis.Convertible -func (source *RouteSpec) ConvertUp(ctx context.Context, sink *v1beta1.RouteSpec) error { - sink.Traffic = make([]v1beta1.TrafficTarget, len(source.Traffic)) +func (source *RouteSpec) ConvertUp(ctx context.Context, sink *v1.RouteSpec) error { + sink.Traffic = make([]v1.TrafficTarget, len(source.Traffic)) for i := range source.Traffic { if err := source.Traffic[i].ConvertUp(ctx, &sink.Traffic[i]); err != nil { return err @@ -49,7 +51,7 @@ func (source *RouteSpec) ConvertUp(ctx context.Context, sink *v1beta1.RouteSpec) } // ConvertUp helps implement apis.Convertible -func (source *TrafficTarget) ConvertUp(ctx context.Context, sink *v1beta1.TrafficTarget) error { +func (source *TrafficTarget) ConvertUp(ctx context.Context, sink *v1.TrafficTarget) error { *sink = source.TrafficTarget switch { case source.Tag != "" && source.DeprecatedName != "": @@ -63,23 +65,26 @@ func (source *TrafficTarget) ConvertUp(ctx context.Context, sink *v1beta1.Traffi } // ConvertUp helps implement apis.Convertible -func (source *RouteStatus) ConvertUp(ctx context.Context, sink *v1beta1.RouteStatus) { +func (source 
*RouteStatus) ConvertUp(ctx context.Context, sink *v1.RouteStatus) { source.Status.ConvertTo(ctx, &sink.Status) source.RouteStatusFields.ConvertUp(ctx, &sink.RouteStatusFields) } // ConvertUp helps implement apis.Convertible -func (source *RouteStatusFields) ConvertUp(ctx context.Context, sink *v1beta1.RouteStatusFields) { +func (source *RouteStatusFields) ConvertUp(ctx context.Context, sink *v1.RouteStatusFields) { if source.URL != nil { sink.URL = source.URL.DeepCopy() } if source.Address != nil { - sink.Address = source.Address.Addressable.DeepCopy() + if sink.Address == nil { + sink.Address = &duckv1.Addressable{} + } + source.Address.ConvertUp(ctx, sink.Address) } - sink.Traffic = make([]v1beta1.TrafficTarget, len(source.Traffic)) + sink.Traffic = make([]v1.TrafficTarget, len(source.Traffic)) for i := range source.Traffic { source.Traffic[i].ConvertUp(ctx, &sink.Traffic[i]) } @@ -99,7 +104,7 @@ func (sink *Route) ConvertDown(ctx context.Context, obj apis.Convertible) error } // ConvertDown helps implement apis.Convertible -func (sink *RouteSpec) ConvertDown(ctx context.Context, source v1beta1.RouteSpec) { +func (sink *RouteSpec) ConvertDown(ctx context.Context, source v1.RouteSpec) { sink.Traffic = make([]TrafficTarget, len(source.Traffic)) for i := range source.Traffic { sink.Traffic[i].ConvertDown(ctx, source.Traffic[i]) @@ -107,27 +112,28 @@ func (sink *RouteSpec) ConvertDown(ctx context.Context, source v1beta1.RouteSpec } // ConvertDown helps implement apis.Convertible -func (sink *TrafficTarget) ConvertDown(ctx context.Context, source v1beta1.TrafficTarget) { +func (sink *TrafficTarget) ConvertDown(ctx context.Context, source v1.TrafficTarget) { sink.TrafficTarget = source } // ConvertDown helps implement apis.Convertible -func (sink *RouteStatus) ConvertDown(ctx context.Context, source v1beta1.RouteStatus) { +func (sink *RouteStatus) ConvertDown(ctx context.Context, source v1.RouteStatus) { source.Status.ConvertTo(ctx, &sink.Status) 
sink.RouteStatusFields.ConvertDown(ctx, source.RouteStatusFields) } // ConvertDown helps implement apis.Convertible -func (sink *RouteStatusFields) ConvertDown(ctx context.Context, source v1beta1.RouteStatusFields) { +func (sink *RouteStatusFields) ConvertDown(ctx context.Context, source v1.RouteStatusFields) { if source.URL != nil { sink.URL = source.URL.DeepCopy() } if source.Address != nil { - sink.Address = &duckv1alpha1.Addressable{ - Addressable: *source.Address, + if sink.Address == nil { + sink.Address = &duckv1alpha1.Addressable{} } + sink.Address.ConvertDown(ctx, source.Address) } sink.Traffic = make([]TrafficTarget, len(source.Traffic)) diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion_test.go new file mode 100644 index 0000000000..d879425534 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_conversion_test.go @@ -0,0 +1,326 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +func TestRouteConversionBadType(t *testing.T) { + good, bad := &Route{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} + +func TestRouteConversion(t *testing.T) { + tests := []struct { + name string + in *Route + wantErr bool + }{{ + name: "config name", + in: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(100), + }, + }}, + }, + Status: RouteStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(100), + }, + }}, + Address: &duckv1alpha1.Addressable{ + Addressable: duckv1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: "hostname.com", + }, + }, + }, + URL: &apis.URL{ + Scheme: "http", + Host: "hostname.com", + }, + // TODO(mattmoor): Domain is emptied + // TODO(mattmoor): DomainInternal is emptied + }, + }, + }, + }, { + name: "revision name", + in: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 2, + }, + Spec: RouteSpec{ + Traffic: 
[]TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00002", + Percent: ptr.Int64(100), + }, + }}, + }, + Status: RouteStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(100), + }, + }}, + Address: &duckv1alpha1.Addressable{ + Addressable: duckv1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: "hostname.com", + }, + }, + }, + URL: &apis.URL{ + Scheme: "http", + Host: "hostname.com", + }, + // TODO(mattmoor): Domain is emptied + // TODO(mattmoor): DomainInternal is emptied + }, + }, + }, + }, { + name: "release", + in: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 3, + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(90), + Tag: "current", + }, + }, { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00002", + Percent: ptr.Int64(10), + Tag: "candidate", + }, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: nil, + Tag: "latest", + }, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(0), + Tag: "latest2", + }, + }}, + }, + Status: RouteStatus{ + Status: duckv1.Status{ + ObservedGeneration: 3, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(90), + Tag: "current", + URL: &apis.URL{ + Scheme: "http", + Host: "current.foo.bar", + }, + }, + }, { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00002", + Percent: ptr.Int64(10), + Tag: "candidate", + URL: &apis.URL{ + Scheme: "http", + Host: 
"candidate.foo.bar", + }, + }, + }, { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00003", + Percent: nil, + Tag: "latest", + URL: &apis.URL{ + Scheme: "http", + Host: "latest.foo.bar", + }, + }, + }, { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00003", + Percent: ptr.Int64(0), + Tag: "latest2", + URL: &apis.URL{ + Scheme: "http", + Host: "latest2.foo.bar", + }, + }, + }}, + // TODO(mattmoor): Addressable + // TODO(mattmoor): Domain + // TODO(mattmoor): DomainInternal + }, + }, + }, + }, { + name: "name and tag", + in: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 3, + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "candidate", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(100), + Tag: "current", + }, + }}, + }, + Status: RouteStatus{ + Status: duckv1.Status{ + ObservedGeneration: 3, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{}, + }, + }, + }, + wantErr: true, + }} + + toDeprecated := func(in *Route) *Route { + out := in.DeepCopy() + for idx := range out.Spec.Traffic { + out.Spec.Traffic[idx].DeprecatedName = out.Spec.Traffic[idx].Tag + out.Spec.Traffic[idx].Tag = "" + } + for idx := range out.Status.Traffic { + out.Status.Traffic[idx].DeprecatedName = out.Status.Traffic[idx].Tag + out.Status.Traffic[idx].Tag = "" + } + return out + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + beta := &v1beta1.Route{} + if err := test.in.ConvertUp(context.Background(), beta); err != nil { + if !test.wantErr { + t.Errorf("ConvertUp() = %v", err) + } + return + } else if test.wantErr { + t.Errorf("ConvertUp() = %#v, wanted error", beta) + } + got := &Route{} + if err := got.ConvertDown(context.Background(), beta); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + 
t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + + // A variant of the test that uses `name:`, + // but end up with what we have above anyways. + t.Run(test.name+" (deprecated)", func(t *testing.T) { + if test.wantErr { + t.Skip("skipping error rows") + } + start := toDeprecated(test.in) + beta := &v1beta1.Route{} + if err := start.ConvertUp(context.Background(), beta); err != nil { + t.Errorf("ConvertUp() = %v", err) + } + got := &Route{} + if err := got.ConvertDown(context.Background(), beta); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults.go similarity index 61% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults.go index 21f55bad1b..fab2d8ce49 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults.go @@ -19,29 +19,38 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/ptr" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) func (r *Route) SetDefaults(ctx context.Context) { r.Spec.SetDefaults(apis.WithinSpec(ctx)) + if r.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Route).Spec, r.Spec, r) + } else { + serving.SetUserInfo(ctx, nil, r.Spec, r) + } + } + } func (rs *RouteSpec) SetDefaults(ctx context.Context) { - if v1beta1.IsUpgradeViaDefaulting(ctx) { - beta := 
v1beta1.RouteSpec{} - if rs.ConvertUp(ctx, &beta) == nil { + if v1.IsUpgradeViaDefaulting(ctx) { + v := v1.RouteSpec{} + if rs.ConvertUp(ctx, &v) == nil { alpha := RouteSpec{} - alpha.ConvertDown(ctx, beta) + alpha.ConvertDown(ctx, v) *rs = alpha } } - if len(rs.Traffic) == 0 && v1beta1.HasDefaultConfigurationName(ctx) { + if len(rs.Traffic) == 0 && v1.HasDefaultConfigurationName(ctx) { rs.Traffic = []TrafficTarget{{ - TrafficTarget: v1beta1.TrafficTarget{ - Percent: 100, + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), LatestRevision: ptr.Bool(true), }, }} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults_test.go new file mode 100644 index 0000000000..f4f77be94e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_defaults_test.go @@ -0,0 +1,325 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRouteDefaulting(t *testing.T) { + tests := []struct { + name string + in *Route + want *Route + wc func(context.Context) context.Context + }{{ + name: "simple", + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + }, + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }}, + }, + }, + }, { + name: "lemonade", + wc: v1.WithUpgradeViaDefaulting, + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + }, + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "foo", + ConfigurationName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }}, + }, + }, + }, { + name: "lemonade (conflict)", + wc: v1.WithUpgradeViaDefaulting, + in: &Route{ + Spec: RouteSpec{ + Traffic: 
[]TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + }, + }, { + DeprecatedName: "baz", + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + }, + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + }, { + DeprecatedName: "baz", + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }}, + }, + }, + }, { + name: "lemonade (collision)", + wc: v1.WithUpgradeViaDefaulting, + in: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "bar", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + }, + }}, + }, + }, + want: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + ConfigurationName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } +} + +func TestRouteUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Route) *Route { + a := s.GetAnnotations() + if a == nil { + a = 
map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Route + prev *Route + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Route{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. + name: "update-no-diff-old-object", + user: u1, + this: &Route{}, + prev: &Route{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Route{}), + prev: withUserAnns(u1, u1, &Route{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new", + }, + }}, + }, + }, + prev: &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "old", + }, + }}, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new", + }, + }}, + }, + }), + prev: withUserAnns(u1, u2, &Route{ + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "old", + }, + }}, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = 
apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go index 6b1fa92106..401e18791f 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle.go @@ -22,9 +22,9 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/serving/pkg/apis/networking/v1alpha1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/serving/pkg/apis/networking/v1alpha1" ) var routeCondSet = apis.NewLivingConditionSet( @@ -152,9 +152,9 @@ func (rs *RouteStatus) MarkCertificateNotOwned(name string) { }) } -// PropagateClusterIngressStatus update RouteConditionIngressReady condition +// PropagateIngressStatus update RouteConditionIngressReady condition // in RouteStatus according to IngressStatus. 
-func (rs *RouteStatus) PropagateClusterIngressStatus(cs v1alpha1.IngressStatus) { +func (rs *RouteStatus) PropagateIngressStatus(cs v1alpha1.IngressStatus) { cc := cs.GetCondition(v1alpha1.IngressConditionReady) if cc == nil { rs.MarkIngressNotConfigured() @@ -170,6 +170,6 @@ func (rs *RouteStatus) PropagateClusterIngressStatus(cs v1alpha1.IngressStatus) } } -func (rs *RouteStatus) duck() *duckv1beta1.Status { +func (rs *RouteStatus) duck() *duckv1.Status { return &rs.Status } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle_test.go new file mode 100644 index 0000000000..9e11a4c63e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_lifecycle_test.go @@ -0,0 +1,385 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + apitestv1 "knative.dev/pkg/apis/testing/v1" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +func TestRouteDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }, { + name: "addressable", + t: &duckv1alpha1.Addressable{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Route{}, test.t) + if err != nil { + t.Errorf("VerifyType(Route, %T) = %v", test.t, err) + } + }) + } +} + +func TestRouteIsReady(t *testing.T) { + cases := []struct { + name string + status RouteStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: RouteStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionAllTrafficAssigned, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: RouteStatus{ + Status: duckv1.Status{ + + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + 
status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionAllTrafficAssigned, + Status: corev1.ConditionTrue, + }, { + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status false should not be ready", + status: RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionAllTrafficAssigned, + Status: corev1.ConditionTrue, + }, { + Type: RouteConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + }) + } +} + +func TestTypicalRouteFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkTrafficAssigned() + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionIngressReady, t) + 
apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionReady, t) +} + +func TestTrafficNotAssignedFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkMissingTrafficTarget("Revision", "does-not-exist") + apitestv1.CheckConditionFailed(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) +} + +func TestTargetConfigurationNotYetReadyFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkConfigurationNotReady("i-have-no-ready-revision") + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) +} + +func TestUnknownErrorWhenConfiguringTraffic(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkUnknownTrafficError("unknown-error") + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) +} + +func TestTargetConfigurationFailedToBeReadyFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + 
r.MarkConfigurationFailed("permanently-failed") + apitestv1.CheckConditionFailed(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) +} + +func TestTargetRevisionNotYetReadyFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkRevisionNotReady("not-yet-ready") + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) +} + +func TestTargetRevisionFailedToBeReadyFlow(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkRevisionFailed("cannot-find-image") + apitestv1.CheckConditionFailed(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) +} + +func TestIngressFailureRecovery(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), 
RouteConditionReady, t) + + // Empty IngressStatus marks ingress "NotConfigured" + r.PropagateIngressStatus(netv1alpha1.IngressStatus{}) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkTrafficAssigned() + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionReady, t) + + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) + + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionReady, t) +} + +func TestRouteNotOwnedStuff(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: netv1alpha1.IngressConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }) + + 
apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionReady, t) + + r.MarkServiceNotOwned("evan") + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionAllTrafficAssigned, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionIngressReady, t) + apitestv1.CheckConditionFailed(r.duck(), RouteConditionReady, t) +} + +func TestRouteGetGroupVersionKind(t *testing.T) { + r := &Route{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1alpha1", + Kind: "Route", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestCertificateReady(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.MarkCertificateReady("cert") + + apitestv1.CheckConditionSucceeded(r.duck(), RouteConditionCertificateProvisioned, t) +} + +func TestCertificateNotReady(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.MarkCertificateNotReady("cert") + + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionCertificateProvisioned, t) +} + +func TestCertificateProvisionFailed(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.MarkCertificateProvisionFailed("cert") + + apitestv1.CheckConditionFailed(r.duck(), RouteConditionCertificateProvisioned, t) +} + +func TestRouteNotOwnCertificate(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.MarkCertificateNotOwned("cert") + + apitestv1.CheckConditionFailed(r.duck(), RouteConditionCertificateProvisioned, t) +} + +func TestIngressNotConfigured(t *testing.T) { + r := &RouteStatus{} + r.InitializeConditions() + r.MarkIngressNotConfigured() + + apitestv1.CheckConditionOngoing(r.duck(), RouteConditionIngressReady, t) +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_types.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_types.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_types.go index b0d5559df2..da51e92a82 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_types.go @@ -19,12 +19,12 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/knative/pkg/apis" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + "knative.dev/pkg/kmeta" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) // +genclient @@ -70,9 +70,9 @@ type TrafficTarget struct { // +optional DeprecatedName string `json:"name,omitempty"` - // We inherit most of our fields by inlining the v1beta1 type. - // Ultimately all non-v1beta1 fields will be deprecated. - v1beta1.TrafficTarget `json:",inline"` + // We inherit most of our fields by inlining the v1 type. + // Ultimately all non-v1 fields will be deprecated. + v1.TrafficTarget `json:",inline"` } // RouteSpec holds the desired state of the Route (from the client). @@ -104,7 +104,7 @@ const ( RouteConditionAllTrafficAssigned apis.ConditionType = "AllTrafficAssigned" // RouteConditionIngressReady is set to False when the - // ClusterIngress fails to become Ready. + // Ingress fails to become Ready. 
RouteConditionIngressReady apis.ConditionType = "IngressReady" // RouteConditionCertificateProvisioned is set to False when the @@ -112,7 +112,7 @@ const ( RouteConditionCertificateProvisioned apis.ConditionType = "CertificateProvisioned" ) -// RouteStatusFields holds all of the non-duckv1beta1.Status status fields of a Route. +// RouteStatusFields holds all of the non-duckv1.Status status fields of a Route. // These are defined outline so that we can also inline them into Service, and more easily // copy them. type RouteStatusFields struct { @@ -147,7 +147,7 @@ type RouteStatusFields struct { // RouteStatus communicates the observed state of the Route (from the controller). type RouteStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` RouteStatusFields `json:",inline"` } diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation.go similarity index 80% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation.go index 99f77815d6..025457b7e3 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/route_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation.go @@ -20,14 +20,24 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" ) func (r *Route) Validate(ctx context.Context) *apis.FieldError { errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).ViaField("metadata") errs = errs.Also(r.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Route) + // Don't validate annotations(creator and lastModifier) when 
route owned by service + // validate only when route created independently. + if r.OwnerReferences == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, r.Spec, original.GetAnnotations(), + r.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } + } return errs } @@ -45,12 +55,14 @@ func (rs *RouteSpec) Validate(ctx context.Context) *apis.FieldError { // Track the targets of named TrafficTarget entries (to detect duplicates). trafficMap := make(map[string]diagnostic) - percentSum := 0 + percentSum := int64(0) for i, tt := range rs.Traffic { - // Delegate to the v1beta1 validation. + // Delegate to the v1 validation. errs = errs.Also(tt.TrafficTarget.Validate(ctx).ViaFieldIndex("traffic", i)) - percentSum += tt.Percent + if tt.Percent != nil { + percentSum += *tt.Percent + } if tt.DeprecatedName != "" && tt.Tag != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "tag"). diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation_test.go new file mode 100644 index 0000000000..e1418f6b2b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/route_validation_test.go @@ -0,0 +1,669 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRouteValidation(t *testing.T) { + tests := []struct { + name string + r *Route + want *apis.FieldError + }{{ + name: "valid", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + want: nil, + }, { + name: "valid split", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "prod", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }, + }, { + DeprecatedName: "experiment", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(10), + }, + }}, + }, + }, + want: nil, + }, { + name: "invalid traffic entry", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{ + "spec.traffic[0].configurationName", + "spec.traffic[0].revisionName", + }, + }, + }, { + name: "invalid name - dots", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an 
alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid name - dots and spec percent is not 100", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }, + }}, + }, + }, + want: (&apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }).Also(&apis.FieldError{ + Message: "Traffic targets sum to 90, want 100", + Paths: []string{"spec.traffic"}, + }), + }, { + name: "invalid name - too long", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + Spec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"metadata.name"}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.r.Validate(context.Background()).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestRouteSpecValidation(t *testing.T) { + multipleDefinitionError := &apis.FieldError{ + Message: `Multiple definitions for "foo"`, + Paths: []string{"traffic[0].name", "traffic[1].name"}, + } + tests := []struct { + name string + rs *RouteSpec + want *apis.FieldError + }{{ + name: "valid", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: 
ptr.Int64(100), + }, + }}, + }, + want: nil, + }, { + name: "valid split", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "prod", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }, + }, { + DeprecatedName: "experiment", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(10), + }, + }}, + }, + want: nil, + }, { + name: "empty spec", + rs: &RouteSpec{}, + want: apis.ErrMissingField(apis.CurrentField), + }, { + name: "invalid traffic entry", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{ + "traffic[0].configurationName", + "traffic[0].revisionName", + }, + }, + }, { + name: "invalid revision name", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "b@r", + Percent: ptr.Int64(100), + }, + }}, + }, + want: &apis.FieldError{ + Message: `invalid key name "b@r"`, + Paths: []string{"traffic[0].revisionName"}, + Details: `name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`, + }, + }, { + name: "invalid revision name", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "f**", + Percent: ptr.Int64(100), + }, + }}, + }, + want: &apis.FieldError{ + Message: `invalid key name "f**"`, + Paths: []string{"traffic[0].configurationName"}, + Details: `name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`, + }, + }, { + name: "invalid name conflict", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(50), + }, + }, { + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "baz", + Percent: ptr.Int64(50), + }, + }}, + }, + want: multipleDefinitionError, + }, { + name: "collision (same revision)", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(50), + }, + }, { + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(50), + }, + }}, + }, + want: multipleDefinitionError, + }, { + name: "collision (same config)", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(50), + }, + }, { + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(50), + }, + }}, + }, + want: multipleDefinitionError, + }, { + name: "invalid total percentage", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(99), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "baz", + Percent: ptr.Int64(99), + }, + }}, + }, + want: &apis.FieldError{ + Message: "Traffic targets sum to 198, want 100", + Paths: []string{"traffic"}, + }, + }, { + name: "multiple names", + rs: &RouteSpec{ + Traffic: []TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(100), + }, + }}, + }, + want: apis.ErrMultipleOneOf("traffic[0].name", "traffic[0].tag"), + }, { + name: "conflicting with different names", + rs: &RouteSpec{ + Traffic: 
[]TrafficTarget{{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(50), + }, + }}, + }, + want: &apis.FieldError{ + Message: `Multiple definitions for "foo"`, + Paths: []string{ + "traffic[0].name", + "traffic[1].tag", + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.rs.Validate(context.Background()).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestTrafficTargetValidation(t *testing.T) { + tests := []struct { + name string + tt *TrafficTarget + want *apis.FieldError + }{{ + name: "valid with name and revision", + tt: &TrafficTarget{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + }, + want: nil, + }, { + name: "valid with name and configuration", + tt: &TrafficTarget{ + DeprecatedName: "baz", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blah", + Percent: ptr.Int64(37), + }, + }, + want: nil, + }, { + name: "valid with no percent", + tt: &TrafficTarget{ + DeprecatedName: "ooga", + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "booga", + }, + }, + want: nil, + }, { + name: "valid with no name", + tt: &TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "booga", + Percent: ptr.Int64(100), + }, + }, + want: nil, + }, { + name: "invalid with both", + tt: &TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + ConfigurationName: "bar", + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got both", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid with neither", + tt: &TrafficTarget{ + DeprecatedName: "foo", + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }, + want: 
&apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid percent too low", + tt: &TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(-5), + }, + }, + want: apis.ErrOutOfBoundsValue(-5, 0, 100, "percent"), + }, { + name: "invalid percent too high", + tt: &TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(101), + }, + }, + want: apis.ErrOutOfBoundsValue(101, 0, 100, "percent"), + }, { + name: "disallowed url set", + tt: &TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(100), + URL: &apis.URL{ + Host: "should.not.be.set", + }, + }, + }, + want: apis.ErrDisallowedFields("url"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.tt.Validate(context.Background()).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getRouteSpec(confName string) RouteSpec { + return RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + ConfigurationName: confName, + }, + }}, + } +} + +func TestRouteAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Route + this *Route + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + 
want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update creator annotation with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier without spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update without spec changes as route owned by service", + this: &Route{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1alpha1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as route owned by service", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1alpha1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion.go similarity index 76% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion.go index c95dba1a11..c4a799a18e 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_conversion.go +++ 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion.go @@ -20,9 +20,10 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - "github.com/knative/pkg/ptr" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" ) // ConvertUp implements apis.Convertible @@ -34,18 +35,24 @@ func (source *Service) ConvertUp(ctx context.Context, obj apis.Convertible) erro return err } return source.Status.ConvertUp(ctx, &sink.Status) + case *v1.Service: + sink.ObjectMeta = source.ObjectMeta + if err := source.Spec.ConvertUp(ctx, &sink.Spec); err != nil { + return err + } + return source.Status.ConvertUp(ctx, &sink.Status) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertUp helps implement apis.Convertible -func (source *ServiceSpec) ConvertUp(ctx context.Context, sink *v1beta1.ServiceSpec) error { +func (source *ServiceSpec) ConvertUp(ctx context.Context, sink *v1.ServiceSpec) error { switch { case source.DeprecatedRunLatest != nil: - sink.RouteSpec = v1beta1.RouteSpec{ - Traffic: []v1beta1.TrafficTarget{{ - Percent: 100, + sink.RouteSpec = v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), LatestRevision: ptr.Bool(true), }}, } @@ -53,29 +60,29 @@ func (source *ServiceSpec) ConvertUp(ctx context.Context, sink *v1beta1.ServiceS case source.DeprecatedRelease != nil: if len(source.DeprecatedRelease.Revisions) == 2 { - sink.RouteSpec = v1beta1.RouteSpec{ - Traffic: []v1beta1.TrafficTarget{{ + sink.RouteSpec = v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ RevisionName: source.DeprecatedRelease.Revisions[0], - Percent: 100 - source.DeprecatedRelease.RolloutPercent, + Percent: ptr.Int64(int64(100 - source.DeprecatedRelease.RolloutPercent)), Tag: "current", }, { RevisionName: source.DeprecatedRelease.Revisions[1], - Percent: source.DeprecatedRelease.RolloutPercent, + 
Percent: ptr.Int64(int64(source.DeprecatedRelease.RolloutPercent)), Tag: "candidate", }, { - Percent: 0, + Percent: nil, Tag: "latest", LatestRevision: ptr.Bool(true), }}, } } else { - sink.RouteSpec = v1beta1.RouteSpec{ - Traffic: []v1beta1.TrafficTarget{{ + sink.RouteSpec = v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ RevisionName: source.DeprecatedRelease.Revisions[0], - Percent: 100, + Percent: ptr.Int64(100), Tag: "current", }, { - Percent: 0, + Percent: nil, Tag: "latest", LatestRevision: ptr.Bool(true), }}, @@ -90,10 +97,10 @@ func (source *ServiceSpec) ConvertUp(ctx context.Context, sink *v1beta1.ServiceS return source.DeprecatedRelease.Configuration.ConvertUp(ctx, &sink.ConfigurationSpec) case source.DeprecatedPinned != nil: - sink.RouteSpec = v1beta1.RouteSpec{ - Traffic: []v1beta1.TrafficTarget{{ + sink.RouteSpec = v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ RevisionName: source.DeprecatedPinned.RevisionName, - Percent: 100, + Percent: ptr.Int64(100), }}, } return source.DeprecatedPinned.Configuration.ConvertUp(ctx, &sink.ConfigurationSpec) @@ -108,7 +115,7 @@ func (source *ServiceSpec) ConvertUp(ctx context.Context, sink *v1beta1.ServiceS } // ConvertUp helps implement apis.Convertible -func (source *ServiceStatus) ConvertUp(ctx context.Context, sink *v1beta1.ServiceStatus) error { +func (source *ServiceStatus) ConvertUp(ctx context.Context, sink *v1.ServiceStatus) error { source.Status.ConvertTo(ctx, &sink.Status) source.RouteStatusFields.ConvertUp(ctx, &sink.RouteStatusFields) @@ -124,19 +131,25 @@ func (sink *Service) ConvertDown(ctx context.Context, obj apis.Convertible) erro return err } return sink.Status.ConvertDown(ctx, source.Status) + case *v1.Service: + sink.ObjectMeta = source.ObjectMeta + if err := sink.Spec.ConvertDown(ctx, source.Spec); err != nil { + return err + } + return sink.Status.ConvertDown(ctx, source.Status) default: return fmt.Errorf("unknown version, got: %T", source) } } // ConvertDown helps implement apis.Convertible 
-func (sink *ServiceSpec) ConvertDown(ctx context.Context, source v1beta1.ServiceSpec) error { +func (sink *ServiceSpec) ConvertDown(ctx context.Context, source v1.ServiceSpec) error { sink.RouteSpec.ConvertDown(ctx, source.RouteSpec) return sink.ConfigurationSpec.ConvertDown(ctx, source.ConfigurationSpec) } // ConvertDown helps implement apis.Convertible -func (sink *ServiceStatus) ConvertDown(ctx context.Context, source v1beta1.ServiceStatus) error { +func (sink *ServiceStatus) ConvertDown(ctx context.Context, source v1.ServiceStatus) error { source.Status.ConvertTo(ctx, &sink.Status) sink.RouteStatusFields.ConvertDown(ctx, source.RouteStatusFields) diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion_test.go new file mode 100644 index 0000000000..9ecda37b5c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_conversion_test.go @@ -0,0 +1,706 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +func TestServiceConversionBadType(t *testing.T) { + good, bad := &Service{}, &Revision{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} + +func TestServiceConversion(t *testing.T) { + versions := []apis.Convertible{&v1.Service{}, &v1beta1.Service{}} + + tests := []struct { + name string + in *Service + }{{ + name: "simple conversion", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + Status: ServiceStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, 
+ }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestCreatedRevisionName: "foo-00002", + LatestReadyRevisionName: "foo-00002", + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: ptr.Int64(100), + RevisionName: "foo-00001", + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + }, + }} + + for _, test := range tests { + for _, version := range versions { + t.Run(test.name, func(t *testing.T) { + ver := version + if err := test.in.ConvertUp(context.Background(), ver); err != nil { + t.Errorf("ConvertUp() = %v", err) + } + t.Logf("ConvertUp() = %#v", ver) + got := &Service{} + if err := got.ConvertDown(context.Background(), ver); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + t.Logf("ConvertDown() = %#v", got) + if diff := cmp.Diff(test.in, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } + } +} + +func TestServiceConversionFromDeprecated(t *testing.T) { + status := ServiceStatus{ + Status: duckv1.Status{ + ObservedGeneration: 1, + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ConfigurationStatusFields: ConfigurationStatusFields{ + LatestCreatedRevisionName: "foo-00002", + LatestReadyRevisionName: "foo-00002", + }, + RouteStatusFields: RouteStatusFields{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + RevisionName: "foo-00001", + }, + }}, + }, + } + + versions := []apis.Convertible{&v1.Service{}, &v1beta1.Service{}} + tests := []struct { + name string + in *Service + want *Service + badField string + }{{ + name: "run latest", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: 
corev1.PodSpec{ + ServiceAccountName: "robocop", + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + }, + }, + }, + }, + }, + Status: status, + }, + want: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + Status: status, + }, + }, { + name: "release single", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo-00001"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: 
&corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + }, + }, + }, + }, + }, + Status: status, + }, + want: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: "foo-00001", + Percent: ptr.Int64(100), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + LatestRevision: ptr.Bool(true), + Percent: nil, + }, + }}, + }, + }, + Status: status, + }, + }, { + name: "release double", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo-00001", "foo-00002"}, + RolloutPercent: 22, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: 
corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + }, + }, + }, + }, + }, + Status: status, + }, + want: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: "foo-00001", + Percent: ptr.Int64(78), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + RevisionName: "foo-00002", + Percent: ptr.Int64(22), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + LatestRevision: ptr.Bool(true), + Percent: nil, + }, + }}, + }, + }, + Status: status, + }, + }, { + name: "release double w/ @latest", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo-00001", "@latest"}, + RolloutPercent: 37, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: 
v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + }, + }, + }, + }, + }, + Status: status, + }, + want: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: "foo-00001", + Percent: ptr.Int64(63), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(37), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + LatestRevision: ptr.Bool(true), + Percent: nil, + }, + }}, + }, + }, + Status: status, + }, + }, { + name: "pinned", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "foo-00001", + Configuration: ConfigurationSpec{ + 
DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }, + }, + }, + }, + }, + }, + Status: status, + }, + want: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "robocop", + Containers: []corev1.Container{{ + Image: "busybox", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + TimeoutSeconds: ptr.Int64(18), + ContainerConcurrency: ptr.Int64(53), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foo-00001", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + Status: status, + }, + }, { + name: "manual", + in: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "asdf", + Namespace: "blah", + Generation: 1, + }, + Spec: ServiceSpec{ + DeprecatedManual: &ManualType{}, + }, + Status: status, + }, + badField: "manual", + }} + + for _, test := range tests { + for _, version := range versions { + t.Run(test.name, func(t *testing.T) { + ver := version + if err := test.in.ConvertUp(context.Background(), 
ver); err != nil { + if test.badField != "" { + cce, ok := err.(*CannotConvertError) + if ok && cce.Field == test.badField { + return + } + } + t.Errorf("ConvertUp() = %v", err) + } + t.Logf("ConvertUp() = %#v", ver) + got := &Service{} + if err := got.ConvertDown(context.Background(), ver); err != nil { + t.Errorf("ConvertDown() = %v", err) + } + t.Logf("ConvertDown() = %#v", got) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("roundtrip (-want, +got) = %v", diff) + } + }) + } + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults.go similarity index 59% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_defaults.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults.go index d42493fbbf..8e7b77c155 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_defaults.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults.go @@ -19,43 +19,28 @@ package v1alpha1 import ( "context" - "github.com/knative/pkg/apis" - "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) func (s *Service) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, s.ObjectMeta) s.Spec.SetDefaults(apis.WithinSpec(ctx)) - - if ui := apis.GetUserInfo(ctx); ui != nil { - ans := s.GetAnnotations() - if ans == nil { - ans = map[string]string{} - defer s.SetAnnotations(ans) - } - - if apis.IsInUpdate(ctx) { - old := apis.GetBaseline(ctx).(*Service) - if equality.Semantic.DeepEqual(old.Spec, s.Spec) { - return - } - ans[serving.UpdaterAnnotation] = ui.Username - } else { - ans[serving.CreatorAnnotation] = ui.Username - 
ans[serving.UpdaterAnnotation] = ui.Username - } + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Service).Spec, s.Spec, s) + } else { + serving.SetUserInfo(ctx, nil, s.Spec, s) } } func (ss *ServiceSpec) SetDefaults(ctx context.Context) { - if v1beta1.IsUpgradeViaDefaulting(ctx) { - beta := v1beta1.ServiceSpec{} - if ss.ConvertUp(ctx, &beta) == nil { + if v1.IsUpgradeViaDefaulting(ctx) { + v := v1.ServiceSpec{} + if ss.ConvertUp(ctx, &v) == nil { alpha := ServiceSpec{} - if alpha.ConvertDown(ctx, beta) == nil { + if alpha.ConvertDown(ctx, v) == nil { *ss = alpha } } @@ -70,6 +55,6 @@ func (ss *ServiceSpec) SetDefaults(ctx context.Context) { } else if ss.DeprecatedManual != nil { } else { ss.ConfigurationSpec.SetDefaults(ctx) - ss.RouteSpec.SetDefaults(v1beta1.WithDefaultConfigurationName(ctx)) + ss.RouteSpec.SetDefaults(v1.WithDefaultConfigurationName(ctx)) } } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults_test.go new file mode 100644 index 0000000000..fb43391352 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_defaults_test.go @@ -0,0 +1,890 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/ptr" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestServiceDefaulting(t *testing.T) { + tests := []struct { + name string + in *Service + want *Service + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Service{}, + // When nothing is provided, we still get the "run latest" inline RouteSpec. + want: &Service{ + Spec: ServiceSpec{ + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }, { + name: "manual", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedManual: &ManualType{}, + }, + }, + // DeprecatedManual does not take a configuration so do nothing + want: &Service{ + Spec: ServiceSpec{ + DeprecatedManual: &ManualType{}, + }, + }, + }, { + name: "run latest", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "run latest (lemonade)", + wc: 
v1.WithUpgradeViaDefaulting, + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }, { + name: "run latest - no overwrite", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "pinned", + 
in: &Service{ + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "pinned (lemonade)", + wc: v1.WithUpgradeViaDefaulting, + in: &Service{ + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "asdf", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "asdf", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + }}, + }, + }, + }, + }, { + name: "pinned - no overwrite", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ 
+ Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "release", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo", "bar"}, + RolloutPercent: 43, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo", "bar"}, + RolloutPercent: 43, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "release (double, lemonade)", + wc: v1.WithUpgradeViaDefaulting, + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo", "bar"}, + RolloutPercent: 43, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: 
&RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + Percent: ptr.Int64(57), + RevisionName: "foo", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + Percent: ptr.Int64(43), + RevisionName: "bar", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + }, + }, { + name: "release (double, @latest, lemonade)", + wc: v1.WithUpgradeViaDefaulting, + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo", "@latest"}, + RolloutPercent: 43, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + 
}, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + Percent: ptr.Int64(57), + RevisionName: "foo", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + Percent: ptr.Int64(43), + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + }, + }, { + name: "release (single, lemonade)", + wc: v1.WithUpgradeViaDefaulting, + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + Percent: ptr.Int64(100), + RevisionName: "foo", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + LatestRevision: ptr.Bool(true), + }, + }}, + }, + }, + }, + }, { + name: "release - no overwrite", + in: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{}, + RevisionSpec: v1.RevisionSpec{ + 
ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + }, + }, + }, + }, { + name: "inline defaults to run latest", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "blah", + }}, + }, + }, + }, + }, + }, + // No RouteSpec should get defaulted to "run latest" + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "blah", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }, { + name: "inline with empty RevisionSpec", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{}, + }, + // No RouteSpec should get defaulted to "run latest" + }, + }, + want: &Service{ + Spec: ServiceSpec{ + 
ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + DeprecatedContainer: &corev1.Container{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }, { + name: "inline defaults to run latest (non-nil traffic)", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "blah", + }}, + }, + }, + }, + }, + }, + // No RouteSpec should get defaulted to "run latest" + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{}, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "blah", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }, { + name: "inline with just percent", + in: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: 
&RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "blah", + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: &Service{ + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "blah", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if diff := cmp.Diff(test.want, got, ignoreUnexportedResources); diff != "" { + t.Errorf("SetDefaults (-want, +got) = %v", diff) + } + }) + } +} + +func TestAnnotateUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + + withUserAnns := func(u1, u2 string, s *Service) *Service { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + + tests := []struct { + name string + user string + this *Service + prev *Service + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Service{}, + prev: nil, + wantAnns: map[string]string{ + 
serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. + name: "update-no-diff-old-object", + user: u1, + this: &Service{}, + prev: &Service{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Service{}), + prev: withUserAnns(u1, u1, &Service{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{}, + }, + }, + prev: &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{}, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Service{ + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{}, + }, + }), + prev: withUserAnns(u1, u2, &Service{ + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{}, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } + +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go similarity index 97% rename from 
test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go index b1fc9210d0..168b5fb4cc 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle.go @@ -22,8 +22,8 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" ) var serviceCondSet = apis.NewLivingConditionSet( @@ -156,6 +156,6 @@ func (ss *ServiceStatus) PropagateRouteStatus(rs *RouteStatus) { } } -func (ss *ServiceStatus) duck() *duckv1beta1.Status { +func (ss *ServiceStatus) duck() *duckv1.Status { return &ss.Status } diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle_test.go new file mode 100644 index 0000000000..1156c86838 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_lifecycle_test.go @@ -0,0 +1,614 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + apitestv1 "knative.dev/pkg/apis/testing/v1" + "knative.dev/pkg/ptr" + + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestServiceDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }, { + name: "addressable", + t: &duckv1alpha1.Addressable{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Service{}, test.t) + if err != nil { + t.Errorf("VerifyType(Service, %T) = %v", test.t, err) + } + }) + } +} + +func TestServiceGeneration(t *testing.T) { + service := Service{} + if got, want := service.GetGeneration(), int64(0); got != want { + t.Errorf("Empty Service generation should be %d, was %d", want, got) + } + + answer := int64(42) + service.SetGeneration(answer) + if got := service.GetGeneration(); got != answer { + t.Errorf("GetGeneration mismatch; got %d, want %d", got, answer) + } +} + +func TestServiceIsReady(t *testing.T) { + cases := []struct { + name string + status ServiceStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: ServiceStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ServiceConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", 
+ status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ServiceConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ServiceConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ServiceConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }, { + Type: ServiceConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status false should not be ready", + status: ServiceStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Foo", + Status: corev1.ConditionTrue, + }, { + Type: ServiceConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + + for _, tc := range cases { + if e, a := tc.isReady, tc.status.IsReady(); e != a { + t.Errorf("%q expected: %v got: %v", tc.name, e, a) + } + } +} + +func TestServiceHappyPath(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Nothing from Configuration is nothing to us. 
+ svc.PropagateConfigurationStatus(&ConfigurationStatus{}) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Nothing from Route is nothing to us. + svc.PropagateRouteStatus(&RouteStatus{}) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Done from Configuration moves our ConfigurationsReady condition + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + svc.MarkResourceNotConvertible(ConvertErrorf("manual", "something something not allowed.").(*CannotConvertError)) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ConditionTypeConvertible, t) + + // Done from Route moves our RoutesReady condition, which triggers us to be Ready. 
+ svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) + + // Check idempotency. + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestMarkRouteNotYetReady(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + svc.MarkRouteNotYetReady() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + dt := svc.GetCondition(ServiceConditionReady) + if got, want := dt.Reason, trafficNotMigratedReason; got != want { + t.Errorf("Condition Reason: got: %s, want: %s", got, want) + } + if got, want := dt.Message, trafficNotMigratedMessage; got != want { + t.Errorf("Condition Message: got: %s, want: %s", got, want) + } +} + +func TestFailureRecovery(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Config failure causes us 
to become unready immediately (route still ok). + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Route failure causes route to become failed (config and service still failed). + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionRoutesReady, t) + + // Fix Configuration moves our ConfigurationsReady condition (route and service still failed). + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionRoutesReady, t) + + // Fix route, should make everything ready. 
+ svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestConfigurationFailurePropagation(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Failure causes us to become unready immediately. + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + +} + +func TestConfigurationFailureRecovery(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Done from Route moves our RoutesReady condition + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), 
ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) + + // Failure causes us to become unready immediately (route still ok). + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) + + // Fixed the glitch. + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestConfigurationUnknownPropagation(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Configuration and Route become ready, making us ready. 
+ svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) + + // Configuration flipping back to Unknown causes us to become ongoing immediately + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + // Route is unaffected. 
+ apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestConfigurationStatusPropagation(t *testing.T) { + svc := &Service{} + + csf := ConfigurationStatusFields{ + LatestReadyRevisionName: "foo", + LatestCreatedRevisionName: "bar", + } + svc.Status.PropagateConfigurationStatus(&ConfigurationStatus{ + ConfigurationStatusFields: csf, + }) + + want := ServiceStatus{ + ConfigurationStatusFields: csf, + } + + if diff := cmp.Diff(want, svc.Status); diff != "" { + t.Errorf("unexpected ServiceStatus (-want +got): %s", diff) + } +} + +func TestRouteFailurePropagation(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Failure causes us to become unready immediately + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestRouteFailureRecovery(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Done from Configuration moves our ConfigurationsReady condition + svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + 
apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Failure causes us to become unready immediately (config still ok). + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionRoutesReady, t) + + // Fixed the glitch. + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) +} + +func TestRouteUnknownPropagation(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + // Configuration and Route become ready, making us ready. 
+ svc.PropagateConfigurationStatus(&ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: ConfigurationConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionRoutesReady, t) + + // Route flipping back to Unknown causes us to become ongoing immediately. + svc.PropagateRouteStatus(&RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RouteConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + // Configuration is unaffected. 
+ apitestv1.CheckConditionSucceeded(svc.duck(), ServiceConditionConfigurationsReady, t) +} + +func TestServiceNotOwnedStuff(t *testing.T) { + svc := &ServiceStatus{} + svc.InitializeConditions() + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionOngoing(svc.duck(), ServiceConditionRoutesReady, t) + + svc.MarkRouteNotOwned("mark") + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionRoutesReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) + + svc.MarkConfigurationNotOwned("jon") + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionConfigurationsReady, t) + apitestv1.CheckConditionFailed(svc.duck(), ServiceConditionReady, t) +} + +func TestRouteStatusPropagation(t *testing.T) { + svc := &Service{} + + rsf := RouteStatusFields{ + URL: &apis.URL{ + Scheme: "http", + Host: "route.namespace.example.com", + }, + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + RevisionName: "newstuff", + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Percent: nil, + RevisionName: "oldstuff", + }, + }}, + } + + svc.Status.PropagateRouteStatus(&RouteStatus{ + RouteStatusFields: rsf, + }) + + want := ServiceStatus{ + RouteStatusFields: rsf, + } + + if diff := cmp.Diff(want, svc.Status); diff != "" { + t.Errorf("unexpected ServiceStatus (-want +got): %s", diff) + } +} +func TestServiceGetGroupVersionKind(t *testing.T) { + s := &Service{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1alpha1", + Kind: "Service", + } + if got := s.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_types.go similarity index 97% rename from 
test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_types.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_types.go index f230f44830..16580a386f 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_types.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_types.go @@ -19,9 +19,9 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/knative/pkg/apis" - duckv1beta1 "github.com/knative/pkg/apis/duck/v1beta1" - "github.com/knative/pkg/kmeta" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" ) // +genclient @@ -176,7 +176,7 @@ const ( // ServiceStatus represents the Status stanza of the Service resource. type ServiceStatus struct { - duckv1beta1.Status `json:",inline"` + duckv1.Status `json:",inline"` RouteStatusFields `json:",inline"` diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation.go similarity index 92% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_validation.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation.go index 79d3a8d408..5798ab249b 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/service_validation.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation.go @@ -20,12 +20,12 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" - "github.com/knative/serving/pkg/apis/serving" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" - "github.com/knative/serving/pkg/apis/serving/v1beta1" + v1 "knative.dev/serving/pkg/apis/serving/v1" ) // Validate validates the fields belonging to Service @@ -42,7 +42,8 @@ func (s *Service) 
Validate(ctx context.Context) (errs *apis.FieldError) { if apis.IsInUpdate(ctx) { original := apis.GetBaseline(ctx).(*Service) - + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, s.Spec, original.GetAnnotations(), + s.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) field, currentConfig := s.Spec.getConfigurationSpec() _, originalConfig := original.Spec.getConfigurationSpec() @@ -120,7 +121,7 @@ func (ss *ServiceSpec) Validate(ctx context.Context) *apis.FieldError { errs = errs.Also(ss.RouteSpec.Validate( // Within the context of Service, the RouteSpec has a default // configurationName. - v1beta1.WithDefaultConfigurationName(ctx))) + v1.WithDefaultConfigurationName(ctx))) } if len(set) > 1 { @@ -169,7 +170,7 @@ func (rt *ReleaseType) Validate(ctx context.Context) *apis.FieldError { } if numRevisions < 2 && rt.RolloutPercent != 0 { - errs = errs.Also(apis.ErrInvalidValue(rt.RolloutPercent, "rolloutPercent")) + errs = errs.Also(apis.ErrGeneric("may not set rolloutPercent for a single revision", "rolloutPercent")) } if rt.RolloutPercent < 0 || rt.RolloutPercent > 99 { diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation_test.go new file mode 100644 index 0000000000..0074b4fdf8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/service_validation_test.go @@ -0,0 +1,1343 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "strings" + "testing" + + "knative.dev/serving/pkg/apis/config" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +const incorrectDNS1035Label = "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]" + +func TestServiceValidation(t *testing.T) { + tests := []struct { + name string + s *Service + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "valid runLatest", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid runLatest (has spec.generation)", + wc: apis.DisallowDeprecated, + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedGeneration: 12, + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.generation", "spec.runLatest", + 
"spec.runLatest.configuration.revisionTemplate"), + }, { + name: "valid pinned", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "asdf", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "valid pinned (deprecated disallowed)", + wc: apis.DisallowDeprecated, + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "asdf", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.pinned", + "spec.pinned.configuration.revisionTemplate"), + }, { + name: "valid release -- one revision", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "valid release -- two revisions", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf", "fdsa"}, + RolloutPercent: 42, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "valid manual", + s: &Service{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedManual: &ManualType{}, + }, + }, + want: apis.ErrDisallowedFields("spec.manual"), + }, { + name: "invalid multiple types", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + DeprecatedPinned: &PinnedType{ + RevisionName: "asdf", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got both", + Paths: []string{"spec.pinned", "spec.runLatest"}, + }, + }, { + name: "invalid missing type", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{"spec.pinned", "spec.release", + "spec.template", "spec.runLatest"}, + }, + }, { + name: "invalid runLatest", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "foo", + Image: "hellworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.runLatest.configuration.revisionTemplate.spec.container.lifecycle"), + }, { + name: "invalid pinned", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "asdf", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: 
&RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "foo", + Image: "hellworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.pinned.configuration.revisionTemplate.spec.container.lifecycle"), + }, { + name: "invalid release -- too few revisions; nil", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrMissingField("spec.release.revisions"), + }, { + name: "invalid release -- revision name invalid, long", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{strings.Repeat("a", 64)}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [must be no more than 63 characters]", "spec.release.revisions[0]"), + }, { + name: "invalid release -- revision name invalid, incorrect", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{".negative"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue(incorrectDNS1035Label, "spec.release.revisions[0]"), + }, { + name: "valid release -- with @latest", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + 
DeprecatedRelease: &ReleaseType{ + Revisions: []string{"s-1-00001", ReleaseLatestRevisionKeyword}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid release -- too few revisions; empty slice", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrMissingField("spec.release.revisions"), + }, { + name: "invalid release -- too many revisions", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf", "fdsa", "abcde"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrOutOfBoundsValue(3, 1, 2, "spec.release.revisions"), + }, { + name: "invalid release -- rollout greater than 99", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf", "fdsa"}, + RolloutPercent: 100, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrOutOfBoundsValue(100, 0, 99, "spec.release.rolloutPercent"), + }, { + name: "invalid release -- rollout less than 0", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ 
+ DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf", "fdsa"}, + RolloutPercent: -50, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrOutOfBoundsValue(-50, 0, 99, "spec.release.rolloutPercent"), + }, { + name: "invalid release -- non-zero rollout for single revision", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"asdf"}, + RolloutPercent: 10, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: apis.ErrGeneric("may not set rolloutPercent for a single revision", "spec.release.rolloutPercent"), + }, { + name: "invalid name - dots", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: incorrectDNS1035Label, + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid name - too long", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"metadata.name"}, + }, + }, { + name: 
"runLatest with traffic", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: apis.ErrMultipleOneOf("spec.runLatest", "spec.traffic"), + }, { + name: "valid v1beta1 subset (pinned)", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "valid-00001", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid v1beta1 subset (deprecated field within inline spec)", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedConcurrencyModel: "Multi", + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "valid-00001", + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.template.spec.concurrencyModel"), + }, { + name: "valid v1beta1 subset (run latest)", + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: 
ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "valid inline", + // Should not affect anything. + wc: apis.DisallowDeprecated, + s: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + }, + }, + }, + want: nil, + }} + + // TODO(mattmoor): Add a test for default configurationName + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + if diff := cmp.Diff(test.want.Error(), test.s.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate() (-want, +got) = %v", diff) + } + }) + } +} + +func TestRunLatestTypeValidation(t *testing.T) { + tests := []struct { + name string + rlt *RunLatestType + want *apis.FieldError + }{{ + name: "valid", + rlt: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "propagate revision failures", + rlt: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: 
&corev1.Container{ + Name: "stuart", + Image: "hellworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("configuration.revisionTemplate.spec.container.lifecycle"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.rlt.Validate(context.Background()).Error()); diff != "" { + t.Errorf("validateContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestPinnedTypeValidation(t *testing.T) { + tests := []struct { + name string + pt *PinnedType + want *apis.FieldError + }{{ + name: "valid", + pt: &PinnedType{ + RevisionName: "foo", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: nil, + }, { + name: "missing revision name", + pt: &PinnedType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "hellworld", + }, + }, + }, + }, + }, + want: apis.ErrMissingField("revisionName"), + }, { + name: "propagate revision failures", + pt: &PinnedType{ + RevisionName: "foo", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: "stuart", + Image: "hellworld", + Lifecycle: &corev1.Lifecycle{}, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("configuration.revisionTemplate.spec.container.lifecycle"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if diff := cmp.Diff(test.want.Error(), test.pt.Validate(context.Background()).Error()); diff != "" { + t.Errorf("validateContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestImmutableServiceFields(t *testing.T) { + tests := []struct { + name string + new *Service + old *Service + want 
*apis.FieldError + }{{ + name: "without byo-name", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (name change)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (mode change, no delta)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + 
Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (mode change, with delta)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "bar", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (mode change to manual)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedPinned: &PinnedType{ + RevisionName: "bar", + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedManual: &ManualType{}, + }, + }, + want: nil, + }, { + name: "bad byo-name (mode change, with delta)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:foo", + }, + }, + }, + }, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: ServiceSpec{ + DeprecatedRelease: &ReleaseType{ + Revisions: []string{"foo"}, + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld:bar", + }, + }, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.runLatest.configuration.revisionTemplate.metadata.name"}, + Details: "{*v1alpha1.RevisionTemplateSpec}.Spec.DeprecatedContainer.Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + if diff := cmp.Diff(test.want.Error(), test.new.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func TestServiceSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + service *Service + subresource string + want *apis.FieldError + }{{ + name: "status update with valid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: 
&RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "non-status sub resource update with valid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: ServiceSpec{ + DeprecatedRunLatest: &RunLatestType{ + Configuration: ConfigurationSpec{ + DeprecatedRevisionTemplate: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "helloworld", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: 
ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.runLatest.configuration.revisionTemplate.spec.timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinSubResourceUpdate(ctx, test.service, test.subresource) + if diff := cmp.Diff(test.want.Error(), test.service.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getServiceSpec(image string) ServiceSpec { + return ServiceSpec{ + ConfigurationSpec: ConfigurationSpec{ + Template: &RevisionTemplateSpec{ + Spec: RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{Containers: []corev1.Container{{ + Image: image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + }, + }, + RouteSpec: RouteSpec{ + Traffic: []TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100)}, + }}, + }, + } +} + +func TestServiceAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Service + this *Service + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: &apis.FieldError{Message: "annotation value is immutable", + Paths: 
[]string{"metadata.annotations." + serving.CreatorAnnotation}}, + }, { + name: "update lastModifier without spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, "metadata.annotations."+serving.UpdaterAnnotation), + }, { + name: "update lastModifier with spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getServiceSpec("helloworld:bar"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go similarity index 98% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go index 6980f42ce1..8d347d821b 100644 --- 
a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,10 +21,10 @@ limitations under the License. package v1alpha1 import ( - apis "github.com/knative/pkg/apis" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" + apis "knative.dev/pkg/apis" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -75,7 +75,7 @@ func (in *Configuration) DeepCopyObject() runtime.Object { func (in *ConfigurationList) DeepCopyInto(out *ConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Configuration, len(*in)) @@ -256,7 +256,7 @@ func (in *Revision) DeepCopyObject() runtime.Object { func (in *RevisionList) DeepCopyInto(out *RevisionList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Revision, len(*in)) @@ -379,7 +379,7 @@ func (in *Route) DeepCopyObject() runtime.Object { func (in *RouteList) DeepCopyInto(out *RouteList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Route, len(*in)) @@ -531,7 +531,7 @@ func (in *Service) DeepCopyObject() runtime.Object { func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = 
*in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion.go index 24d17044db..ede69e83f2 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/configuration_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // ConvertUp implements apis.Convertible diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion_test.go new file mode 100644 index 0000000000..0e4ec01174 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" +) + +func TestConfigurationConversionBadType(t *testing.T) { + good, bad := &Configuration{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults.go new file mode 100644 index 0000000000..eb48e8a79c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" +) + +// SetDefaults implements apis.Defaultable +func (c *Configuration) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, c.ObjectMeta) + c.Spec.SetDefaults(apis.WithinSpec(ctx)) + if c.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Configuration).Spec, c.Spec, c) + } else { + serving.SetUserInfo(ctx, nil, c.Spec, c) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults_test.go new file mode 100644 index 0000000000..8b00138463 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_defaults_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + "knative.dev/pkg/ptr" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestConfigurationDefaulting(t *testing.T) { + tests := []struct { + name string + in *Configuration + want *Configuration + }{{ + name: "empty", + in: &Configuration{}, + want: &Configuration{ + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if !cmp.Equal(got, test.want, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} + +func TestConfigurationUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Configuration) *Configuration { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Configuration + prev *Configuration + wantAnns map[string]string + }{ + { + name: "create-new", + user: u1, + this: &Configuration{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Configuration{}, + prev: &Configuration{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Configuration{}), + prev: withUserAnns(u1, u1, &Configuration{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Configuration{ + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + prev: &Configuration{ + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Configuration{ + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }), + prev: withUserAnns(u1, u2, &Configuration{ + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go 
b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go new file mode 100644 index 0000000000..b2b1297b82 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// GetGroupVersionKind returns the GroupVersionKind. +func (r *Configuration) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Configuration") +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle_test.go new file mode 100644 index 0000000000..57a32122ad --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_lifecycle_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1beta1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestConfigurationDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Configuration{}, test.t) + if err != nil { + t.Errorf("VerifyType(Configuration, %T) = %v", test.t, err) + } + }) + } +} + +func TestConfigurationGetGroupVersionKind(t *testing.T) { + r := &Configuration{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1beta1", + Kind: "Configuration", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_types.go new file mode 100644 index 0000000000..fbc18a92c5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Configuration represents the "floating HEAD" of a linear history of Revisions. +// Users create new Revisions by updating the Configuration's spec. +// The "latest created" revision's name is available under status, as is the +// "latest ready" revision's name. +// See also: https://github.com/knative/serving/blob/master/docs/spec/overview.md#configuration +type Configuration struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec v1.ConfigurationSpec `json:"spec,omitempty"` + + // +optional + Status v1.ConfigurationStatus `json:"status,omitempty"` +} + +// Verify that Configuration adheres to the appropriate interfaces. +var ( + // Check that Configuration may be validated and defaulted. + _ apis.Validatable = (*Configuration)(nil) + _ apis.Defaultable = (*Configuration)(nil) + + // Check that Configuration can be converted to higher versions. + _ apis.Convertible = (*Configuration)(nil) + + // Check that we can create OwnerReferences to a Configuration. + _ kmeta.OwnerRefable = (*Configuration)(nil) +) + +const ( + // ConfigurationConditionReady is set when the configuration's latest + // underlying revision has reported readiness. 
+ ConfigurationConditionReady = apis.ConditionReady +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigurationList is a list of Configuration resources +type ConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Configuration `json:"items"` +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation.go new file mode 100644 index 0000000000..f4df84744c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" +) + +// Validate makes sure that Configuration is properly configured. +func (c *Configuration) Validate(ctx context.Context) (errs *apis.FieldError) { + // If we are in a status sub resource update, the metadata and spec cannot change. + // So, to avoid rejecting controller status updates due to validations that may + // have changed (i.e. due to config-defaults changes), we elide the metadata and + // spec validation. 
+ if !apis.IsInStatusUpdate(ctx) { + errs = errs.Also(serving.ValidateObjectMetadata(c.GetObjectMeta()).Also( + c.validateLabels().ViaField("labels")).ViaField("metadata")) + ctx = apis.WithinParent(ctx, c.ObjectMeta) + errs = errs.Also(c.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + } + + errs = errs.Also(c.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Configuration) + // Don't validate annotations(creator and lastModifier) when configuration owned by service + // validate only when configuration created independently. + if c.OwnerReferences == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, c.Spec, original.GetAnnotations(), + c.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } + err := c.Spec.Template.VerifyNameChange(ctx, original.Spec.Template) + errs = errs.Also(err.ViaField("spec.template")) + } + + return errs +} + +// validateLabels function validates configuration labels +func (c *Configuration) validateLabels() (errs *apis.FieldError) { + for key, val := range c.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(serving.ValidateClusterVisibilityLabel(val)) + case key == serving.RouteLabelKey: + case key == serving.ServiceLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ServiceLabelKey, "Service", c.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} + +// verifyLabelOwnerRef function verifies the owner references of resource with label key has val value. 
+func verifyLabelOwnerRef(val, label, resource string, ownerRefs []metav1.OwnerReference) (errs *apis.FieldError) { + for _, ref := range ownerRefs { + if ref.Kind == resource && val == ref.Name { + return + } + } + return errs.Also(apis.ErrMissingField(label)) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation_test.go new file mode 100644 index 0000000000..0ef268cb83 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/configuration_validation_test.go @@ -0,0 +1,863 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" +) + +func TestConfigurationValidation(t *testing.T) { + tests := []struct { + name string + c *Configuration + want *apis.FieldError + }{{ + name: "valid", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "valid BYO name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "invalid name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "name or generateName is required", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid BYO name (with generateName)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "byo-name-", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: 
corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.template.metadata.name"), + }, { + name: "invalid BYO name (not prefixed)", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue(`"foo" must have prefix "byo-name-"`, + "spec.template.metadata.name"), + }, { + name: "invalid name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo.bar", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.template.metadata.name"), + }, { + name: "invalid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "foo.bar", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("not a DNS 1035 label prefix: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + "spec.template.metadata.generateName"), + }, { + name: "valid generate name for configuration spec", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "valid-generatename", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "hellworld", + }}, + }, + }, + }, + }, + }, + want: nil, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.c.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestConfigurationLabelValidation(t *testing.T) { + validConfigSpec := v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + } + tests := []struct { + name string + c *Configuration + want 
*apis.FieldError + }{{ + name: "valid visibility name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "invalid visibility name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-value", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrInvalidValue("bad-value", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid route name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "valid knative service name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: nil, + }, { + name: "invalid knative service name without matching owner references", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "absent-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name with multiple owner ref", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: 
"NewSerice", + Name: "test-new-svc", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validConfigSpec, + }, + }, { + name: "invalid knative service name", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "absent-svc", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "Mismatch knative service label and owner ref", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validConfigSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative label", + c: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: validConfigSpec, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.c.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} +func TestImmutableConfigurationFields(t *testing.T) { + tests := []struct { + name string + new *Configuration + old *Configuration + want *apis.FieldError + }{{ + name: "without byo-name", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + 
}, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "good byo name (no change)", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + want: nil, + }, { + name: "bad byo name change", + new: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: 
v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + }, + old: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.template.metadata.name"}, + Details: "{*v1.RevisionTemplateSpec}.Spec.PodSpec.Containers[0].Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + got := test.new.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v\nwant: %v\ngot: %v", + diff, test.want, got) + } + }) + } +} + +func TestConfigurationSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + config *Configuration + subresource string + want *apis.FieldError + }{{ + name: "status update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "valid", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "non-status sub resource update with valid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + config: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.template.spec.timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinSubResourceUpdate(ctx, test.config, test.subresource) + if diff := cmp.Diff(test.want.Error(), test.config.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} + +func getConfigurationSpec(image string) v1.ConfigurationSpec { + return v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: 
image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + } +} + +func TestConfigurationAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Configuration + this *Configuration + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update creator annotation with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier without spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: 
getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier with spec changes", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getConfigurationSpec("helloworld:bar"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1beta1", + Kind: serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as configuration owned by service", + this: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1beta1", + Kind: 
serving.GroupName, + }}, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + prev: &Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getConfigurationSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/doc.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/doc.go similarity index 100% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/doc.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/doc.go diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/register.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/register.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register.go index 564de10468..1e8d182282 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/register.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register.go @@ -17,7 +17,7 @@ limitations under the License. 
package v1beta1 import ( - "github.com/knative/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register_test.go new file mode 100644 index 0000000000..a9e1881f12 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/register_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1beta1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" +) + +func TestRegisterHelpers(t *testing.T) { + if got, want := Kind("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Kind(Revision) = %v, want %v", got.String(), want) + } + + if got, want := Resource("Revision"), "Revision.serving.knative.dev"; got.String() != want { + t.Errorf("Resource(Revision) = %v, want %v", got.String(), want) + } + + if got, want := SchemeGroupVersion.String(), "serving.knative.dev/v1beta1"; got != want { + t.Errorf("SchemeGroupVersion() = %v, want %v", got, want) + } + + scheme := runtime.NewScheme() + if err := addKnownTypes(scheme); err != nil { + t.Errorf("addKnownTypes() = %v", err) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion.go index fabd1e73ea..388cef7410 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // ConvertUp implements apis.Convertible diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion_test.go new file mode 100644 index 0000000000..601996b232 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in 
compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "testing" +) + +func TestRevisionConversionBadType(t *testing.T) { + good, bad := &Revision{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults.go new file mode 100644 index 0000000000..4a00953945 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" +) + +// SetDefaults implements apis.Defaultable +func (r *Revision) SetDefaults(ctx context.Context) { + r.Spec.SetDefaults(ctx) +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults_test.go new file mode 100644 index 0000000000..f03c4cdd03 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_defaults_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/config" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +var ( + defaultResources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + Limits: corev1.ResourceList{}, + } + defaultProbe = &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + } + ignoreUnexportedResources = cmpopts.IgnoreUnexported(resource.Quantity{}) +) + +func TestRevisionDefaulting(t *testing.T) { + logger := logtesting.TestLogger(t) + tests := []struct { + name string + in *Revision + want *Revision + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Revision{}, + want: &Revision{Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }}, + }, { + name: "with context", + in: &Revision{Spec: v1.RevisionSpec{PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}}}, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logger) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "123", + }, + }) + + return s.ToContext(ctx) + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(123), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "readonly volumes", + in: &Revision{ + Spec: v1.RevisionSpec{ + 
PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + }}, + }}, + }, + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "foo", + VolumeMounts: []corev1.VolumeMount{{ + Name: "bar", + ReadOnly: true, + }}, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + }, + }, + }, { + name: "timeout sets to default when 0 is specified", + in: &Revision{Spec: v1.RevisionSpec{PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}, TimeoutSeconds: ptr.Int64(0)}}, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logger) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "456", + }, + }) + + return s.ToContext(ctx) + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(456), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "no overwrite", + in: &Revision{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.2", + }, + }, + }, + }}, + }, + }, + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(99), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + 
Resources: defaultResources, + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.2", + }, + }, + }, + }}, + }, + }, + }, + }, { + name: "no overwrite exec", + in: &Revision{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"echo", "hi"}, + }, + }, + }, + }}, + }, + }, + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"echo", "hi"}, + }, + }, + }, + }}, + }, + }, + }, + }, { + name: "partially initialized", + in: &Revision{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{Containers: []corev1.Container{{}}}, + }, + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }, { + name: "multiple containers", + in: &Revision{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "busybox", + }, { + Name: "helloworld", + }}, + }, + }, + }, + want: &Revision{ + Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: 
"busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }, { + Name: "helloworld", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if !cmp.Equal(test.want, got, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go similarity index 78% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go index 5cbcec446d..f737dbaf57 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle.go @@ -18,8 +18,6 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/knative/pkg/apis" ) const ( @@ -27,14 +25,7 @@ const ( DefaultUserPort = 8080 ) -var revisionCondSet = apis.NewLivingConditionSet() - // GetGroupVersionKind returns the GroupVersionKind. func (r *Revision) GetGroupVersionKind() schema.GroupVersionKind { return SchemeGroupVersion.WithKind("Revision") } - -// IsReady returns if the revision is ready to serve the requested configuration. 
-func (rs *RevisionStatus) IsReady() bool { - return revisionCondSet.Manage(rs).IsHappy() -} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle_test.go new file mode 100644 index 0000000000..384bd25be8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_lifecycle_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1beta1 + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRevisionDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Revision{}, test.t) + if err != nil { + t.Errorf("VerifyType(Revision, %T) = %v", test.t, err) + } + }) + } +} + +func TestRevisionGetGroupVersionKind(t *testing.T) { + r := &Revision{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1beta1", + Kind: "Revision", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} + +func TestIsReady(t *testing.T) { + cases := []struct { + name string + status v1.RevisionStatus + isReady bool + }{{ + name: "empty status should not be ready", + status: v1.RevisionStatus{}, + isReady: false, + }, { + name: "Different condition type should not be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: false, + }, { + name: "False condition status should not be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }, { + name: "Unknown condition status should not be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionUnknown, + }}, + }, + }, + isReady: false, + }, { + name: "Missing condition status should not be ready", + status: 
v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + }}, + }, + }, + isReady: false, + }, { + name: "True condition status should be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: RevisionConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status should be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + }, { + Type: RevisionConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + isReady: true, + }, { + name: "Multiple conditions with ready status false should not be ready", + status: v1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionTrue, + }, { + Type: RevisionConditionReady, + Status: corev1.ConditionFalse, + }}, + }, + }, + isReady: false, + }} + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if want, got := tc.isReady, tc.status.IsReady(); want != got { + t.Errorf("got: %v want: %v", got, want) + } + }) + } +} + +func TestGetContainerConcurrency(t *testing.T) { + cases := []struct { + name string + status v1.RevisionSpec + want int64 + }{{ + name: "empty revisionSpec should return default value", + status: v1.RevisionSpec{}, + want: 0, + }, { + name: "get containerConcurrency by passing value", + status: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(10), + }, + want: 10, + }} + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if want, got := tc.want, tc.status.GetContainerConcurrency(); want != got { + t.Errorf("got: %v want: %v", got, want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_types.go new 
file mode 100644 index 0000000000..fcaa9fbf47 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Revision is an immutable snapshot of code and configuration. A revision +// references a container image. Revisions are created by updates to a +// Configuration. +// +// See also: https://github.com/knative/serving/blob/master/docs/spec/overview.md#revision +type Revision struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec v1.RevisionSpec `json:"spec,omitempty"` + + // +optional + Status v1.RevisionStatus `json:"status,omitempty"` +} + +// Verify that Revision adheres to the appropriate interfaces. +var ( + // Check that Revision can be validated, can be defaulted, and has immutable fields. + _ apis.Validatable = (*Revision)(nil) + _ apis.Defaultable = (*Revision)(nil) + + // Check that Revision can be converted to higher versions. + _ apis.Convertible = (*Revision)(nil) + + // Check that we can create OwnerReferences to a Revision. 
+ _ kmeta.OwnerRefable = (*Revision)(nil) +) + +const ( + // RevisionConditionReady is set when the revision is starting to materialize + // runtime resources, and becomes true when those resources are ready. + RevisionConditionReady = apis.ConditionReady +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RevisionList is a list of Revision resources +type RevisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Revision `json:"items"` +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation.go new file mode 100644 index 0000000000..43fe2f6f26 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation.go @@ -0,0 +1,68 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "strings" + + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" + "knative.dev/serving/pkg/apis/serving" +) + +// Validate ensures Revision is properly configured. 
+func (r *Revision) Validate(ctx context.Context) *apis.FieldError { + errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).Also( + r.ValidateLabels().ViaField("labels")).ViaField("metadata") + errs = errs.Also(r.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Revision) + if diff, err := kmp.ShortDiff(original.Spec, r.Spec); err != nil { + return &apis.FieldError{ + Message: "Failed to diff Revision", + Paths: []string{"spec"}, + Details: err.Error(), + } + } else if diff != "" { + return &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: diff, + } + } + } else { + errs = errs.Also(r.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + } + + return errs +} + +// ValidateLabels function validates service labels +func (r *Revision) ValidateLabels() (errs *apis.FieldError) { + for key, val := range r.GetLabels() { + switch { + case key == serving.RouteLabelKey || key == serving.ServiceLabelKey || key == serving.ConfigurationGenerationLabelKey: + case key == serving.ConfigurationLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ConfigurationLabelKey, "Configuration", r.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, "")) + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation_test.go new file mode 100644 index 0000000000..68cab977e4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/revision_validation_test.go @@ -0,0 +1,858 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + "knative.dev/pkg/apis" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRevisionValidation(t *testing.T) { + tests := []struct { + name string + r *Revision + want *apis.FieldError + }{{ + name: "valid", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid container concurrency", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + ContainerConcurrency: ptr.Int64(-10), + }, + }, + want: apis.ErrOutOfBoundsValue( + -10, 0, config.DefaultMaxRevisionContainerConcurrency, + "spec.containerConcurrency"), + }} + + // TODO(dangerd): PodSpec validation failures. 
+ + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRevisionLabelAnnotationValidation(t *testing.T) { + validRevisionSpec := v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + } + tests := []struct { + name string + r *Revision + want *apis.FieldError + }{{ + name: "valid route name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "valid knative service name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "valid knative configuration generation label", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationGenerationLabelKey: "1234", + }, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "invalid knative configuration name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "absent-cfg", + }, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "valid knative configuration name", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "test-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + want: nil, + }, { + name: "invalid 
knative configuration name without owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "diff-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "invalid knative configuration name with multiple owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "NewConfiguration", + Name: "test-new-cfg", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: "test-cfg", + }}, + }, + Spec: validRevisionSpec, + }, + }, { + name: "Mismatch knative configuration label and owner ref", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "test-cfg", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/configuration"), + }, { + name: "invalid knative label", + r: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: validRevisionSpec, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %s", 
cmp.Diff(want, got)) + } + }) + } +} + +func TestContainerConcurrencyValidation(t *testing.T) { + tests := []struct { + name string + cc int64 + want *apis.FieldError + }{{ + name: "single", + cc: 1, + want: nil, + }, { + name: "unlimited", + cc: 0, + want: nil, + }, { + name: "ten", + cc: 10, + want: nil, + }, { + name: "invalid container concurrency (too small)", + cc: -1, + want: apis.ErrOutOfBoundsValue(-1, 0, config.DefaultMaxRevisionContainerConcurrency, + apis.CurrentField), + }, { + name: "invalid container concurrency (too large)", + cc: config.DefaultMaxRevisionContainerConcurrency + 1, + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionContainerConcurrency+1, + 0, config.DefaultMaxRevisionContainerConcurrency, apis.CurrentField), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := serving.ValidateContainerConcurrency(&test.cc) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} + +func TestRevisionSpecValidation(t *testing.T) { + tests := []struct { + name string + rs *v1.RevisionSpec + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "valid", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + want: nil, + }, { + name: "with volume (ok)", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }}, + }, + }, + want: nil, + }, { + name: "with volume name collision", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + VolumeMounts: 
[]corev1.VolumeMount{{ + MountPath: "/mount/path", + Name: "the-name", + ReadOnly: true, + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "foo", + }, + }, + }, { + Name: "the-name", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{}, + }, + }}, + }, + }, + want: (&apis.FieldError{ + Message: fmt.Sprintf(`duplicate volume name "the-name"`), + Paths: []string{"name"}, + }).ViaFieldIndex("volumes", 1), + }, { + name: "bad pod spec", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "steve", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }}, + }, + }, + want: apis.ErrDisallowedFields("containers[0].lifecycle"), + }, { + name: "missing container", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{}, + }, + }, + want: apis.ErrMissingField("containers"), + }, { + name: "too many containers", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }, { + Image: "helloworld", + }}, + }, + }, + want: apis.ErrMultipleOneOf("containers"), + }, { + name: "exceed max timeout", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(6000), + }, + want: apis.ErrOutOfBoundsValue( + 6000, 0, config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }, { + name: "exceed custom max timeout", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + 
"max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: apis.ErrOutOfBoundsValue(100, 0, 50, "timeoutSeconds"), + }, { + name: "negative timeout", + rs: &v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(-30), + }, + want: apis.ErrOutOfBoundsValue( + -30, 0, config.DefaultMaxRevisionTimeoutSeconds, + "timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.rs.Validate(ctx) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} + +func TestImmutableFields(t *testing.T) { + tests := []struct { + name string + new *Revision + old *Revision + wc func(context.Context) context.Context + want *apis.FieldError + }{{ + name: "good (no change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + // Test the case where max-revision-timeout is changed to a value + // that is less than an existing revision's timeout value. + // Existing revision should keep operating normally. 
+ name: "good (max revision timeout change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + TimeoutSeconds: ptr.Int64(100), + }, + }, + wc: func(ctx context.Context) context.Context { + s := config.NewStore(logtesting.TestLogger(t)) + s.OnConfigChanged(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "revision-timeout-seconds": "25", + "max-revision-timeout-seconds": "50"}, + }) + return s.ToContext(ctx) + }, + want: nil, + }, { + name: "bad (resources image change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + }, + }, + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("100m"), + }, + }, + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Resources.Requests["cpu"]: + -: resource.Quantity: "{i:{value:100 scale:-3} d:{Dec:} s:100m Format:DecimalSI}" + +: resource.Quantity: "{i:{value:50 scale:-3} d:{Dec:} s:50m Format:DecimalSI}" +`, + }, + }, { + name: "bad (container image change)", + new: 
&Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Image: + -: "busybox" + +: "helloworld" +`, + }, + }, { + name: "bad (concurrency model change)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + ContainerConcurrency: ptr.Int64(1), + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + ContainerConcurrency: ptr.Int64(2), + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `*{v1.RevisionSpec}.ContainerConcurrency: + -: "2" + +: "1" +`, + }, + }, { + name: "bad (new field added)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + ServiceAccountName: "foobar", + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +`, + }, + }, { 
+ name: "bad (multiple changes)", + new: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "foobar", + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + old: &Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: `{v1.RevisionSpec}.PodSpec.Containers[0].Image: + -: "busybox" + +: "helloworld" +{v1.RevisionSpec}.PodSpec.ServiceAccountName: + -: "" + +: "foobar" +`, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithinUpdate(context.Background(), test.old) + if test.wc != nil { + ctx = test.wc(ctx) + } + got := test.new.Validate(ctx) + if got, want := got.Error(), test.want.Error(); got != want { + t.Errorf("Validate got: %s, want: %s, diff:(-want, +got)=\n%v", got, want, cmp.Diff(got, want)) + } + }) + } +} + +func TestRevisionTemplateSpecValidation(t *testing.T) { + tests := []struct { + name string + rts *v1.RevisionTemplateSpec + want *apis.FieldError + }{{ + name: "valid", + rts: &v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + name: "empty spec", + rts: &v1.RevisionTemplateSpec{}, + want: apis.ErrMissingField("spec.containers"), + }, { + name: "nested spec error", + rts: &v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "kevin", + Image: "helloworld", + Lifecycle: &corev1.Lifecycle{}, + }}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.containers[0].lifecycle"), + }, { + name: "has revision template name", + rts: &v1.RevisionTemplateSpec{ + 
ObjectMeta: metav1.ObjectMeta{ + // We let users bring their own revision name. + Name: "parent-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid metadata.annotations for scale", + rts: &v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + autoscaling.MinScaleAnnotationKey: "5", + autoscaling.MaxScaleAnnotationKey: "", + }, + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 1 <= <= 2147483647", + Paths: []string{autoscaling.MaxScaleAnnotationKey}, + }).ViaField("annotations").ViaField("metadata"), + }, { + name: "Queue sidecar resource percentage annotation more than 100", + rts: &v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "200", + }, + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "expected 0.1 <= 200 <= 100", + Paths: []string{serving.QueueSideCarResourcePercentageAnnotation}, + }).ViaField("metadata.annotations"), + }, { + name: "Invalid queue sidecar resource percentage annotation", + rts: &v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "50mx", + }, + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld", + }}, + }, + }, + }, + want: (&apis.FieldError{ + Message: "invalid value: 50mx", + Paths: []string{fmt.Sprintf("[%s]", serving.QueueSideCarResourcePercentageAnnotation)}, + }).ViaField("metadata.annotations"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { 
+ ctx := apis.WithinParent(context.Background(), metav1.ObjectMeta{ + Name: "parent", + }) + + got := test.rts.Validate(ctx) + if got, want := got.Error(), test.want.Error(); !cmp.Equal(got, want) { + t.Errorf("Validate (-want, +got) = %v", cmp.Diff(want, got)) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion.go index 11fc3527bc..3fa1bb1f48 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/route_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // ConvertUp implements apis.Convertible diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion_test.go new file mode 100644 index 0000000000..350ba7f846 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" +) + +func TestRouteConversionBadType(t *testing.T) { + good, bad := &Route{}, &Service{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults.go new file mode 100644 index 0000000000..115c2903c2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" +) + +// SetDefaults implements apis.Defaultable +func (r *Route) SetDefaults(ctx context.Context) { + r.Spec.SetDefaults(apis.WithinSpec(ctx)) + if r.GetOwnerReferences() == nil { + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Route).Spec, r.Spec, r) + } else { + serving.SetUserInfo(ctx, nil, r.Spec, r) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults_test.go new file mode 100644 index 0000000000..0c6bba23a2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_defaults_test.go @@ -0,0 +1,262 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestRouteDefaulting(t *testing.T) { + tests := []struct { + name string + in *Route + want *Route + wc func(context.Context) context.Context + }{{ + name: "empty", + in: &Route{}, + want: &Route{}, + }, { + name: "empty w/ default configuration", + in: &Route{}, + want: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + wc: v1.WithDefaultConfigurationName, + }, { + // Make sure it keeps a 'nil' as a 'nil' and not 'zero' + name: "implied zero percent", + in: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + }}, + }, + }, + want: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: nil, + LatestRevision: ptr.Bool(false), + }}, + }, + }, + wc: v1.WithDefaultConfigurationName, + }, { + // Just to make sure it doesn't convert a 'zero' into a 'nil' + name: "explicit zero percent", + in: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: ptr.Int64(0), + }}, + }, + }, + want: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, { + RevisionName: "bar", + Percent: ptr.Int64(0), + LatestRevision: ptr.Bool(false), + }}, + }, + }, + wc: v1.WithDefaultConfigurationName, + }, { + name: "latest revision defaulting", + in: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "foo", + Percent: 
ptr.Int64(12), + }, { + RevisionName: "bar", + Percent: ptr.Int64(34), + }, { + ConfigurationName: "baz", + Percent: ptr.Int64(54), + }}, + }, + }, + want: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(12), + LatestRevision: ptr.Bool(false), + }, { + RevisionName: "bar", + Percent: ptr.Int64(34), + LatestRevision: ptr.Bool(false), + }, { + ConfigurationName: "baz", + Percent: ptr.Int64(54), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + ctx := context.Background() + if test.wc != nil { + ctx = test.wc(ctx) + } + got.SetDefaults(ctx) + if !cmp.Equal(test.want, got) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got)) + } + }) + } +} + +func TestRouteUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + withUserAnns := func(u1, u2 string, s *Route) *Route { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + tests := []struct { + name string + user string + this *Route + prev *Route + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Route{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Route{}, + prev: &Route{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Route{}), + prev: withUserAnns(u1, u1, &Route{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + ConfigurationName: "new", + }}, + }, + }, + prev: &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + ConfigurationName: "old", + }}, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + ConfigurationName: "new", + }}, + }, + }), + prev: withUserAnns(u1, u2, &Route{ + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + ConfigurationName: "old", + }}, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle.go new file mode 100644 index 0000000000..f4b93117da --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Knative 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// GetGroupVersionKind returns the GroupVersionKind. +func (r *Route) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Route") +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle_test.go new file mode 100644 index 0000000000..b97df8f65d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_lifecycle_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1beta1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestRouteDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Route{}, test.t) + if err != nil { + t.Errorf("VerifyType(Route, %T) = %v", test.t, err) + } + }) + } +} + +func TestRouteGetGroupVersionKind(t *testing.T) { + r := &Route{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1beta1", + Kind: "Route", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_types.go new file mode 100644 index 0000000000..f98d1fc590 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_types.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Route is responsible for configuring ingress over a collection of Revisions. +// Some of the Revisions a Route distributes traffic over may be specified by +// referencing the Configuration responsible for creating them; in these cases +// the Route is additionally responsible for monitoring the Configuration for +// "latest ready revision" changes, and smoothly rolling out latest revisions. +// See also: https://github.com/knative/serving/blob/master/docs/spec/overview.md#route +type Route struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds the desired state of the Route (from the client). + // +optional + Spec v1.RouteSpec `json:"spec,omitempty"` + + // Status communicates the observed state of the Route (from the controller). + // +optional + Status v1.RouteStatus `json:"status,omitempty"` +} + +// Verify that Route adheres to the appropriate interfaces. +var ( + // Check that Route may be validated and defaulted. + _ apis.Validatable = (*Route)(nil) + _ apis.Defaultable = (*Route)(nil) + + // Check that Route can be converted to higher versions. + _ apis.Convertible = (*Route)(nil) + + // Check that we can create OwnerReferences to a Route. + _ kmeta.OwnerRefable = (*Route)(nil) +) + +const ( + // RouteConditionReady is set when the service is configured + // and has available backends ready to receive traffic. 
+ RouteConditionReady = apis.ConditionReady +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RouteList is a list of Route resources +type RouteList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Route `json:"items"` +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation.go new file mode 100644 index 0000000000..d2ccf00372 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "strings" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" +) + +// Validate makes sure that Route is properly configured. 
+func (r *Route) Validate(ctx context.Context) *apis.FieldError { + errs := serving.ValidateObjectMetadata(r.GetObjectMeta()).Also( + r.validateLabels().ViaField("labels")).ViaField("metadata") + errs = errs.Also(r.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(r.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Route) + // Don't validate annotations(creator and lastModifier) when route owned by service + // validate only when route created independently. + if r.OwnerReferences == nil { + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, r.Spec, original.GetAnnotations(), + r.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + } + } + return errs +} + +// validateLabels function validates route labels. +func (r *Route) validateLabels() (errs *apis.FieldError) { + for key, val := range r.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(serving.ValidateClusterVisibilityLabel(val)) + case key == serving.ServiceLabelKey: + errs = errs.Also(verifyLabelOwnerRef(val, serving.ServiceLabelKey, "Service", r.GetOwnerReferences())) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation_test.go new file mode 100644 index 0000000000..527978d500 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/route_validation_test.go @@ -0,0 +1,814 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" +) + +func TestTrafficTargetValidation(t *testing.T) { + tests := []struct { + name string + tt *v1.TrafficTarget + want *apis.FieldError + wc func(context.Context) context.Context + }{{ + name: "valid with revisionName", + tt: &v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with revisionName and name (spec)", + tt: &v1.TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with revisionName and name (status)", + tt: &v1.TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + URL: &apis.URL{ + Scheme: "http", + Host: "foo.bar.com", + }, + }, + wc: apis.WithinStatus, + want: nil, + }, { + name: "invalid with revisionName and name (status)", + tt: &v1.TrafficTarget{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinStatus, + want: apis.ErrMissingField("url"), + }, { + name: "invalid with bad revisionName", + tt: &v1.TrafficTarget{ + RevisionName: "b ar", + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: apis.ErrInvalidKeyName( + "b ar", "revisionName", "name part must consist of 
alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')"), + }, { + name: "valid with revisionName and latestRevision", + tt: &v1.TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with revisionName and latestRevision (spec)", + tt: &v1.TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(12), + }, + wc: apis.WithinSpec, + want: apis.ErrGeneric(`may not set revisionName "bar" when latestRevision is true`, "latestRevision"), + }, { + name: "valid with revisionName and latestRevision (status)", + tt: &v1.TrafficTarget{ + RevisionName: "bar", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(12), + }, + wc: apis.WithinStatus, + want: nil, + }, { + name: "valid with configurationName", + tt: &v1.TrafficTarget{ + ConfigurationName: "bar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "valid with configurationName and name (spec)", + tt: &v1.TrafficTarget{ + Tag: "foo", + ConfigurationName: "bar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with bad configurationName", + tt: &v1.TrafficTarget{ + ConfigurationName: "b ar", + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: apis.ErrInvalidKeyName( + "b ar", "configurationName", "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')"), + }, { + name: "valid with configurationName and latestRevision", + tt: &v1.TrafficTarget{ + ConfigurationName: "blah", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: nil, + }, { + name: "invalid with configurationName and latestRevision", + tt: &v1.TrafficTarget{ + ConfigurationName: "blah", + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(37), + }, + wc: apis.WithinSpec, + want: apis.ErrGeneric(`may not set revisionName "" when latestRevision is false`, "latestRevision"), + }, { + name: "invalid with configurationName and default configurationName", + tt: &v1.TrafficTarget{ + ConfigurationName: "blah", + Percent: ptr.Int64(37), + }, + wc: v1.WithDefaultConfigurationName, + want: apis.ErrDisallowedFields("configurationName"), + }, { + name: "valid with only default configurationName", + tt: &v1.TrafficTarget{ + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return v1.WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: nil, + }, { + name: "valid with default configurationName and latestRevision", + tt: &v1.TrafficTarget{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return v1.WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: nil, + }, { + name: "invalid with default configurationName and latestRevision", + tt: &v1.TrafficTarget{ + LatestRevision: ptr.Bool(false), + Percent: ptr.Int64(37), + }, + wc: func(ctx context.Context) context.Context { + return v1.WithDefaultConfigurationName(apis.WithinSpec(ctx)) + }, + want: apis.ErrGeneric(`may not set revisionName "" when latestRevision is false`, "latestRevision"), + }, { + name: "invalid without revisionName in status", + tt: &v1.TrafficTarget{ + ConfigurationName: "blah", + Percent: ptr.Int64(37), + }, + wc: 
apis.WithinStatus, + want: apis.ErrMissingField("revisionName"), + }, { + name: "valid with revisionName and default configurationName", + tt: &v1.TrafficTarget{ + RevisionName: "bar", + Percent: ptr.Int64(12), + }, + wc: v1.WithDefaultConfigurationName, + want: nil, + }, { + name: "valid with no percent", + tt: &v1.TrafficTarget{ + ConfigurationName: "booga", + }, + want: nil, + }, { + name: "valid with nil percent", + tt: &v1.TrafficTarget{ + ConfigurationName: "booga", + Percent: nil, + }, + want: nil, + }, { + name: "valid with zero percent", + tt: &v1.TrafficTarget{ + ConfigurationName: "booga", + Percent: ptr.Int64(0), + }, + want: nil, + }, { + name: "valid with no name", + tt: &v1.TrafficTarget{ + ConfigurationName: "booga", + Percent: ptr.Int64(100), + }, + want: nil, + }, { + name: "invalid with both", + tt: &v1.TrafficTarget{ + RevisionName: "foo", + ConfigurationName: "bar", + }, + want: &apis.FieldError{ + Message: "expected exactly one, got both", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid with neither", + tt: &v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: []string{"revisionName", "configurationName"}, + }, + }, { + name: "invalid percent too low", + tt: &v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(-5), + }, + want: apis.ErrOutOfBoundsValue("-5", "0", "100", "percent"), + }, { + name: "invalid percent too high", + tt: &v1.TrafficTarget{ + RevisionName: "foo", + Percent: ptr.Int64(101), + }, + want: apis.ErrOutOfBoundsValue("101", "0", "100", "percent"), + }, { + name: "disallowed url set", + tt: &v1.TrafficTarget{ + ConfigurationName: "foo", + Percent: ptr.Int64(100), + URL: &apis.URL{ + Host: "should.not.be.set", + }, + }, + wc: apis.WithinSpec, + want: apis.ErrDisallowedFields("url"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + if test.wc 
!= nil { + ctx = test.wc(ctx) + } + got := test.tt.Validate(ctx) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRouteValidation(t *testing.T) { + tests := []struct { + name string + r *Route + want *apis.FieldError + }{{ + name: "valid", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + Status: v1.RouteStatus{ + RouteStatusFields: v1.RouteStatusFields{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + URL: &apis.URL{ + Scheme: "http", + Host: "bar.blah.com", + }, + }}, + }, + }, + }, + want: nil, + }, { + name: "valid split", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "prod", + RevisionName: "foo", + Percent: ptr.Int64(90), + }, { + Tag: "experiment", + ConfigurationName: "bar", + Percent: ptr.Int64(10), + }}, + }, + }, + want: nil, + }, { + name: "missing url in status", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + Status: v1.RouteStatus{ + RouteStatusFields: v1.RouteStatusFields{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "missing field(s)", + Paths: []string{ + "status.traffic[0].url", + }, + }, + }, { + name: "invalid traffic entry (missing oneof)", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "expected exactly one, got neither", + Paths: 
[]string{ + "spec.traffic[0].configurationName", + "spec.traffic[0].revisionName", + }, + }, + }, { + name: "invalid traffic entry (multiple names)", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(50), + }, { + Tag: "foo", + RevisionName: "bar", + Percent: ptr.Int64(50), + }}, + }, + }, + want: &apis.FieldError{ + Message: `Multiple definitions for "foo"`, + Paths: []string{ + "spec.traffic[0].tag", + "spec.traffic[1].tag", + }, + }, + }, { + name: "invalid name - dots", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }, + }, { + name: "invalid name - dots and spec percent is not 100", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "do.not.use.dots", + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(90), + }}, + }, + }, + want: (&apis.FieldError{ + Message: "not a DNS 1035 label: [a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character (e.g. 
'my-name', or 'abc-123', regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?')]", + Paths: []string{"metadata.name"}, + }).Also(&apis.FieldError{ + Message: "Traffic targets sum to 90, want 100", + Paths: []string{"spec.traffic"}, + }), + }, { + name: "invalid name - too long", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("a", 64), + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + }, + }, + want: &apis.FieldError{ + Message: "not a DNS 1035 label: [must be no more than 63 characters]", + Paths: []string{"metadata.name"}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestRouteLabelValidation(t *testing.T) { + validRouteSpec := v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(100), + }}, + } + tests := []struct { + name string + r *Route + want *apis.FieldError + }{{ + name: "valid visibility name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: validRouteSpec, + }, + want: nil, + }, { + name: "invalid visibility name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-value", + }, + }, + Spec: validRouteSpec, + }, + want: apis.ErrInvalidValue("bad-value", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid knative service name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + 
Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: nil, + }, { + name: "invalid knative service name", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "absent-svc", + }, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "Mismatch knative service label and owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "BrandNewService", + Name: "brand-new-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name without correct owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "absent-svc", + }}, + }, + Spec: validRouteSpec, + }, + want: apis.ErrMissingField("metadata.labels.serving.knative.dev/service"), + }, { + name: "invalid knative service name with multiple owner ref", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + serving.ServiceLabelKey: "test-svc", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "NewSerice", + Name: "test-new-svc", + }, { + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: "test-svc", + }}, + }, + Spec: validRouteSpec, + }, + }, { + name: "invalid knative label", + r: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + Labels: map[string]string{ + "serving.knative.dev/testlabel": "value", + }, + }, + Spec: 
validRouteSpec, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/testlabel", "metadata.labels"), + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func getRouteSpec(confName string) v1.RouteSpec { + return v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + ConfigurationName: confName, + }}, + } +} + +func TestRouteAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Route + this *Route + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update creator annotation with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: 
[]string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier without spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { + name: "update lastModifier with spec changes", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getRouteSpec("new"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for lastModifier annotation even after update as route owned by service", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1beta1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }, { + name: "no validation for creator annotation even after update as route owned by service", + this: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + 
serving.CreatorAnnotation: u3, + serving.UpdaterAnnotation: u1, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1beta1", + Kind: serving.GroupName, + }}, + }, + Spec: getRouteSpec("old"), + }, + prev: &Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getRouteSpec("old"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_conversion.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion.go similarity index 97% rename from test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_conversion.go rename to test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion.go index 72092831bb..ca7fdb0818 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/serving/v1beta1/service_conversion.go +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/knative/pkg/apis" + "knative.dev/pkg/apis" ) // ConvertUp implements apis.Convertible diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion_test.go new file mode 100644 index 0000000000..0768535cff --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_conversion_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the 
License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "testing" +) + +func TestServiceConversionBadType(t *testing.T) { + good, bad := &Service{}, &Revision{} + + if err := good.ConvertUp(context.Background(), bad); err == nil { + t.Errorf("ConvertUp() = %#v, wanted error", bad) + } + + if err := good.ConvertDown(context.Background(), bad); err == nil { + t.Errorf("ConvertDown() = %#v, wanted error", good) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults.go new file mode 100644 index 0000000000..906b875d48 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" +) + +// SetDefaults implements apis.Defaultable +func (s *Service) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, s.ObjectMeta) + s.Spec.SetDefaults(apis.WithinSpec(ctx)) + if apis.IsInUpdate(ctx) { + serving.SetUserInfo(ctx, apis.GetBaseline(ctx).(*Service).Spec, s.Spec, s) + } else { + serving.SetUserInfo(ctx, nil, s.Spec, s) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults_test.go new file mode 100644 index 0000000000..600510d792 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_defaults_test.go @@ -0,0 +1,352 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +func TestServiceDefaulting(t *testing.T) { + tests := []struct { + name string + in *Service + want *Service + }{{ + name: "empty", + in: &Service{}, + want: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "run latest", + in: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + }, + want: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "run latest with some default overrides", + in: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: 
v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + }, + }, + want: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(60), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }, { + name: "byo traffic block", + in: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: "foo", + Percent: ptr.Int64(90), + }, { + Tag: "candidate", + RevisionName: "bar", + Percent: ptr.Int64(10), + }, { + Tag: "latest", + }}, + }, + }, + }, + want: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: config.DefaultUserContainerName, + Image: "busybox", + Resources: defaultResources, + ReadinessProbe: defaultProbe, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultRevisionTimeoutSeconds), + ContainerConcurrency: ptr.Int64(config.DefaultContainerConcurrency), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: 
[]v1.TrafficTarget{{ + Tag: "current", + RevisionName: "foo", + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, { + Tag: "candidate", + RevisionName: "bar", + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(false), + }, { + Tag: "latest", + LatestRevision: ptr.Bool(true), + }}, + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.in + got.SetDefaults(context.Background()) + if !cmp.Equal(got, test.want, ignoreUnexportedResources) { + t.Errorf("SetDefaults (-want, +got) = %v", + cmp.Diff(test.want, got, ignoreUnexportedResources)) + } + }) + } +} + +func TestAnnotateUserInfo(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + + withUserAnns := func(u1, u2 string, s *Service) *Service { + a := s.GetAnnotations() + if a == nil { + a = map[string]string{} + s.SetAnnotations(a) + } + a[serving.CreatorAnnotation] = u1 + a[serving.UpdaterAnnotation] = u2 + return s + } + + tests := []struct { + name string + user string + this *Service + prev *Service + wantAnns map[string]string + }{{ + name: "create-new", + user: u1, + this: &Service{}, + prev: nil, + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + // Old objects don't have the annotation, and unless there's a change in + // data they won't get it. 
+ name: "update-no-diff-old-object", + user: u1, + this: &Service{}, + prev: &Service{}, + wantAnns: map[string]string{}, + }, { + name: "update-no-diff-new-object", + user: u2, + this: withUserAnns(u1, u1, &Service{}), + prev: withUserAnns(u1, u1, &Service{}), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, { + name: "update-diff-old-object", + user: u2, + this: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + }, + prev: &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + }, + wantAnns: map[string]string{ + serving.UpdaterAnnotation: u2, + }, + }, { + name: "update-diff-new-object", + user: u3, + this: withUserAnns(u1, u2, &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + }, + }, + }), + prev: withUserAnns(u1, u2, &Service{ + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(2), + }, + }, + }, + }, + }), + wantAnns: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := apis.WithUserInfo(context.Background(), &authv1.UserInfo{ + Username: test.user, + }) + if test.prev != nil { + ctx = apis.WithinUpdate(ctx, test.prev) + test.prev.SetDefaults(ctx) + } + test.this.SetDefaults(ctx) + if got, want := test.this.GetAnnotations(), test.wantAnns; !cmp.Equal(got, want) { + t.Errorf("Annotations = %v, want: %v, diff (-got, +want): %s", got, want, 
cmp.Diff(got, want)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle.go new file mode 100644 index 0000000000..ad59308fcd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// GetGroupVersionKind returns the GroupVersionKind. +func (s *Service) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("Service") +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle_test.go new file mode 100644 index 0000000000..8ecd77476b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_lifecycle_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1beta1 + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestServiceDuckTypes(t *testing.T) { + tests := []struct { + name string + t duck.Implementable + }{{ + name: "conditions", + t: &duckv1.Conditions{}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := duck.VerifyType(&Service{}, test.t) + if err != nil { + t.Errorf("VerifyType(Service, %T) = %v", test.t, err) + } + }) + } +} + +func TestServiceGetGroupVersionKind(t *testing.T) { + r := &Service{} + want := schema.GroupVersionKind{ + Group: "serving.knative.dev", + Version: "v1beta1", + Kind: "Service", + } + if got := r.GetGroupVersionKind(); got != want { + t.Errorf("got: %v, want: %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_types.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_types.go new file mode 100644 index 0000000000..3c224273ee --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_types.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Service acts as a top-level container that manages a Route and Configuration +// which implement a network service. Service exists to provide a singular +// abstraction which can be access controlled, reasoned about, and which +// encapsulates software lifecycle decisions such as rollout policy and +// team resource ownership. Service acts only as an orchestrator of the +// underlying Routes and Configurations (much as a kubernetes Deployment +// orchestrates ReplicaSets), and its usage is optional but recommended. +// +// The Service's controller will track the statuses of its owned Configuration +// and Route, reflecting their statuses and conditions as its own. +// +// See also: https://github.com/knative/serving/blob/master/docs/spec/overview.md#service +type Service struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec v1.ServiceSpec `json:"spec,omitempty"` + + // +optional + Status v1.ServiceStatus `json:"status,omitempty"` +} + +// Verify that Service adheres to the appropriate interfaces. +var ( + // Check that Service may be validated and defaulted. + _ apis.Validatable = (*Service)(nil) + _ apis.Defaultable = (*Service)(nil) + + // Check that Service can be converted to higher versions. 
+ _ apis.Convertible = (*Service)(nil) + + // Check that we can create OwnerReferences to a Service. + _ kmeta.OwnerRefable = (*Service)(nil) +) + +// ConditionType represents a Service condition value +const ( + // ServiceConditionReady is set when the service is configured + // and has available backends ready to receive traffic. + ServiceConditionReady = apis.ConditionReady +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceList is a list of Service resources +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Service `json:"items"` +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation.go new file mode 100644 index 0000000000..6daced418c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "strings" + + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/route/config" +) + +// Validate makes sure that Service is properly configured. +func (s *Service) Validate(ctx context.Context) (errs *apis.FieldError) { + // If we are in a status sub resource update, the metadata and spec cannot change. 
+ // So, to avoid rejecting controller status updates due to validations that may + // have changed (i.e. due to config-defaults changes), we elide the metadata and + // spec validation. + if !apis.IsInStatusUpdate(ctx) { + errs = errs.Also(serving.ValidateObjectMetadata(s.GetObjectMeta()).Also( + s.validateLabels().ViaField("labels")).ViaField("metadata")) + ctx = apis.WithinParent(ctx, s.ObjectMeta) + errs = errs.Also(s.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + } + + errs = errs.Also(s.Status.Validate(apis.WithinStatus(ctx)).ViaField("status")) + + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*Service) + errs = errs.Also(apis.ValidateCreatorAndModifier(original.Spec, s.Spec, original.GetAnnotations(), + s.GetAnnotations(), serving.GroupName).ViaField("metadata.annotations")) + err := s.Spec.ConfigurationSpec.Template.VerifyNameChange(ctx, + original.Spec.ConfigurationSpec.Template) + errs = errs.Also(err.ViaField("spec.template")) + } + return errs +} + +// validateLabels function validates service labels +func (s *Service) validateLabels() (errs *apis.FieldError) { + for key, val := range s.GetLabels() { + switch { + case key == config.VisibilityLabelKey: + errs = errs.Also(serving.ValidateClusterVisibilityLabel(val)) + case strings.HasPrefix(key, serving.GroupNamePrefix): + errs = errs.Also(apis.ErrInvalidKeyName(key, apis.CurrentField)) + } + } + return +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation_test.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation_test.go new file mode 100644 index 0000000000..405d8405e1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/service_validation_test.go @@ -0,0 +1,788 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + + "knative.dev/pkg/apis" +) + +func TestServiceValidation(t *testing.T) { + goodConfigSpec := v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + } + goodRouteSpec := v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + } + + tests := []struct { + name string + r *Service + want *apis.FieldError + }{{ + name: "valid run latest", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "valid visibility label", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "cluster-local", + }, + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: nil, + }, { + name: "invalid knative label", + r: &Service{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + "serving.knative.dev/name": "some-value", + }, + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: apis.ErrInvalidKeyName("serving.knative.dev/name", "metadata.labels"), + }, { + name: "valid non knative label", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + "serving.name": "some-name", + }, + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: nil, + }, { + name: "invalid visibility label value", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Labels: map[string]string{ + routeconfig.VisibilityLabelKey: "bad-label", + }, + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: goodRouteSpec, + }, + }, + want: apis.ErrInvalidValue("bad-label", "metadata.labels.serving.knative.dev/visibility"), + }, { + name: "valid release", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + LatestRevision: ptr.Bool(false), + RevisionName: "valid-00001", + Percent: ptr.Int64(98), + }, { + Tag: "candidate", + LatestRevision: ptr.Bool(false), + RevisionName: "valid-00002", + Percent: ptr.Int64(2), + }, { + Tag: "latest", + LatestRevision: ptr.Bool(true), + Percent: nil, + }}, + }, + }, + }, + want: nil, + }, { + name: "invalid configurationName", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + ConfigurationName: "valid", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrDisallowedFields("spec.traffic[0].configurationName"), + }, { + name: "invalid latestRevision", + r: 
&Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: goodConfigSpec, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "valid", + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrGeneric(`may not set revisionName "valid" when latestRevision is true`, "spec.traffic[0].latestRevision"), + }, { + name: "invalid container concurrency", + r: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + ContainerConcurrency: ptr.Int64(-10), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: apis.ErrOutOfBoundsValue( + -10, 0, config.DefaultMaxRevisionContainerConcurrency, + "spec.template.spec.containerConcurrency"), + }} + + // TODO(dangerd): PodSpec validation failures. + // TODO(mattmoor): BYO revision name. 
+ + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.r.Validate(context.Background()) + if !cmp.Equal(test.want.Error(), got.Error()) { + t.Errorf("Validate (-want, +got) = %v", + cmp.Diff(test.want.Error(), got.Error())) + } + }) + } +} + +func TestImmutableServiceFields(t *testing.T) { + tests := []struct { + name string + new *Service + old *Service + want *apis.FieldError + }{{ + name: "without byo-name", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no-byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (name change)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "byo-name-foo", // Used it! 
+ Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "byo-name-bar", // Used it! + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "good byo-name (with delta)", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "byo-name-bar", // Leave old. + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-bar", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + RevisionName: "byo-name-bar", // Used it! 
+ Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: nil, + }, { + name: "bad byo-name", + new: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + old: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "byo-name-foo", + }, + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:bar", + }}, + }, + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + want: &apis.FieldError{ + Message: "Saw the following changes without a name change (-old +new)", + Paths: []string{"spec.template.metadata.name"}, + Details: "{*v1.RevisionTemplateSpec}.Spec.PodSpec.Containers[0].Image:\n\t-: \"helloworld:bar\"\n\t+: \"helloworld:foo\"\n", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.old) + got := test.new.Validate(ctx) + if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v\nwant: %v\ngot: %v", + diff, test.want, got) + } + }) + } +} + +func TestServiceSubresourceUpdate(t *testing.T) { + tests := []struct { + name string + service *Service + subresource string + want *apis.FieldError + }{{ + name: "status update with valid revision template", + service: 
&Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "status", + want: nil, + }, { + name: "status update with invalid status", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + Status: v1.ServiceStatus{ + RouteStatusFields: v1.RouteStatusFields{ + Traffic: []v1.TrafficTarget{{ + Tag: "bar", + RevisionName: "foo", + Percent: ptr.Int64(50), URL: &apis.URL{ + Scheme: "http", + Host: 
"foo.bar.com", + }, + }}, + }, + }, + }, + subresource: "status", + want: &apis.FieldError{ + Message: "Traffic targets sum to 50, want 100", + Paths: []string{"status.traffic"}, + }, + }, { + name: "non-status sub resource update with valid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds - 1), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "foo", + want: nil, + }, { + name: "non-status sub resource update with invalid revision template", + service: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + }, + Spec: v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "helloworld:foo", + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds + 1), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + }, + }, + subresource: "foo", + want: apis.ErrOutOfBoundsValue(config.DefaultMaxRevisionTimeoutSeconds+1, 0, + config.DefaultMaxRevisionTimeoutSeconds, + "spec.template.spec.timeoutSeconds"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.service) + ctx = apis.WithinSubResourceUpdate(ctx, test.service, test.subresource) + if diff := cmp.Diff(test.want.Error(), test.service.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + 
} + }) + } +} + +func getServiceSpec(image string) v1.ServiceSpec { + return v1.ServiceSpec{ + ConfigurationSpec: v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: image, + }}, + }, + TimeoutSeconds: ptr.Int64(config.DefaultMaxRevisionTimeoutSeconds), + }, + }, + }, + RouteSpec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + LatestRevision: ptr.Bool(true), + Percent: ptr.Int64(100), + }}, + }, + } +} + +func TestServiceAnnotationUpdate(t *testing.T) { + const ( + u1 = "oveja@knative.dev" + u2 = "cabra@knative.dev" + u3 = "vaca@knative.dev" + ) + tests := []struct { + name string + prev *Service + this *Service + want *apis.FieldError + }{{ + name: "update creator annotation", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u2, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: (&apis.FieldError{Message: "annotation value is immutable", + Paths: []string{serving.CreatorAnnotation}}).ViaField("metadata.annotations"), + }, { + name: "update lastModifier without spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u2, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: apis.ErrInvalidValue(u2, serving.UpdaterAnnotation).ViaField("metadata.annotations"), + }, { 
+ name: "update lastModifier with spec changes", + this: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u3, + }, + }, + Spec: getServiceSpec("helloworld:bar"), + }, + prev: &Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid", + Annotations: map[string]string{ + serving.CreatorAnnotation: u1, + serving.UpdaterAnnotation: u1, + }, + }, + Spec: getServiceSpec("helloworld:foo"), + }, + want: nil, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + ctx = apis.WithinUpdate(ctx, test.prev) + if diff := cmp.Diff(test.want.Error(), test.this.Validate(ctx).Error()); diff != "" { + t.Errorf("Validate (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0c9d27d139 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/apis/serving/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,269 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
// Autogenerated deepcopy support for the Knative Serving v1beta1 API
// types (Configuration, Revision, Route, Service and their List
// variants). Code generated by deepcopy-gen — do not edit by hand;
// regenerate instead.
package v1beta1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Configuration) DeepCopyInto(out *Configuration) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Configuration.
func (in *Configuration) DeepCopy() *Configuration {
	if in == nil {
		return nil
	}
	out := new(Configuration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Configuration) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigurationList) DeepCopyInto(out *ConfigurationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are deep-copied element-wise into a freshly allocated slice.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Configuration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationList.
func (in *ConfigurationList) DeepCopy() *ConfigurationList {
	if in == nil {
		return nil
	}
	out := new(ConfigurationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConfigurationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Revision) DeepCopyInto(out *Revision) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Revision.
func (in *Revision) DeepCopy() *Revision {
	if in == nil {
		return nil
	}
	out := new(Revision)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Revision) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RevisionList) DeepCopyInto(out *RevisionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are deep-copied element-wise into a freshly allocated slice.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Revision, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RevisionList.
func (in *RevisionList) DeepCopy() *RevisionList {
	if in == nil {
		return nil
	}
	out := new(RevisionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RevisionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Route) DeepCopyInto(out *Route) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route.
func (in *Route) DeepCopy() *Route {
	if in == nil {
		return nil
	}
	out := new(Route)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Route) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RouteList) DeepCopyInto(out *RouteList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are deep-copied element-wise into a freshly allocated slice.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Route, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList.
func (in *RouteList) DeepCopy() *RouteList {
	if in == nil {
		return nil
	}
	out := new(RouteList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RouteList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Service) DeepCopyInto(out *Service) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
func (in *Service) DeepCopy() *Service {
	if in == nil {
		return nil
	}
	out := new(Service)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Service) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceList) DeepCopyInto(out *ServiceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are deep-copied element-wise into a freshly allocated slice.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Service, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
func (in *ServiceList) DeepCopy() *ServiceList {
	if in == nil {
		return nil
	}
	out := new(ServiceList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
+ +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/README.md b/test/vendor/knative.dev/serving/pkg/autoscaler/README.md new file mode 100644 index 0000000000..bf0440e5d8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/README.md @@ -0,0 +1 @@ +Scaling documentation has moved to the [docs folder](../../docs/scaling). diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation.go b/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation.go new file mode 100644 index 0000000000..df68be46ac --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aggregation + +import ( + "time" +) + +// Accumulator is a function accumulating buckets and their time. +type Accumulator func(time time.Time, bucket float64) + +// YoungerThan only applies the accumulator to buckets that are younger than the given +// time. +func YoungerThan(oldest time.Time, acc Accumulator) Accumulator { + return func(time time.Time, bucket float64) { + if !time.Before(oldest) { + acc(time, bucket) + } + } +} + +// Average is used to keep the values necessary to compute an average. +type Average struct { + sum float64 + count float64 +} + +// Accumulate accumulates the values needed to compute an average. 
+func (a *Average) Accumulate(_ time.Time, bucket float64) { + a.sum += bucket + a.count++ +} + +// Value returns the average or 0 if no buckets have been accumulated. +func (a *Average) Value() float64 { + if a.count == 0 { + return 0 + } + return a.sum / a.count +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation_test.go new file mode 100644 index 0000000000..b821765ffa --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/aggregation_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aggregation + +import ( + "testing" + "time" +) + +func TestAverage(t *testing.T) { + tests := []struct { + name string + values []float64 + want float64 + }{{ + name: "empty", + values: []float64{}, + want: 0.0, + }, { + name: "not empty", + values: []float64{2.0, 4.0}, + want: 3.0, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + average := Average{} + for _, value := range tt.values { + average.Accumulate(time.Now(), value) + } + + if got := average.Value(); got != tt.want { + t.Errorf("Value() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestYoungerThan(t *testing.T) { + t0 := time.Now() + t1 := t0.Add(1 * time.Second) + t2 := t0.Add(2 * time.Second) + t3 := t0.Add(3 * time.Second) + + tests := []struct { + name string + times []time.Time + oldest time.Time + want []time.Time + }{{ + name: "empty", + times: []time.Time{}, + want: []time.Time{}, + }, { + name: "drop all", + times: []time.Time{t0, t1, t2}, + oldest: t3, + want: []time.Time{}, + }, { + name: "keep all", + times: []time.Time{t0, t1, t2}, + oldest: t0, + want: []time.Time{t0, t1, t2}, + }, { + name: "drop some", + times: []time.Time{t0, t1, t2}, + oldest: t2, + want: []time.Time{t2}, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := make(map[time.Time]bool) + acc := YoungerThan(tt.oldest, func(time time.Time, bucket float64) { + got[time] = true + }) + for _, t := range tt.times { + bucket := 0.0 + bucket += 1 + acc(t, bucket) + } + + if got, want := len(got), len(tt.want); got != want { + t.Errorf("len(got) = %v, want %v", got, want) + } + + for _, want := range tt.want { + if !got[want] { + t.Errorf("Expected buckets to contain %v, buckets: %v", want, got) + } + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go b/test/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go new file mode 100644 index 0000000000..efc9ccfe1a --- /dev/null +++ 
/*
Copyright 2019 The Knative Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregation

import (
	"math"
	"sync"
	"time"

	"github.com/davecgh/go-spew/spew"
)

// TimedFloat64Buckets keeps buckets that have been collected at a certain time.
// It is a fixed-size ring of float64 buckets, one bucket per `granularity`
// interval, spanning `window` in total. Bucket index is derived from wall
// time (see timeToIndex), so old entries are overwritten in place.
type TimedFloat64Buckets struct {
	// bucketsMutex guards every field below.
	bucketsMutex sync.RWMutex
	buckets      []float64
	// The total sum of all valid buckets within the window.
	windowTotal float64
	// lastWrite is the (granularity-truncated) time of the most recent Record.
	lastWrite time.Time

	granularity time.Duration
	window      time.Duration
}

// String implements the fmt.Stringer interface; it dumps the raw bucket
// slice (via go-spew) for debugging.
func (t *TimedFloat64Buckets) String() string {
	return spew.Sdump(t.buckets)
}

// NewTimedFloat64Buckets generates a new TimedFloat64Buckets with the given
// granularity.
func NewTimedFloat64Buckets(window, granularity time.Duration) *TimedFloat64Buckets {
	// Number of buckets is `window` divided by `granularity`, rounded up.
	// e.g. 60s / 2s = 30.
	nb := int(math.Ceil(float64(window) / float64(granularity)))
	return &TimedFloat64Buckets{
		buckets:     make([]float64, nb),
		granularity: granularity,
		window:      window,
	}
}

// IsEmpty returns if no data has been recorded for the `window` period,
// i.e. the last write is more than a full window in the past.
func (t *TimedFloat64Buckets) IsEmpty(now time.Time) bool {
	now = now.Truncate(t.granularity)
	t.bucketsMutex.RLock()
	defer t.bucketsMutex.RUnlock()
	return now.Sub(t.lastWrite) > t.window
}

// roundToNDigits truncates f to n digits after the decimal point
// (math.Floor, not round-half-up).
func roundToNDigits(n int, f float64) float64 {
	p := math.Pow10(n)
	return math.Floor(f*p) / p
}

// WindowAverage returns the average bucket value over the window.
func (t *TimedFloat64Buckets) WindowAverage(now time.Time) float64 {
	const precision = 6
	now = now.Truncate(t.granularity)
	t.bucketsMutex.RLock()
	defer t.bucketsMutex.RUnlock()
	switch d := now.Sub(t.lastWrite); {
	case d <= 0:
		// If LastWrite equal or greater than Now
		// return the current WindowTotal.
		return roundToNDigits(precision, t.windowTotal/float64(len(t.buckets)))
	case d < t.window:
		// If we haven't received metrics for some time, which is less than
		// the window -- remove the outdated items. Buckets between lastWrite
		// and now carry stale data, so subtract them from the running total
		// and shrink the divisor accordingly.
		stIdx := t.timeToIndex(t.lastWrite)
		eIdx := t.timeToIndex(now)
		ret := t.windowTotal
		for i := stIdx + 1; i <= eIdx; i++ {
			ret -= t.buckets[i%len(t.buckets)]
		}
		return roundToNDigits(precision, ret/float64(len(t.buckets)-(eIdx-stIdx)))
	default: // Nothing for more than a window time, just 0.
		return 0.
	}
}

// timeToIndex converts time to an integer that can be used for modulo
// operations to find the index in the bucket list.
// bucketMutex needs to be held.
func (t *TimedFloat64Buckets) timeToIndex(tm time.Time) int {
	// I don't think this run in 2038 :-)
	// NB: we need to divide by granularity, since it's a compressing mapping
	// to buckets.
	return int(tm.Unix()) / int(t.granularity.Seconds())
}

// Record adds a value with an associated time to the correct bucket.
// Stale buckets between the previous write and now are zeroed first so
// the ring never mixes data from different window positions.
func (t *TimedFloat64Buckets) Record(now time.Time, value float64) {
	bucketTime := now.Truncate(t.granularity)

	t.bucketsMutex.Lock()
	defer t.bucketsMutex.Unlock()

	writeIdx := t.timeToIndex(now)

	if t.lastWrite != bucketTime {
		// This should not really happen, but is here for correctness.
		if bucketTime.Sub(t.lastWrite) > t.window {
			// Reset all the buckets: the whole ring is stale.
			for i := range t.buckets {
				t.buckets[i] = 0
			}
			t.windowTotal = 0
		} else {
			// In theory we might lose buckets between stats gathering.
			// Thus we need to clean not only the current index, but also
			// all the ones from the last write. This is slower than the loop above
			// due to possible wrap-around, so they are not merged together.
			oldIdx := t.timeToIndex(t.lastWrite)
			for i := oldIdx + 1; i <= writeIdx; i++ {
				idx := i % len(t.buckets)
				t.windowTotal -= t.buckets[idx]
				t.buckets[idx] = 0
			}
		}
		// Update the last write time.
		t.lastWrite = bucketTime
	}
	t.buckets[writeIdx%len(t.buckets)] += value
	t.windowTotal += value
}

// ForEachBucket calls the given Accumulator function for each bucket,
// iterating backwards in time starting from the lastWrite bucket.
// Buckets older than `now - window` are not visited.
// Returns true if any data was recorded (false when the window is empty).
func (t *TimedFloat64Buckets) ForEachBucket(now time.Time, accs ...Accumulator) bool {
	now = now.Truncate(t.granularity)
	t.bucketsMutex.RLock()
	defer t.bucketsMutex.RUnlock()

	if now.Sub(t.lastWrite) >= t.window {
		return false
	}

	// So number of buckets we can process is len(buckets)-(now-lastWrite)/granularity.
	// Since empty check above failed, we know this is at least 1 bucket.
	numBuckets := len(t.buckets) - int(now.Sub(t.lastWrite)/t.granularity)
	bucketTime := t.lastWrite // Always aligned with granularity.
	si := t.timeToIndex(bucketTime)
	for i := 0; i < numBuckets; i++ {
		tIdx := si % len(t.buckets)
		for _, acc := range accs {
			acc(bucketTime, t.buckets[tIdx])
		}
		// Step one bucket back in time.
		si--
		bucketTime = bucketTime.Add(-t.granularity)
	}

	return true
}

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// ResizeWindow resizes the window. This is an O(N) operation,
// and is not supposed to be executed very often.
func (t *TimedFloat64Buckets) ResizeWindow(w time.Duration) {
	// Same window size, bail out.
	sameWindow := func() bool {
		t.bucketsMutex.RLock()
		defer t.bucketsMutex.RUnlock()
		return w == t.window
	}()
	if sameWindow {
		return
	}
	numBuckets := int(math.Ceil(float64(w) / float64(t.granularity)))
	newBuckets := make([]float64, numBuckets)
	newTotal := 0.

	// We need write lock here.
	// So that we can copy the existing buckets into the new array.
	t.bucketsMutex.Lock()
	defer t.bucketsMutex.Unlock()
	// If the window is shrinking, then we need to copy only
	// `newBuckets` buckets. Copy newest-first, starting from the
	// lastWrite bucket and walking backwards in time.
	oldNumBuckets := len(t.buckets)
	tIdx := t.timeToIndex(t.lastWrite)
	for i := 0; i < min(numBuckets, oldNumBuckets); i++ {
		oi := tIdx % oldNumBuckets
		ni := tIdx % numBuckets
		newBuckets[ni] = t.buckets[oi]
		// In case we're shrinking, make sure the total
		// window sum will match. This is a no-op in case the
		// window is getting bigger.
		newTotal += t.buckets[oi]
		tIdx--
	}
	t.window = w
	t.buckets = newBuckets
	t.windowTotal = newTotal
}
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aggregation + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +const ( + granularity = time.Second + pod = "pod" +) + +func TestTimedFloat64BucketsSimple(t *testing.T) { + trunc1 := time.Now().Truncate(1 * time.Second) + trunc5 := time.Now().Truncate(5 * time.Second) + + type args struct { + time time.Time + name string + value float64 + } + tests := []struct { + name string + granularity time.Duration + stats []args + want map[time.Time]float64 + }{{ + name: "granularity = 1s", + granularity: time.Second, + stats: []args{ + {trunc1, pod, 1.0}, // activator scale from 0. + {trunc1.Add(100 * time.Millisecond), pod, 10.0}, // from scraping pod/sent by activator. 
+ {trunc1.Add(1 * time.Second), pod, 1.0}, // next bucket + {trunc1.Add(3 * time.Second), pod, 1.0}, // nextnextnext bucket + }, + want: map[time.Time]float64{ + trunc1: 11.0, + trunc1.Add(1 * time.Second): 1.0, + trunc1.Add(3 * time.Second): 1.0, + }, + }, { + name: "granularity = 5s", + granularity: 5 * time.Second, + stats: []args{ + {trunc5, pod, 1.0}, + {trunc5.Add(3 * time.Second), pod, 11.0}, // same bucket + {trunc5.Add(6 * time.Second), pod, 1.0}, // next bucket + }, + want: map[time.Time]float64{ + trunc5: 12.0, + trunc5.Add(5 * time.Second): 1.0, + }, + }, { + name: "empty", + granularity: time.Second, + stats: []args{}, + want: map[time.Time]float64{}, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // New implementation test. + buckets := NewTimedFloat64Buckets(2*time.Minute, tt.granularity) + if !buckets.IsEmpty(trunc1) { + t.Error("Unexpected non empty result") + } + for _, stat := range tt.stats { + buckets.Record(stat.time, stat.value) + } + + got := make(map[time.Time]float64) + // Less time in future than our window is (2mins above), but more than any of the tests report. + buckets.ForEachBucket(trunc1.Add(time.Minute), func(t time.Time, b float64) { + // Since we're storing 0s when there's no data, we need to exclude those + // for this test. + if b > 0 { + got[t] = b + } + }) + + if !cmp.Equal(tt.want, got) { + t.Errorf("Unexpected values (-want +got): %v", cmp.Diff(tt.want, got)) + } + }) + } +} + +func TestTimedFloat64BucketsManyReps(t *testing.T) { + trunc1 := time.Now().Truncate(granularity) + buckets := NewTimedFloat64Buckets(time.Minute, granularity) + for p := 0; p < 5; p++ { + trunc1 = trunc1.Add(granularity) + for t := 0; t < 5; t++ { + buckets.Record(trunc1, float64(p+t)) + } + } + // So the buckets are: + // [0, 1, 2, 3, 4] = 10 + // [1, 2, 3, 4, 5] = 15 + // ... = ... + // [4, 5, 6, 7, 8] = 30 + // = 100 + const want = 100. + sum1, sum2 := 0., 0. 
+ buckets.ForEachBucket(trunc1, func(_ time.Time, b float64) { + sum1 += b + }) + buckets.ForEachBucket(trunc1, func(_ time.Time, b float64) { + sum2 += b + }) + if got, want := sum1, want; got != want { + t.Errorf("Sum1 = %f, want: %f", got, want) + } + + if got, want := sum2, want; got != want { + t.Errorf("Sum2 = %f, want: %f", got, want) + } +} + +func TestTimedFloat64BucketsWindowAverage(t *testing.T) { + now := time.Now() + buckets := NewTimedFloat64Buckets(5*time.Second, granularity) + + for i := 0; i < 5; i++ { + buckets.Record(now.Add(time.Duration(i)*time.Second), float64(i+1)) + } + + if got, want := buckets.WindowAverage(now.Add(4*time.Second)), 15./5; got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + // Check when `now` lags behind. + if got, want := buckets.WindowAverage(now.Add(3600*time.Millisecond)), 15./5; got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + + // Check with short hole. + if got, want := buckets.WindowAverage(now.Add(6*time.Second)), (15.-1-2)/(5-2); got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + + // Check with a long hole. + if got, want := buckets.WindowAverage(now.Add(10*time.Second)), 0.; got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + + // Check write with holes. + buckets.Record(now.Add(6*time.Second), 91) + if got, want := buckets.WindowAverage(now.Add(6*time.Second)), (15.-1-2+91)/5; got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + +} + +func TestTimedFloat64BucketsHoles(t *testing.T) { + now := time.Now() + buckets := NewTimedFloat64Buckets(5*time.Second, granularity) + + for i := time.Duration(0); i < 5; i++ { + buckets.Record(now.Add(i*time.Second), float64(i+1)) + } + + sum := 0. 
+ + if !buckets.ForEachBucket(now.Add(4*time.Second), + func(_ time.Time, b float64) { + sum += b + }, + ) { + t.Fatal("ForEachBucket unexpectedly returned empty result") + } + if got, want := sum, 15.; got != want { + t.Errorf("Sum = %v, want: %v", got, want) + } + if got, want := buckets.WindowAverage(now.Add(4*time.Second)), 15./5; got != want { + t.Errorf("WindowAverage = %v, want: %v", got, want) + } + // Now write at 9th second. Which means that seconds + // 5[0], 6[1], 7[2] become 0. + buckets.Record(now.Add(8*time.Second), 2.) + // So now we have [3] = 2, [4] = 5 and sum should be 7. + sum = 0. + + if !buckets.ForEachBucket(now.Add(8*time.Second), + func(_ time.Time, b float64) { + sum += b + }, + ) { + t.Fatal("ForEachBucket unexpectedly returned empty result") + } + if got, want := sum, 7.; got != want { + t.Errorf("Sum = %v, want: %v", got, want) + } +} + +func TestTimedFloat64BucketsForEachBucket(t *testing.T) { + now := time.Now() + buckets := NewTimedFloat64Buckets(2*time.Minute, granularity) + + // Since we recorded 0 data, even in this implementation no iteration must occur. + if buckets.ForEachBucket(now, func(time time.Time, bucket float64) {}) { + t.Fatalf("ForEachBucket unexpectedly returned non-empty result") + } + + buckets.Record(now, 10.0) + buckets.Record(now.Add(1*time.Second), 10.0) + buckets.Record(now.Add(2*time.Second), 5.0) + buckets.Record(now.Add(3*time.Second), 5.0) + + acc1 := 0 + acc2 := 0 + + if !buckets.ForEachBucket(now.Add(4*time.Second), + func(_ time.Time, b float64) { + // We need to exclude the 0s for this test. + if b > 0 { + acc1++ + } + }, + func(_ time.Time, b float64) { + if b > 0 { + acc2++ + } + }, + ) { + t.Fatal("ForEachBucket unexpectedly returned empty result") + } +} + +func TestTimedFloat64BucketsWindowUpdate(t *testing.T) { + startTime := time.Now() + buckets := NewTimedFloat64Buckets(5*time.Second, granularity) + + // Fill the whole bucketing list with rollover. 
+ buckets.Record(startTime, 1) + buckets.Record(startTime.Add(1*time.Second), 2) + buckets.Record(startTime.Add(2*time.Second), 3) + buckets.Record(startTime.Add(3*time.Second), 4) + buckets.Record(startTime.Add(4*time.Second), 5) + buckets.Record(startTime.Add(5*time.Second), 6) + now := startTime.Add(5 * time.Second) + + sum := 0. + buckets.ForEachBucket(now, func(t time.Time, b float64) { + sum += b + }) + const wantInitial = 2. + 3 + 4 + 5 + 6 + if got, want := sum, wantInitial; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + if got, want := buckets.WindowAverage(now), wantInitial/5; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + + // Increase window. + buckets.ResizeWindow(10 * time.Second) + if got, want := len(buckets.buckets), 10; got != want { + t.Fatalf("Resized bucket count = %d, want: %d", got, want) + } + if got, want := buckets.window, 10*time.Second; got != want { + t.Fatalf("Resized bucket windos = %v, want: %v", got, want) + } + + // Verify values were properly copied. + sum = 0. + buckets.ForEachBucket(now, func(t time.Time, b float64) { + sum += b + }) + if got, want := sum, float64(2+3+4+5+6); got != want { + t.Fatalf("After first resize data set Sum = %v, want: %v", got, want) + } + // Note the average changes, since we're averaging over bigger window now. + if got, want := buckets.WindowAverage(now), wantInitial/10; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + + // Add one more. Make sure all the data is preserved, since window is longer. + now = now.Add(time.Second) + buckets.Record(now, 7) + const wantWithUpdate = wantInitial + 7 + sum = 0. 
+ buckets.ForEachBucket(now, func(t time.Time, b float64) { + sum += b + }) + if got, want := sum, wantWithUpdate; got != want { + t.Fatalf("Updated data set Sum = %v, want: %v", got, want) + } + if got, want := buckets.WindowAverage(now), wantWithUpdate/10; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + + // Now let's reduce window size. + buckets.ResizeWindow(4 * time.Second) + if got, want := len(buckets.buckets), 4; got != want { + t.Fatalf("Resized bucket count = %d, want: %d", got, want) + } + // Just last 4 buckets should have remained (so 2 oldest are expunged). + const wantWithShrink = wantWithUpdate - 2 - 3 + sum = 0. + buckets.ForEachBucket(now, func(t time.Time, b float64) { + sum += b + }) + if got, want := sum, wantWithShrink; got != want { + t.Fatalf("Updated data set Sum = %v, want: %v", got, want) + } + if got, want := buckets.WindowAverage(now), wantWithShrink/4; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + + // Verify idempotence. + ob := &buckets.buckets + buckets.ResizeWindow(4 * time.Second) + if ob != &buckets.buckets { + t.Error("The buckets have changed, though window didn't") + } +} + +func TestTimedFloat64BucketsWindowUpdate3sGranularity(t *testing.T) { + granularity := 3 * time.Second + trunc1 := time.Now().Truncate(granularity) + + // So two buckets here (ceil(5/3)=ceil(1.6(6))=2). + buckets := NewTimedFloat64Buckets(5*time.Second, granularity) + if got, want := len(buckets.buckets), 2; got != want { + t.Fatalf("Initial bucket count = %d, want: %d", got, want) + } + + // Fill the whole bucketing list. + buckets.Record(trunc1, 10) + buckets.Record(trunc1.Add(1*time.Second), 2) + buckets.Record(trunc1.Add(2*time.Second), 3) + buckets.Record(trunc1.Add(3*time.Second), 4) + buckets.Record(trunc1.Add(4*time.Second), 5) + buckets.Record(trunc1.Add(5*time.Second), 6) + buckets.Record(trunc1.Add(6*time.Second), 7) // This overrides the initial 15 (10+2+3) + sum := 0. 
+ buckets.ForEachBucket(trunc1.Add(6*time.Second), func(t time.Time, b float64) { + sum += b + }) + want := (4. + 5 + 6) + 7 + if got, want := sum, want; got != want { + t.Fatalf("Initial data set Sum = %v, want: %v", got, want) + } + + // Increase window. + buckets.ResizeWindow(10 * time.Second) + if got, want := len(buckets.buckets), 4; got != want { + t.Fatalf("Resized bucket count = %d, want: %d", got, want) + } + if got, want := buckets.window, 10*time.Second; got != want { + t.Fatalf("Resized bucket windos = %v, want: %v", got, want) + } + + // Verify values were properly copied. + sum = 0 + buckets.ForEachBucket(trunc1.Add(6*time.Second), func(t time.Time, b float64) { + sum += b + }) + if got, want := sum, want; got != want { + t.Fatalf("After first resize data set Sum = %v, want: %v", got, want) + } + + // Add one more. Make sure all the data is preserved, since window is longer. + buckets.Record(trunc1.Add(9*time.Second+300*time.Millisecond), 42) + sum = 0 + buckets.ForEachBucket(trunc1.Add(9*time.Second), func(t time.Time, b float64) { + sum += b + }) + want += 42 + if got, want := sum, want; got != want { + t.Fatalf("Updated data set Sum = %v, want: %v", got, want) + } + + // Now let's reduce window size. + buckets.ResizeWindow(4 * time.Second) + + sum = 0 + if got, want := len(buckets.buckets), 2; got != want { + t.Fatalf("Resized bucket count = %d, want: %d", got, want) + } + // Just last 4 buckets should have remained. + sum = 0. + want = 42 + 7 // we drop oldest bucket and the one not yet utilizied) + buckets.ForEachBucket(trunc1.Add(9*time.Second), func(t time.Time, b float64) { + sum += b + }) + if got, want := sum, want; got != want { + t.Fatalf("Updated data set Sum = %v, want: %v", got, want) + } + + // Verify idempotence. 
+ ob := &buckets.buckets + buckets.ResizeWindow(4 * time.Second) + if ob != &buckets.buckets { + t.Error("The buckets have changed, though window didn't") + } +} + +func BenchmarkWindowAverage(b *testing.B) { + // Window lengths in secs. + for _, wl := range []int{30, 60, 120, 240, 600} { + b.Run(fmt.Sprintf("%v-win-len", wl), func(b *testing.B) { + tn := time.Now().Truncate(time.Second) // To simplify everything. + buckets := NewTimedFloat64Buckets(time.Duration(wl)*time.Second, + time.Second /*granularity*/) + // Populate with some random data. + for i := 0; i < wl; i++ { + buckets.Record(tn.Add(time.Duration(i)*time.Second), rand.Float64()*100) + } + for i := 0; i < b.N; i++ { + buckets.WindowAverage(tn.Add(time.Duration(wl) * time.Second)) + } + }) + } +} + +func BenchmarkWindowForEach(b *testing.B) { + // Window lengths in secs. + for _, wl := range []int{30, 60, 120, 240, 600} { + b.Run(fmt.Sprintf("%v-win-len", wl), func(b *testing.B) { + tn := time.Now().Truncate(time.Second) // To simplify everything. + buckets := NewTimedFloat64Buckets(time.Duration(wl)*time.Second, + time.Second /*granularity*/) + // Populate with some random data. 
+ for i := 0; i < wl; i++ { + buckets.Record(tn.Add(time.Duration(i)*time.Second), rand.Float64()*100) + } + for i := 0; i < b.N; i++ { + var avg Average + win := tn.Add(time.Duration(wl) * time.Second) + buckets.ForEachBucket(win, + YoungerThan(tn, avg.Accumulate)) + } + }) + } +} + +func TestRoundToNDigits(t *testing.T) { + if got, want := roundToNDigits(6, 3.6e-17), 0.; got != want { + t.Errorf("Rounding = %v, want: %v", got, want) + } + if got, want := roundToNDigits(3, 0.0004), 0.; got != want { + t.Errorf("Rounding = %v, want: %v", got, want) + } + if got, want := roundToNDigits(3, 1.2345), 1.234; got != want { + t.Errorf("Rounding = %v, want: %v", got, want) + } + if got, want := roundToNDigits(4, 1.2345), 1.2345; got != want { + t.Errorf("Rounding = %v, want: %v", got, want) + } + if got, want := roundToNDigits(6, 12345), 12345.; got != want { + t.Errorf("Rounding = %v, want: %v", got, want) + } + +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler.go b/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler.go new file mode 100644 index 0000000000..985e314ad9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler.go @@ -0,0 +1,253 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "context" + "errors" + "math" + "sync" + "time" + + "go.uber.org/zap" + + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/resources" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// Autoscaler stores current state of an instance of an autoscaler. +type Autoscaler struct { + namespace string + revision string + metricClient MetricClient + lister corev1listers.EndpointsLister + reporter StatsReporter + + // State in panic mode. Carries over multiple Scale calls. Guarded + // by the stateMux. + stateMux sync.Mutex + panicTime time.Time + maxPanicPods int32 + + // specMux guards the current DeciderSpec and the PodCounter. + specMux sync.RWMutex + deciderSpec *DeciderSpec + podCounter resources.ReadyPodCounter +} + +// New creates a new instance of autoscaler +func New( + namespace string, + revision string, + metricClient MetricClient, + lister corev1listers.EndpointsLister, + deciderSpec *DeciderSpec, + reporter StatsReporter) (*Autoscaler, error) { + if lister == nil { + return nil, errors.New("'lister' must not be nil") + } + if reporter == nil { + return nil, errors.New("stats reporter must not be nil") + } + + // We always start in the panic mode, if the deployment is scaled up over 1 pod. + // If the scale is 0 or 1, normal Autoscaler behavior is fine. + // When Autoscaler restarts we lose metric history, which causes us to + // momentarily scale down, and that is not a desired behaviour. + // Thus, we're keeping at least the current scale until we + // accumulate enough data to make conscious decisions. 
+ podCounter := resources.NewScopedEndpointsCounter(lister, + namespace, deciderSpec.ServiceName) + curC, err := podCounter.ReadyCount() + if err != nil { + // This always happens on new revision creation, since decider + // is reconciled before SKS has even chance of creating the service/endpoints. + curC = 0 + } + var pt time.Time + if curC > 1 { + pt = time.Now() + // A new instance of autoscaler is created in panic mode. + reporter.ReportPanic(1) + } else { + reporter.ReportPanic(0) + } + + return &Autoscaler{ + namespace: namespace, + revision: revision, + metricClient: metricClient, + lister: lister, + reporter: reporter, + + deciderSpec: deciderSpec, + podCounter: podCounter, + + panicTime: pt, + maxPanicPods: int32(curC), + }, nil +} + +// Update reconfigures the UniScaler according to the DeciderSpec. +func (a *Autoscaler) Update(deciderSpec *DeciderSpec) error { + a.specMux.Lock() + defer a.specMux.Unlock() + + // Update the podCounter if service name changes. + if deciderSpec.ServiceName != a.deciderSpec.ServiceName { + a.podCounter = resources.NewScopedEndpointsCounter(a.lister, a.namespace, + deciderSpec.ServiceName) + } + a.deciderSpec = deciderSpec + return nil +} + +// Scale calculates the desired scale based on current statistics given the current time. +// desiredPodCount is the calculated pod count the autoscaler would like to set. +// validScale signifies whether the desiredPodCount should be applied or not. +func (a *Autoscaler) Scale(ctx context.Context, now time.Time) (desiredPodCount int32, excessBC int32, validScale bool) { + logger := logging.FromContext(ctx) + + spec, podCounter := a.currentSpecAndPC() + originalReadyPodsCount, err := podCounter.ReadyCount() + // If the error is NotFound, then presume 0. + if err != nil && !apierrors.IsNotFound(err) { + logger.Errorw("Failed to get Endpoints via K8S Lister", zap.Error(err)) + return 0, 0, false + } + // Use 1 if there are zero current pods. 
+ readyPodsCount := math.Max(1, float64(originalReadyPodsCount)) + + metricKey := types.NamespacedName{Namespace: a.namespace, Name: a.revision} + + metricName := spec.ScalingMetric + var observedStableValue, observedPanicValue float64 + switch spec.ScalingMetric { + case autoscaling.RPS: + observedStableValue, observedPanicValue, err = a.metricClient.StableAndPanicRPS(metricKey, now) + a.reporter.ReportStableRPS(observedStableValue) + a.reporter.ReportPanicRPS(observedPanicValue) + a.reporter.ReportTargetRPS(spec.TargetValue) + default: + metricName = autoscaling.Concurrency // concurrency is used by default + observedStableValue, observedPanicValue, err = a.metricClient.StableAndPanicConcurrency(metricKey, now) + a.reporter.ReportStableRequestConcurrency(observedStableValue) + a.reporter.ReportPanicRequestConcurrency(observedPanicValue) + a.reporter.ReportTargetRequestConcurrency(spec.TargetValue) + } + + // Put the scaling metric to logs. + logger = logger.With(zap.String("metric", metricName)) + + if err != nil { + if err == ErrNoData { + logger.Debug("No data to scale on yet") + } else { + logger.Errorw("Failed to obtain metrics", zap.Error(err)) + } + return 0, 0, false + } + + // Make sure we don't get stuck with the same number of pods, if the scale up rate + // is too conservative and MaxScaleUp*RPC==RPC, so this permits us to grow at least by a single + // pod if we need to scale up. + // E.g. MSUR=1.1, OCC=3, RPC=2, TV=1 => OCC/TV=3, MSU=2.2 => DSPC=2, while we definitely, need + // 3 pods. See the unit test for this scenario in action. + maxScaleUp := math.Ceil(spec.MaxScaleUpRate * readyPodsCount) + // Same logic, opposite math applies here. 
+ maxScaleDown := math.Floor(readyPodsCount / spec.MaxScaleDownRate) + + dspc := math.Ceil(observedStableValue / spec.TargetValue) + dppc := math.Ceil(observedPanicValue / spec.TargetValue) + logger.Debugf("DesiredStablePodCount = %0.3f, DesiredPanicPodCount = %0.3f, MaxScaleUp = %0.3f, MaxScaleDown = %0.3f", + dspc, dppc, maxScaleUp, maxScaleDown) + + // We want to keep desired pod count in the [maxScaleDown, maxScaleUp] range. + desiredStablePodCount := int32(math.Min(math.Max(dspc, maxScaleDown), maxScaleUp)) + desiredPanicPodCount := int32(math.Min(math.Max(dppc, maxScaleDown), maxScaleUp)) + + logger.With(zap.String("mode", "stable")).Debugf("Observed average scaling metric value: %0.3f, targeting %0.3f.", + observedStableValue, spec.TargetValue) + logger.With(zap.String("mode", "panic")).Debugf("Observed average scaling metric value: %0.3f, targeting %0.3f.", + observedPanicValue, spec.TargetValue) + + isOverPanicThreshold := observedPanicValue/readyPodsCount >= spec.PanicThreshold + + a.stateMux.Lock() + defer a.stateMux.Unlock() + if a.panicTime.IsZero() && isOverPanicThreshold { + // Begin panicking when we cross the threshold in the panic window. + logger.Info("PANICKING") + a.panicTime = now + a.reporter.ReportPanic(1) + } else if !a.panicTime.IsZero() && !isOverPanicThreshold && a.panicTime.Add(spec.StableWindow).Before(now) { + // Stop panicking after the surge has made its way into the stable metric. + logger.Info("Un-panicking.") + a.panicTime = time.Time{} + a.maxPanicPods = 0 + a.reporter.ReportPanic(0) + } + + if !a.panicTime.IsZero() { + logger.Debug("Operating in panic mode.") + // We do not scale down while in panic mode. Only increases will be applied. 
+ if desiredPanicPodCount > a.maxPanicPods { + logger.Infof("Increasing pods from %d to %d.", originalReadyPodsCount, desiredPanicPodCount) + a.panicTime = now + a.maxPanicPods = desiredPanicPodCount + } else if desiredPanicPodCount < a.maxPanicPods { + logger.Debugf("Skipping decrease from %d to %d.", a.maxPanicPods, desiredPanicPodCount) + } + desiredPodCount = a.maxPanicPods + } else { + logger.Debug("Operating in stable mode.") + desiredPodCount = desiredStablePodCount + } + + // Compute the excess burst capacity based on stable value for now, since we don't want to + // be making knee-jerk decisions about Activator in the request path. Negative EBC means + // that the deployment does not have enough capacity to serve the desired burst off hand. + // EBC = TotCapacity - Cur#ReqInFlight - TargetBurstCapacity + excessBC = int32(-1) + switch { + case a.deciderSpec.TargetBurstCapacity == 0: + excessBC = 0 + case a.deciderSpec.TargetBurstCapacity >= 0: + excessBC = int32(math.Floor(float64(originalReadyPodsCount)*a.deciderSpec.TotalValue - observedStableValue - + a.deciderSpec.TargetBurstCapacity)) + logger.Infof("PodCount=%v Total1PodCapacity=%v ObservedStableValue=%v TargetBC=%v ExcessBC=%v", + originalReadyPodsCount, + a.deciderSpec.TotalValue, + observedStableValue, a.deciderSpec.TargetBurstCapacity, excessBC) + } + + a.reporter.ReportExcessBurstCapacity(float64(excessBC)) + a.reporter.ReportDesiredPodCount(int64(desiredPodCount)) + + return desiredPodCount, excessBC, true +} + +func (a *Autoscaler) currentSpecAndPC() (*DeciderSpec, resources.ReadyPodCounter) { + a.specMux.RLock() + defer a.specMux.RUnlock() + return a.deciderSpec, a.podCounter +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler_test.go new file mode 100644 index 0000000000..8769b38f7f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/autoscaler_test.go @@ -0,0 +1,505 @@ +/* +Copyright 2018 
The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "errors" + "fmt" + "math" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubeinformers "k8s.io/client-go/informers" + fakeK8s "k8s.io/client-go/kubernetes/fake" + . "knative.dev/pkg/logging/testing" + autoscalerfake "knative.dev/serving/pkg/autoscaler/fake" +) + +const ( + stableWindow = 60 * time.Second + targetUtilization = 0.75 +) + +var ( + kubeClient = fakeK8s.NewSimpleClientset() + kubeInformer = kubeinformers.NewSharedInformerFactory(kubeClient, 0) +) + +func TestNewErrorWhenGivenNilReadyPodCounter(t *testing.T) { + _, err := New(testNamespace, testRevision, &autoscalerfake.MetricClient{}, nil, &DeciderSpec{TargetValue: 10, ServiceName: testService}, &mockReporter{}) + if err == nil { + t.Error("Expected error when ReadyPodCounter interface is nil, but got none.") + } +} + +func TestNewErrorWhenGivenNilStatsReporter(t *testing.T) { + var reporter StatsReporter + + l := kubeInformer.Core().V1().Endpoints().Lister() + _, err := New(testNamespace, testRevision, &autoscalerfake.MetricClient{}, l, + &DeciderSpec{TargetValue: 10, ServiceName: testService}, reporter) + if err == nil { + t.Error("Expected error when EndpointsInformer interface is nil, but got none.") + } +} + +func TestAutoscalerNoDataNoAutoscale(t *testing.T) { + metrics := &autoscalerfake.MetricClient{ + ErrF: func(key 
types.NamespacedName, now time.Time) error { + return errors.New("no metrics") + }, + } + + a := newTestAutoscaler(t, 10, 100, metrics) + a.expectScale(t, time.Now(), 0, 0, false) +} + +func expectedEBC(totCap, targetBC, recordedConcurrency, numPods float64) int32 { + return int32(math.Floor(totCap/targetUtilization*numPods - targetBC - recordedConcurrency)) +} + +func TestAutoscalerChangeOfPodCountService(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 50.0} + a := newTestAutoscaler(t, 10, 100, metrics) + a.expectScale(t, time.Now(), 5, expectedEBC(10, 100, 50, 1), true) + + const newTS = testService + "2" + newDS := *a.deciderSpec + newDS.ServiceName = newTS + a.Update(&newDS) + + // Make two pods in the new service. + endpoints(2, newTS) + // This should change the EBC computation, but target scale doesn't change. + a.expectScale(t, time.Now(), 5, expectedEBC(10, 100, 50, 2), true) +} + +func TestAutoscalerStableModeIncreaseWithConcurrencyDefault(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 50.0} + a := newTestAutoscaler(t, 10, 101, metrics) + a.expectScale(t, time.Now(), 5, expectedEBC(10, 101, 50, 1), true) + + metrics.StableConcurrency = 100 + a.expectScale(t, time.Now(), 10, expectedEBC(10, 101, 100, 1), true) +} + +func TestAutoscalerStableModeIncreaseWithRPS(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableRPS: 50.0} + a := newTestAutoscalerWithScalingMetric(t, 10, 101, metrics, "rps") + a.expectScale(t, time.Now(), 5, expectedEBC(10, 101, 50, 1), true) + + metrics.StableRPS = 100 + a.expectScale(t, time.Now(), 10, expectedEBC(10, 101, 100, 1), true) +} + +func TestAutoscalerStableModeDecrease(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 100.0} + a := newTestAutoscaler(t, 10, 98, metrics) + endpoints(8, testService) + a.expectScale(t, time.Now(), 10, expectedEBC(10, 98, 100, 8), true) + + metrics.StableConcurrency = 50 + a.expectScale(t, time.Now(), 
5, expectedEBC(10, 98, 50, 8), true) +} + +func TestAutoscalerStableModeNoTrafficScaleToZero(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 1} + a := newTestAutoscaler(t, 10, 75, metrics) + a.expectScale(t, time.Now(), 1, expectedEBC(10, 75, 1, 1), true) + + metrics.StableConcurrency = 0.0 + a.expectScale(t, time.Now(), 0, expectedEBC(10, 75, 0, 1), true) +} + +// QPS is increasing exponentially. Each scaling event bring concurrency +// back to the target level (1.0) but then traffic continues to increase. +// At 1296 QPS traffic stablizes. +func TestAutoscalerPanicModeExponentialTrackAndStablize(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 6, PanicConcurrency: 6} + a := newTestAutoscaler(t, 1, 101, metrics) + a.expectScale(t, time.Now(), 6, expectedEBC(1, 101, 6, 1), true) + + endpoints(6, testService) + metrics.PanicConcurrency, metrics.StableConcurrency = 36, 36 + a.expectScale(t, time.Now(), 36, expectedEBC(1, 101, 36, 6), true) + + endpoints(36, testService) + metrics.PanicConcurrency, metrics.StableConcurrency = 216, 216 + a.expectScale(t, time.Now(), 216, expectedEBC(1, 101, 216, 36), true) + + endpoints(216, testService) + metrics.PanicConcurrency, metrics.StableConcurrency = 1296, 1296 + a.expectScale(t, time.Now(), 1296, expectedEBC(1, 101, 1296, 216), true) + endpoints(1296, testService) + a.expectScale(t, time.Now(), 1296, expectedEBC(1, 101, 1296, 1296), true) +} + +func TestAutoscalerScale(t *testing.T) { + tests := []struct { + label string + as *Autoscaler + prepFunc func(as *Autoscaler) + wantScale int32 + wantEBC int32 + wantInvalid bool + }{{ + label: "AutoscalerNoDataAtZeroNoAutoscale", + as: newTestAutoscaler(t, 10, 100, &autoscalerfake.MetricClient{}), + wantScale: 0, + wantEBC: expectedEBC(10, 100, 0, 1), + }, { + label: "AutoscalerNoDataAtZeroNoAutoscaleWithExplicitEPs", + as: newTestAutoscaler(t, 10, 100, &autoscalerfake.MetricClient{}), + prepFunc: func(*Autoscaler) { 
endpoints(1, testService) }, + wantScale: 0, + wantEBC: expectedEBC(10, 100, 0, 1), + }, { + label: "AutoscalerStableModeUnlimitedTBC", + as: newTestAutoscaler(t, 181, -1, &autoscalerfake.MetricClient{StableConcurrency: 21.0}), + wantScale: 1, + wantEBC: -1, + }, { + label: "Autoscaler0TBC", + as: newTestAutoscaler(t, 10, 0, &autoscalerfake.MetricClient{StableConcurrency: 50.0}), + wantScale: 5, + wantEBC: 0, + }, { + label: "AutoscalerStableModeNoChange", + as: newTestAutoscaler(t, 10, 100, &autoscalerfake.MetricClient{StableConcurrency: 50.0}), + wantScale: 5, + wantEBC: expectedEBC(10, 100, 50, 1), + }, { + label: "AutoscalerStableModeNoChangeAlreadyScaled", + as: newTestAutoscaler(t, 10, 100, &autoscalerfake.MetricClient{StableConcurrency: 50.0}), + prepFunc: func(*Autoscaler) { endpoints(5, testService) }, + wantScale: 5, + wantEBC: expectedEBC(10, 100, 50, 5), + }, { + label: "AutoscalerStableModeNoChangeAlreadyScaled", + as: newTestAutoscaler(t, 10, 100, &autoscalerfake.MetricClient{StableConcurrency: 50.0}), + prepFunc: func(*Autoscaler) { endpoints(5, testService) }, + wantScale: 5, + wantEBC: expectedEBC(10, 100, 50, 5), + }, { + label: "AutoscalerStableModeIncreaseWithSmallScaleUpRate", + as: newTestAutoscaler(t, 1 /* target */, 1982 /* TBC */, &autoscalerfake.MetricClient{StableConcurrency: 3}), + prepFunc: func(a *Autoscaler) { + a.deciderSpec.MaxScaleUpRate = 1.1 + endpoints(2, testService) + }, + wantScale: 3, + wantEBC: expectedEBC(1, 1982, 3, 2), + }, { + label: "AutoscalerStableModeIncreaseWithSmallScaleDownRate", + as: newTestAutoscaler(t, 10 /* target */, 1982 /* TBC */, &autoscalerfake.MetricClient{StableConcurrency: 1}), + prepFunc: func(a *Autoscaler) { + a.deciderSpec.MaxScaleDownRate = 1.1 + endpoints(100, testService) + }, + wantScale: 90, + wantEBC: expectedEBC(10, 1982, 1, 100), + }, { + label: "AutoscalerPanicModeDoublePodCount", + as: newTestAutoscaler(t, 10, 84, &autoscalerfake.MetricClient{StableConcurrency: 50, PanicConcurrency: 
100}), + // PanicConcurrency takes precedence. + wantScale: 10, + wantEBC: expectedEBC(10, 84, 50, 1), + }} + for _, test := range tests { + t.Run(test.label, func(tt *testing.T) { + // Reset the endpoints state to the default before every test. + endpoints(1, testService) + if test.prepFunc != nil { + test.prepFunc(test.as) + } + test.as.expectScale(tt, time.Now(), test.wantScale, test.wantEBC, !test.wantInvalid) + }) + } +} + +func TestAutoscalerPanicThenUnPanicScaleDown(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 100, PanicConcurrency: 100} + a := newTestAutoscaler(t, 10, 93, metrics) + a.expectScale(t, time.Now(), 10, expectedEBC(10, 93, 100, 1), true) + endpoints(10, testService) + + panicTime := time.Now() + metrics.PanicConcurrency = 1000 + a.expectScale(t, panicTime, 100, expectedEBC(10, 93, 100, 10), true) + + // Traffic dropped off, scale stays as we're still in panic. + metrics.PanicConcurrency = 1 + metrics.StableConcurrency = 1 + a.expectScale(t, panicTime.Add(30*time.Second), 100, expectedEBC(10, 93, 1, 10), true) + + // Scale down after the StableWindow + a.expectScale(t, panicTime.Add(61*time.Second), 1, expectedEBC(10, 93, 1, 10), true) +} + +func TestAutoscalerRateLimitScaleUp(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 1000} + a := newTestAutoscaler(t, 10, 61, metrics) + + // Need 100 pods but only scale x10 + a.expectScale(t, time.Now(), 10, expectedEBC(10, 61, 1000, 1), true) + + endpoints(10, testService) + // Scale x10 again + a.expectScale(t, time.Now(), 100, expectedEBC(10, 61, 1000, 10), true) +} + +func TestAutoscalerRateLimitScaleDown(t *testing.T) { + metrics := &autoscalerfake.MetricClient{StableConcurrency: 1} + a := newTestAutoscaler(t, 10, 61, metrics) + + // Need 1 pods but can only scale down ten times, to 10. + endpoints(100, testService) + a.expectScale(t, time.Now(), 10, expectedEBC(10, 61, 1, 100), true) + + endpoints(10, testService) + // Scale ÷10 again. 
+	a.expectScale(t, time.Now(), 1, expectedEBC(10, 61, 1, 10), true)
+}
+
+func eraseEndpoints() {
+	ep, _ := kubeClient.CoreV1().Endpoints(testNamespace).Get(testService, metav1.GetOptions{})
+	kubeClient.CoreV1().Endpoints(testNamespace).Delete(testService, nil)
+	kubeInformer.Core().V1().Endpoints().Informer().GetIndexer().Delete(ep)
+}
+
+func TestAutoscalerUseOnePodAsMinimumIfEndpointsNotFound(t *testing.T) {
+	metrics := &autoscalerfake.MetricClient{StableConcurrency: 1000}
+	a := newTestAutoscaler(t, 10, 81, metrics)
+
+	endpoints(0, testService)
+	// 2*10 as the rate limited if we can get the actual pods number.
+	// 1*10 as the rate limited since no ready pods are there from K8S API.
+	a.expectScale(t, time.Now(), 10, expectedEBC(10, 81, 1000, 0), true)
+
+	eraseEndpoints()
+	// 2*10 as the rate limited if we can get the actual pods number.
+	// 1*10 as the rate limited since no Endpoints object is there from K8S API.
+	a.expectScale(t, time.Now(), 10, expectedEBC(10, 81, 1000, 0), true)
+}
+
+func TestAutoscalerUpdateTarget(t *testing.T) {
+	metrics := &autoscalerfake.MetricClient{StableConcurrency: 100}
+	a := newTestAutoscaler(t, 10, 77, metrics)
+	a.expectScale(t, time.Now(), 10, expectedEBC(10, 77, 100, 1), true)
+
+	endpoints(10, testService)
+	a.Update(&DeciderSpec{
+		TargetValue:         1,
+		TotalValue:          1 / targetUtilization,
+		TargetBurstCapacity: 71,
+		PanicThreshold:      2,
+		MaxScaleDownRate:    10,
+		MaxScaleUpRate:      10,
+		StableWindow:        stableWindow,
+		ServiceName:         testService,
+	})
+	a.expectScale(t, time.Now(), 100, expectedEBC(1, 71, 100, 10), true)
+}
+
+type mockReporter struct{}
+
+// ReportDesiredPodCount of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportDesiredPodCount(v int64) error {
+	return nil
+}
+
+// ReportRequestedPodCount of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportRequestedPodCount(v int64) error {
+	return nil
+}
+
+// ReportActualPodCount of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportActualPodCount(v int64) error {
+	return nil
+}
+
+// ReportStableRequestConcurrency of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportStableRequestConcurrency(v float64) error {
+	return nil
+}
+
+// ReportPanicRequestConcurrency of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportPanicRequestConcurrency(v float64) error {
+	return nil
+}
+
+// ReportStableRPS of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportStableRPS(v float64) error {
+	return nil
+}
+
+// ReportPanicRPS of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportPanicRPS(v float64) error {
+	return nil
+}
+
+// ReportTargetRPS of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportTargetRPS(v float64) error {
+	return nil
+}
+
+// ReportTargetRequestConcurrency of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportTargetRequestConcurrency(v float64) error {
+	return nil
+}
+
+// ReportPanic of a mockReporter does nothing and return nil for error.
+func (r *mockReporter) ReportPanic(v int64) error {
+	return nil
+}
+
+// ReportExcessBurstCapacity reports excess burst capacity.
+func (r *mockReporter) ReportExcessBurstCapacity(v float64) error { + return nil +} + +func newTestAutoscaler(t *testing.T, targetValue, targetBurstCapacity float64, metrics MetricClient) *Autoscaler { + return newTestAutoscalerWithScalingMetric(t, targetValue, targetBurstCapacity, metrics, "concurrency") +} + +func newTestAutoscalerWithScalingMetric(t *testing.T, targetValue, targetBurstCapacity float64, metrics MetricClient, metric string) *Autoscaler { + t.Helper() + deciderSpec := &DeciderSpec{ + ScalingMetric: metric, + TargetValue: targetValue, + TotalValue: targetValue / targetUtilization, // For UTs presume 75% utilization + TargetBurstCapacity: targetBurstCapacity, + PanicThreshold: 2 * targetValue, + MaxScaleUpRate: 10, + MaxScaleDownRate: 10, + StableWindow: stableWindow, + ServiceName: testService, + } + + l := kubeInformer.Core().V1().Endpoints().Lister() + // This ensures that we have endpoints object to start the autoscaler. + endpoints(0, testService) + a, err := New(testNamespace, testRevision, metrics, l, deciderSpec, &mockReporter{}) + if err != nil { + t.Fatalf("Error creating test autoscaler: %v", err) + } + endpoints(1, testService) + return a +} + +func (a *Autoscaler) expectScale(t *testing.T, now time.Time, expectScale, expectEBC int32, expectOK bool) { + t.Helper() + scale, ebc, ok := a.Scale(TestContextWithLogger(t), now) + if ok != expectOK { + t.Errorf("Unexpected autoscale decision. Expected %v. 
Got %v.", expectOK, ok) + } + if got, want := scale, expectScale; got != want { + t.Errorf("Scale %d, want: %d", got, want) + } + if got, want := ebc, expectEBC; got != want { + t.Errorf("ExcessBurstCapacity = %d, want: %d", got, want) + } +} + +func endpoints(count int, svc string) { + epAddresses := make([]corev1.EndpointAddress, count) + for i := 0; i < count; i++ { + ip := fmt.Sprintf("127.0.0.%v", i+1) + epAddresses[i] = corev1.EndpointAddress{IP: ip} + } + + ep := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: svc, + }, + Subsets: []corev1.EndpointSubset{{ + Addresses: epAddresses, + }}, + } + kubeClient.CoreV1().Endpoints(testNamespace).Create(ep) + kubeInformer.Core().V1().Endpoints().Informer().GetIndexer().Add(ep) +} + +func TestStartInPanicMode(t *testing.T) { + metrics := &autoscalerfake.StaticMetricClient + deciderSpec := &DeciderSpec{ + TargetValue: 100, + TotalValue: 120, + TargetBurstCapacity: 11, + PanicThreshold: 220, + MaxScaleUpRate: 10, + MaxScaleDownRate: 10, + StableWindow: stableWindow, + ServiceName: testService, + } + + l := kubeInformer.Core().V1().Endpoints().Lister() + for i := 0; i < 2; i++ { + endpoints(i, testService) + a, err := New(testNamespace, testRevision, metrics, l, deciderSpec, &mockReporter{}) + if err != nil { + t.Fatalf("Error creating test autoscaler: %v", err) + } + if !a.panicTime.IsZero() { + t.Errorf("Create at scale %d had panic mode on", i) + } + if got, want := int(a.maxPanicPods), i; got != want { + t.Errorf("MaxPanicPods = %d, want: %d", got, want) + } + } + + // Now start with 2 and make sure we're in panic mode. 
+ endpoints(2, testService) + a, err := New(testNamespace, testRevision, metrics, l, deciderSpec, &mockReporter{}) + if err != nil { + t.Fatalf("Error creating test autoscaler: %v", err) + } + if a.panicTime.IsZero() { + t.Error("Create at scale 2 had panic mode off") + } + if got, want := int(a.maxPanicPods), 2; got != want { + t.Errorf("MaxPanicPods = %d, want: %d", got, want) + } +} + +func TestNewFail(t *testing.T) { + eraseEndpoints() + metrics := &autoscalerfake.StaticMetricClient + deciderSpec := &DeciderSpec{ + TargetValue: 100, + TotalValue: 120, + TargetBurstCapacity: 11, + PanicThreshold: 220, + MaxScaleUpRate: 10, + MaxScaleDownRate: 10, + StableWindow: stableWindow, + ServiceName: testService, + } + + l := kubeInformer.Core().V1().Endpoints().Lister() + a, err := New(testNamespace, testRevision, metrics, l, deciderSpec, &mockReporter{}) + if err != nil { + t.Errorf("No endpoints should succeed, err = %v", err) + } + if got, want := int(a.maxPanicPods), 0; got != want { + t.Errorf("maxPanicPods = %d, want: 0", got) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/collector.go b/test/vendor/knative.dev/serving/pkg/autoscaler/collector.go new file mode 100644 index 0000000000..8e0d541333 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/collector.go @@ -0,0 +1,361 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/logging/logkey" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler/aggregation" +) + +const ( + // scrapeTickInterval is the interval of time between triggering StatsScraper.Scrape() + // to get metrics across all pods of a revision. + scrapeTickInterval = time.Second + + // BucketSize is the size of the buckets of stats we create. + // NB: if this is more than 1s, we need to average values in the + // metrics buckets. + BucketSize = scrapeTickInterval +) + +var ( + // ErrNoData denotes that the collector could not calculate data. + ErrNoData = errors.New("no data available") + + // ErrNotScraping denotes that the collector is not collecting metrics for the given resource. + ErrNotScraping = errors.New("the requested resource is not being scraped") +) + +// StatsScraperFactory creates a StatsScraper for a given Metric. +type StatsScraperFactory func(*av1alpha1.Metric) (StatsScraper, error) + +// Stat defines a single measurement at a point in time +type Stat struct { + // The time the data point was received by autoscaler. + Time time.Time + + // The unique identity of this pod. Used to count how many pods + // are contributing to the metrics. + PodName string + + // Average number of requests currently being handled by this pod. + AverageConcurrentRequests float64 + + // Part of AverageConcurrentRequests, for requests going through a proxy. + AverageProxiedConcurrentRequests float64 + + // Number of requests received since last Stat (approximately requests per second). + RequestCount float64 + + // Part of RequestCount, for requests going through a proxy. + ProxiedRequestCount float64 + + // Process uptime in seconds. 
+ ProcessUptime float64 +} + +var emptyStat = Stat{} + +// StatMessage wraps a Stat with identifying information so it can be routed +// to the correct receiver. +type StatMessage struct { + Key types.NamespacedName + Stat Stat +} + +// Collector starts and stops metric collection for a given entity. +type Collector interface { + // CreateOrUpdate either creates a collection for the given metric or update it, should + // it already exist. + CreateOrUpdate(*av1alpha1.Metric) error + // Record allows stats to be captured that came from outside the Collector. + Record(key types.NamespacedName, stat Stat) + // Delete deletes a Metric and halts collection. + Delete(string, string) error +} + +// MetricClient surfaces the metrics that can be obtained via the collector. +type MetricClient interface { + // StableAndPanicConcurrency returns both the stable and the panic concurrency + // for the given replica as of the given time. + StableAndPanicConcurrency(key types.NamespacedName, now time.Time) (float64, float64, error) + + // StableAndPanicRPS returns both the stable and the panic RPS + // for the given replica as of the given time. + StableAndPanicRPS(key types.NamespacedName, now time.Time) (float64, float64, error) +} + +// MetricCollector manages collection of metrics for many entities. +type MetricCollector struct { + logger *zap.SugaredLogger + + statsScraperFactory StatsScraperFactory + tickProvider func(time.Duration) *time.Ticker + + collections map[types.NamespacedName]*collection + collectionsMutex sync.RWMutex +} + +var _ Collector = (*MetricCollector)(nil) +var _ MetricClient = (*MetricCollector)(nil) + +// NewMetricCollector creates a new metric collector. 
+func NewMetricCollector(statsScraperFactory StatsScraperFactory, logger *zap.SugaredLogger) *MetricCollector { + return &MetricCollector{ + logger: logger, + collections: make(map[types.NamespacedName]*collection), + statsScraperFactory: statsScraperFactory, + tickProvider: time.NewTicker, + } +} + +// CreateOrUpdate either creates a collection for the given metric or update it, should +// it already exist. +// Map access optimized via double-checked locking. +func (c *MetricCollector) CreateOrUpdate(metric *av1alpha1.Metric) error { + scraper, err := c.statsScraperFactory(metric) + if err != nil { + return err + } + key := types.NamespacedName{Namespace: metric.Namespace, Name: metric.Name} + + c.collectionsMutex.RLock() + collection, exists := c.collections[key] + c.collectionsMutex.RUnlock() + if exists { + collection.updateScraper(scraper) + collection.updateMetric(metric) + return nil + } + + c.collectionsMutex.Lock() + defer c.collectionsMutex.Unlock() + + collection, exists = c.collections[key] + if exists { + collection.updateScraper(scraper) + collection.updateMetric(metric) + return nil + } + + c.collections[key] = newCollection(metric, scraper, c.tickProvider, c.logger) + return nil +} + +// Delete deletes a Metric and halts collection. +func (c *MetricCollector) Delete(namespace, name string) error { + c.collectionsMutex.Lock() + defer c.collectionsMutex.Unlock() + + key := types.NamespacedName{Namespace: namespace, Name: name} + if collection, ok := c.collections[key]; ok { + collection.close() + delete(c.collections, key) + } + return nil +} + +// Record records a stat that's been generated outside of the metric collector. +func (c *MetricCollector) Record(key types.NamespacedName, stat Stat) { + c.collectionsMutex.RLock() + defer c.collectionsMutex.RUnlock() + + if collection, exists := c.collections[key]; exists { + collection.record(stat) + } +} + +// StableAndPanicConcurrency returns both the stable and the panic concurrency. 
+// It may truncate metric buckets as a side-effect. +func (c *MetricCollector) StableAndPanicConcurrency(key types.NamespacedName, now time.Time) (float64, float64, error) { + c.collectionsMutex.RLock() + defer c.collectionsMutex.RUnlock() + + collection, exists := c.collections[key] + if !exists { + return 0, 0, ErrNotScraping + } + + s, p, noData := collection.stableAndPanicConcurrency(now) + if noData { + return 0, 0, ErrNoData + } + return s, p, nil +} + +// StableAndPanicRPS returns both the stable and the panic RPS. +// It may truncate metric buckets as a side-effect. +func (c *MetricCollector) StableAndPanicRPS(key types.NamespacedName, now time.Time) (float64, float64, error) { + c.collectionsMutex.RLock() + defer c.collectionsMutex.RUnlock() + + collection, exists := c.collections[key] + if !exists { + return 0, 0, ErrNotScraping + } + + s, p, noData := collection.stableAndPanicRPS(now) + if noData { + return 0, 0, ErrNoData + } + return s, p, nil +} + +// collection represents the collection of metrics for one specific entity. +type collection struct { + metricMutex sync.RWMutex + metric *av1alpha1.Metric + + scraperMutex sync.RWMutex + scraper StatsScraper + concurrencyBuckets *aggregation.TimedFloat64Buckets + concurrencyPanicBuckets *aggregation.TimedFloat64Buckets + rpsBuckets *aggregation.TimedFloat64Buckets + rpsPanicBuckets *aggregation.TimedFloat64Buckets + + grp sync.WaitGroup + stopCh chan struct{} +} + +func (c *collection) updateScraper(ss StatsScraper) { + c.scraperMutex.Lock() + defer c.scraperMutex.Unlock() + c.scraper = ss +} + +func (c *collection) getScraper() StatsScraper { + c.scraperMutex.RLock() + defer c.scraperMutex.RUnlock() + return c.scraper +} + +// newCollection creates a new collection, which uses the given scraper to +// collect stats every scrapeTickInterval. 
+func newCollection(metric *av1alpha1.Metric, scraper StatsScraper, tickFactory func(time.Duration) *time.Ticker, logger *zap.SugaredLogger) *collection { + c := &collection{ + metric: metric, + concurrencyBuckets: aggregation.NewTimedFloat64Buckets( + metric.Spec.StableWindow, BucketSize), + concurrencyPanicBuckets: aggregation.NewTimedFloat64Buckets( + metric.Spec.PanicWindow, BucketSize), + rpsBuckets: aggregation.NewTimedFloat64Buckets( + metric.Spec.StableWindow, BucketSize), + rpsPanicBuckets: aggregation.NewTimedFloat64Buckets( + metric.Spec.PanicWindow, BucketSize), + scraper: scraper, + + stopCh: make(chan struct{}), + } + + logger = logger.Named("collector").With( + zap.String(logkey.Key, fmt.Sprintf("%s/%s", metric.Namespace, metric.Name))) + + c.grp.Add(1) + go func() { + defer c.grp.Done() + + scrapeTicker := tickFactory(scrapeTickInterval) + for { + select { + case <-c.stopCh: + scrapeTicker.Stop() + return + case <-scrapeTicker.C: + stat, err := c.getScraper().Scrape() + if err != nil { + copy := metric.DeepCopy() + switch { + case err == ErrFailedGetEndpoints: + copy.Status.MarkMetricNotReady("NoEndpoints", ErrFailedGetEndpoints.Error()) + case err == ErrDidNotReceiveStat: + copy.Status.MarkMetricFailed("DidNotReceiveStat", ErrDidNotReceiveStat.Error()) + default: + copy.Status.MarkMetricNotReady("CreateOrUpdateFailed", "Collector has failed.") + } + logger.Errorw("Failed to scrape metrics", zap.Error(err)) + c.updateMetric(copy) + } + if stat != emptyStat { + c.record(stat) + } + } + } + }() + + return c +} + +// updateMetric safely updates the metric stored in the collection. 
+func (c *collection) updateMetric(metric *av1alpha1.Metric) { + c.metricMutex.Lock() + defer c.metricMutex.Unlock() + + c.metric = metric + c.concurrencyBuckets.ResizeWindow(metric.Spec.StableWindow) + c.concurrencyPanicBuckets.ResizeWindow(metric.Spec.PanicWindow) + c.rpsBuckets.ResizeWindow(metric.Spec.StableWindow) + c.rpsPanicBuckets.ResizeWindow(metric.Spec.PanicWindow) +} + +// currentMetric safely returns the current metric stored in the collection. +func (c *collection) currentMetric() *av1alpha1.Metric { + c.metricMutex.RLock() + defer c.metricMutex.RUnlock() + + return c.metric +} + +// record adds a stat to the current collection. +func (c *collection) record(stat Stat) { + // Proxied requests have been counted at the activator. Subtract + // them to avoid double counting. + concurr := stat.AverageConcurrentRequests - stat.AverageProxiedConcurrentRequests + c.concurrencyBuckets.Record(stat.Time, concurr) + c.concurrencyPanicBuckets.Record(stat.Time, concurr) + rps := stat.RequestCount - stat.ProxiedRequestCount + c.rpsBuckets.Record(stat.Time, rps) + c.rpsPanicBuckets.Record(stat.Time, rps) +} + +// stableAndPanicConcurrency calculates both stable and panic concurrency based on the +// current stats. +func (c *collection) stableAndPanicConcurrency(now time.Time) (float64, float64, bool) { + return c.concurrencyBuckets.WindowAverage(now), + c.concurrencyPanicBuckets.WindowAverage(now), + c.concurrencyBuckets.IsEmpty(now) +} + +// stableAndPanicRPS calculates both stable and panic RPS based on the +// current stats. +func (c *collection) stableAndPanicRPS(now time.Time) (float64, float64, bool) { + return c.rpsBuckets.WindowAverage(now), c.rpsPanicBuckets.WindowAverage(now), + c.rpsBuckets.IsEmpty(now) +} + +// close stops collecting metrics, stops the scraper. 
+func (c *collection) close() { + close(c.stopCh) + c.grp.Wait() +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/collector_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/collector_test.go new file mode 100644 index 0000000000..b4af4ee102 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/collector_test.go @@ -0,0 +1,448 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "errors" + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + duckv1 "knative.dev/pkg/apis/duck/v1" + . 
"knative.dev/pkg/logging/testing" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/autoscaler/aggregation" +) + +var ( + defaultNamespace = "test-namespace" + defaultName = "test-name" + defaultMetric = av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: defaultNamespace, + Name: defaultName, + }, + Spec: av1alpha1.MetricSpec{ + StableWindow: 60 * time.Second, + PanicWindow: 6 * time.Second, + ScrapeTarget: "original-target", + }, + } +) + +func TestMetricCollectorCRUD(t *testing.T) { + logger := TestLogger(t) + + scraper := &testScraper{ + s: func() (Stat, error) { + return emptyStat, nil + }, + url: "just-right", + } + scraper2 := &testScraper{ + s: func() (Stat, error) { + return emptyStat, nil + }, + url: "slightly-off", + } + factory := scraperFactory(scraper, nil) + + t.Run("error on creating scraper", func(t *testing.T) { + want := errors.New("factory failure") + failingFactory := scraperFactory(nil, want) + + coll := NewMetricCollector(failingFactory, logger) + got := coll.CreateOrUpdate(&defaultMetric) + + if got != want { + t.Errorf("Create() = %v, want %v", got, want) + } + }) + + t.Run("full crud", func(t *testing.T) { + key := types.NamespacedName{Namespace: defaultMetric.Namespace, Name: defaultMetric.Name} + coll := NewMetricCollector(factory, logger) + if err := coll.CreateOrUpdate(&defaultMetric); err != nil { + t.Errorf("CreateOrUpdate() = %v, want no error", err) + } + + got := coll.collections[key].metric + if !cmp.Equal(&defaultMetric, got) { + t.Errorf("Get() didn't return the same metric: %v", cmp.Diff(&defaultMetric, got)) + } + + defaultMetric.Spec.ScrapeTarget = "new-target" + coll.statsScraperFactory = scraperFactory(scraper2, nil) + if err := coll.CreateOrUpdate(&defaultMetric); err != nil { + t.Errorf("CreateOrUpdate() = %v, want no error", err) + } + + got = coll.collections[key].metric + if !cmp.Equal(&defaultMetric, got) { + 
t.Errorf("Update() didn't return the same metric: %v", cmp.Diff(&defaultMetric, got)) + } + + newURL := (coll.collections[key]).scraper.(*testScraper).url + if got, want := newURL, "slightly-off"; got != want { + t.Errorf("Updated scraper URL = %s, want: %s, diff: %s", got, want, cmp.Diff(got, want)) + } + + if err := coll.Delete(defaultNamespace, defaultName); err != nil { + t.Errorf("Delete() = %v, want no error", err) + } + }) +} + +type manualTickProvider struct { + ch chan time.Time +} + +func (mtp *manualTickProvider) NewTicker(time.Duration) *time.Ticker { + return &time.Ticker{ + C: mtp.ch, + } +} + +func TestMetricCollectorScraper(t *testing.T) { + logger := TestLogger(t) + + mtp := &manualTickProvider{ + ch: make(chan time.Time), + } + now := time.Now() + metricKey := types.NamespacedName{Namespace: defaultNamespace, Name: defaultName} + const ( + reportConcurrency = 10.0 + reportRPS = 20.0 + wantConcurrency = 3 * 10. / 60 // In 3 seconds we'll scrape 3 times, window is 60s. + wantRPS = 3 * 20. / 60 + wantPConcurrency = 3 * 10 / 6. + wantPRPS = 3 * 20 / 6. + ) + stat := Stat{ + Time: now, + PodName: "testPod", + AverageConcurrentRequests: reportConcurrency, + RequestCount: reportRPS, + } + scraper := &testScraper{ + s: func() (Stat, error) { + return stat, nil + }, + } + factory := scraperFactory(scraper, nil) + + coll := NewMetricCollector(factory, logger) + coll.tickProvider = mtp.NewTicker // custom ticker. + coll.CreateOrUpdate(&defaultMetric) + + // Tick three times. Time doesn't matter since we use the time on the Stat. + mtp.ch <- now + mtp.ch <- now + mtp.ch <- now + var gotRPS, gotConcurrency, panicRPS, panicConcurrency float64 + // Poll to see that the async loop completed. 
+ wait.PollImmediate(10*time.Millisecond, 100*time.Millisecond, func() (bool, error) { + gotConcurrency, _, _ = coll.StableAndPanicConcurrency(metricKey, now) + gotRPS, _, _ = coll.StableAndPanicRPS(metricKey, now) + return gotConcurrency == wantConcurrency && gotRPS == wantRPS, nil + }) + + gotConcurrency, panicConcurrency, _ = coll.StableAndPanicConcurrency(metricKey, now) + gotRPS, panicRPS, err := coll.StableAndPanicRPS(metricKey, now) + if err != nil { + t.Errorf("StableAndPanicRPS = %v", err) + } + if panicConcurrency != wantPConcurrency { + t.Errorf("PanicConcurrency() = %v, want %v", panicConcurrency, wantPConcurrency) + } + if panicRPS != wantPRPS { + t.Errorf("PanicRPS() = %v, want %v", panicRPS, wantPRPS) + } + if gotConcurrency != wantConcurrency { + t.Errorf("StableConcurrency() = %v, want %v", gotConcurrency, wantConcurrency) + } + if gotRPS != wantRPS { + t.Errorf("StableRPS() = %v, want %v", gotRPS, wantRPS) + } + + // Now let's report 2 more values (for a total of 5). + mtp.ch <- now + mtp.ch <- now + + // Wait for async loop to finish. + wait.PollImmediate(10*time.Millisecond, 100*time.Millisecond, func() (bool, error) { + gotConcurrency, _, _ = coll.StableAndPanicConcurrency(metricKey, now.Add(stableWindow).Add(-5*time.Second)) + gotRPS, _, _ = coll.StableAndPanicRPS(metricKey, now.Add(stableWindow).Add(-5*time.Second)) + return gotConcurrency == reportConcurrency && gotRPS == reportRPS, nil + }) + if gotConcurrency != reportConcurrency { + t.Errorf("StableAndPanicConcurrency() = %v, want %v", gotConcurrency, wantConcurrency) + } + if gotRPS != reportRPS { + t.Errorf("StableAndPanicRPS() = %v, want %v", gotRPS, wantRPS) + } + + // Deleting the metric should cause a calculation error. 
+ coll.Delete(defaultNamespace, defaultName) + _, _, err = coll.StableAndPanicConcurrency(metricKey, now) + if err != ErrNotScraping { + t.Errorf("StableAndPanicConcurrency() = %v, want %v", err, ErrNotScraping) + } + _, _, err = coll.StableAndPanicRPS(metricKey, now) + if err != ErrNotScraping { + t.Errorf("StableAndPanicRPS() = %v, want %v", err, ErrNotScraping) + } +} + +func TestMetricCollectorRecord(t *testing.T) { + logger := TestLogger(t) + + now := time.Now() + oldTime := now.Add(-70 * time.Second) + metricKey := types.NamespacedName{Namespace: defaultNamespace, Name: defaultName} + const want = 10.0 + outdatedStat := Stat{ + Time: oldTime, + PodName: "testPod", + AverageConcurrentRequests: 100, + RequestCount: 100, + } + stat := Stat{ + Time: now, + PodName: "testPod", + AverageConcurrentRequests: want + 10, + AverageProxiedConcurrentRequests: 10, // this should be subtracted from the above. + RequestCount: want + 20, + ProxiedRequestCount: 20, // this should be subtracted from the above. + } + scraper := &testScraper{ + s: func() (Stat, error) { + return emptyStat, nil + }, + } + factory := scraperFactory(scraper, nil) + + coll := NewMetricCollector(factory, logger) + mtp := &manualTickProvider{ + ch: make(chan time.Time), + } + coll.tickProvider = mtp.NewTicker // This will ensure time based scraping won't interfere. + + // Freshly created collection does not contain any metrics and should return an error. + coll.CreateOrUpdate(&defaultMetric) + if _, _, err := coll.StableAndPanicConcurrency(metricKey, now); err == nil { + t.Error("StableAndPanicConcurrency() = nil, wanted an error") + } + if _, _, err := coll.StableAndPanicRPS(metricKey, now); err == nil { + t.Error("StableAndPanicRPS() = nil, wanted an error") + } + + // Add two stats. The second record operation will remove the first outdated one. + // After this the concurrencies are calculated correctly. 
+ coll.Record(metricKey, outdatedStat) + coll.Record(metricKey, stat) + stable, panic, err := coll.StableAndPanicConcurrency(metricKey, now) + if err != nil { + t.Fatalf("StableAndPanicConcurrency: %v", err) + } + // Scale to the window sizes. + const ( + wantS = want / 60 + wantP = want / 6 + tolerance = 0.001 + ) + if math.Abs(stable-wantS) > tolerance || math.Abs(panic-wantP) > tolerance { + t.Errorf("StableAndPanicConcurrency() = %v, %v; want %v, %v, nil", stable, panic, wantS, wantP) + } + stable, panic, err = coll.StableAndPanicRPS(metricKey, now) + if err != nil { + t.Fatalf("StableAndPanicRPS: %v", err) + } + if math.Abs(stable-wantS) > tolerance || math.Abs(panic-wantP) > tolerance { + t.Errorf("StableAndPanicRPS() = %v, %v; want %v, %v", stable, panic, wantS, wantP) + } +} + +func TestMetricCollectorError(t *testing.T) { + testCases := []struct { + name string + scraper *testScraper + metric *av1alpha1.Metric + expectedMetricStatus duckv1.Status + }{{ + name: "Failed to get endpoints scraper error", + scraper: &testScraper{ + s: func() (Stat, error) { + return emptyStat, ErrFailedGetEndpoints + }, + }, + metric: &av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Labels: map[string]string{ + serving.RevisionLabelKey: testRevision, + }, + }, + Spec: av1alpha1.MetricSpec{ + ScrapeTarget: testRevision + "-zhudex", + }, + }, + expectedMetricStatus: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.MetricConditionReady, + Status: corev1.ConditionUnknown, + Reason: "NoEndpoints", + Message: ErrFailedGetEndpoints.Error(), + }}, + }, + }, { + name: "Did not receive stat scraper error", + scraper: &testScraper{ + s: func() (Stat, error) { + return emptyStat, ErrDidNotReceiveStat + }, + }, + metric: &av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Labels: map[string]string{ + serving.RevisionLabelKey: testRevision, + }, + }, + Spec: 
av1alpha1.MetricSpec{ + ScrapeTarget: testRevision + "-zhudex", + }, + }, + expectedMetricStatus: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.MetricConditionReady, + Status: corev1.ConditionFalse, + Reason: "DidNotReceiveStat", + Message: ErrDidNotReceiveStat.Error(), + }}, + }, + }, { + name: "Other scraper error", + scraper: &testScraper{ + s: func() (Stat, error) { + return emptyStat, errors.New("foo") + }, + }, + metric: &av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Labels: map[string]string{ + serving.RevisionLabelKey: testRevision, + }, + }, + Spec: av1alpha1.MetricSpec{ + ScrapeTarget: testRevision + "-zhudex", + }, + }, + expectedMetricStatus: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: av1alpha1.MetricConditionReady, + Status: corev1.ConditionUnknown, + Reason: "CreateOrUpdateFailed", + Message: "Collector has failed.", + }}, + }, + }} + + logger := TestLogger(t) + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + factory := scraperFactory(test.scraper, nil) + coll := NewMetricCollector(factory, logger) + coll.CreateOrUpdate(test.metric) + key := types.NamespacedName{Namespace: test.metric.Namespace, Name: test.metric.Name} + + var got duckv1.Status + wait.PollImmediate(10*time.Millisecond, 2*time.Second, func() (bool, error) { + collection, ok := coll.collections[key] + if ok { + got = collection.currentMetric().Status.Status + return equality.Semantic.DeepEqual(got, test.expectedMetricStatus), nil + } + return false, nil + }) + if !equality.Semantic.DeepEqual(got, test.expectedMetricStatus) { + t.Errorf("Got = %#v, want: %#v, diff:\n%q", got, test.expectedMetricStatus, cmp.Diff(got, test.expectedMetricStatus)) + } + coll.Delete(test.metric.Namespace, test.metric.Name) + }) + } +} + +func scraperFactory(scraper StatsScraper, err error) StatsScraperFactory { + return func(*av1alpha1.Metric) (StatsScraper, error) { + return scraper, err + } 
+} + +type testScraper struct { + s func() (Stat, error) + url string +} + +func (s *testScraper) Scrape() (Stat, error) { + return s.s() +} + +func TestMetricCollectorAggregate(t *testing.T) { + m := defaultMetric + m.Spec.StableWindow = 6 * time.Second + m.Spec.PanicWindow = 2 * time.Second + c := &collection{ + metric: &m, + concurrencyBuckets: aggregation.NewTimedFloat64Buckets(m.Spec.StableWindow, BucketSize), + concurrencyPanicBuckets: aggregation.NewTimedFloat64Buckets(m.Spec.PanicWindow, BucketSize), + rpsBuckets: aggregation.NewTimedFloat64Buckets(m.Spec.StableWindow, BucketSize), + rpsPanicBuckets: aggregation.NewTimedFloat64Buckets(m.Spec.PanicWindow, BucketSize), + } + now := time.Now() + for i := 0; i < 10; i++ { + stat := Stat{ + Time: now.Add(time.Duration(i) * time.Second), + PodName: "testPod", + AverageConcurrentRequests: float64(i + 5), + RequestCount: float64(i + 5), + } + c.record(stat) + } + st, pan, noData := c.stableAndPanicConcurrency(now.Add(time.Duration(9) * time.Second)) + if noData { + t.Fatal("Unexpected NoData error") + } + if got, want := st, 11.5; got != want { + t.Errorf("Stable Concurrency = %f, want: %f", got, want) + } + if got, want := pan, 13.5; got != want { + t.Errorf("Stable Concurrency = %f, want: %f", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/config.go b/test/vendor/knative.dev/serving/pkg/autoscaler/config.go new file mode 100644 index 0000000000..8a2a1ddc5d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/config.go @@ -0,0 +1,234 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "fmt" + "strconv" + "strings" + "time" + + "knative.dev/serving/pkg/apis/autoscaling" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // ConfigName is the name of the config map of the autoscaler. + ConfigName = "config-autoscaler" + + defaultTargetUtilization = 0.7 +) + +// Config defines the tunable autoscaler parameters +// +k8s:deepcopy-gen=true +type Config struct { + // Feature flags. + EnableScaleToZero bool + + // Enable connection-aware pod scaledown + EnableGracefulScaledown bool + + // Target concurrency knobs for different container concurrency configurations. + ContainerConcurrencyTargetFraction float64 + ContainerConcurrencyTargetDefault float64 + // TargetUtilization is used for the metrics other than concurrency. This is not + // configurable now. Customers can override it by specifying + // autoscaling.knative.dev/targetUtilizationPercentage in Revision annotation. + // TODO(yanweiguo): Expose this to config-autoscaler configmap and eventually + // deprecate ContainerConcurrencyTargetFraction. + TargetUtilization float64 + // RPSTargetDefault is the default target value for requests per second. + RPSTargetDefault float64 + // NB: most of our computations are in floats, so this is float to avoid casting. + TargetBurstCapacity float64 + + // General autoscaler algorithm configuration. 
+ MaxScaleUpRate float64 + MaxScaleDownRate float64 + StableWindow time.Duration + PanicWindowPercentage float64 + PanicThresholdPercentage float64 + TickInterval time.Duration + + ScaleToZeroGracePeriod time.Duration +} + +// NewConfigFromMap creates a Config from the supplied map +func NewConfigFromMap(data map[string]string) (*Config, error) { + lc := &Config{ + TargetUtilization: defaultTargetUtilization, + } + + // Process bool fields. + for _, b := range []struct { + key string + field *bool + defaultValue bool + }{ + { + key: "enable-scale-to-zero", + field: &lc.EnableScaleToZero, + defaultValue: true, + }, + { + key: "enable-graceful-scaledown", + field: &lc.EnableGracefulScaledown, + defaultValue: false, + }} { + if raw, ok := data[b.key]; !ok { + *b.field = b.defaultValue + } else { + *b.field = strings.EqualFold(raw, "true") + } + } + + // Process Float64 fields + for _, f64 := range []struct { + key string + field *float64 + // specified exactly when optional + defaultValue float64 + }{{ + key: "max-scale-up-rate", + field: &lc.MaxScaleUpRate, + defaultValue: 1000.0, + }, { + key: "max-scale-down-rate", + field: &lc.MaxScaleDownRate, + defaultValue: 2.0, + }, { + key: "container-concurrency-target-percentage", + field: &lc.ContainerConcurrencyTargetFraction, + // TODO(#1956): Tune target usage based on empirical data. 
+ defaultValue: defaultTargetUtilization, + }, { + key: "container-concurrency-target-default", + field: &lc.ContainerConcurrencyTargetDefault, + defaultValue: 100.0, + }, { + key: "requests-per-second-target-default", + field: &lc.RPSTargetDefault, + defaultValue: 200.0, + }, { + key: "target-burst-capacity", + field: &lc.TargetBurstCapacity, + defaultValue: 200, + }, { + key: "panic-window-percentage", + field: &lc.PanicWindowPercentage, + defaultValue: 10.0, + }, { + key: "panic-threshold-percentage", + field: &lc.PanicThresholdPercentage, + defaultValue: 200.0, + }} { + if raw, ok := data[f64.key]; !ok { + *f64.field = f64.defaultValue + } else if val, err := strconv.ParseFloat(raw, 64); err != nil { + return nil, err + } else { + *f64.field = val + } + } + + // Adjust % ⇒ fractions: for legacy reasons we allow values in the + // (0, 1] interval, so minimal percentage must be greater than 1.0. + // Internally we want to have fractions, since otherwise we'll have + // to perform division on each computation. 
+ if lc.ContainerConcurrencyTargetFraction > 1.0 { + lc.ContainerConcurrencyTargetFraction /= 100.0 + } + + // Process Duration fields + for _, dur := range []struct { + key string + field *time.Duration + defaultValue time.Duration + }{{ + key: "stable-window", + field: &lc.StableWindow, + defaultValue: 60 * time.Second, + }, { + key: "scale-to-zero-grace-period", + field: &lc.ScaleToZeroGracePeriod, + defaultValue: 30 * time.Second, + }, { + key: "tick-interval", + field: &lc.TickInterval, + defaultValue: 2 * time.Second, + }} { + if raw, ok := data[dur.key]; !ok { + *dur.field = dur.defaultValue + } else if val, err := time.ParseDuration(raw); err != nil { + return nil, err + } else { + *dur.field = val + } + } + + return validate(lc) +} + +func validate(lc *Config) (*Config, error) { + if lc.ScaleToZeroGracePeriod < autoscaling.WindowMin { + return nil, fmt.Errorf("scale-to-zero-grace-period must be at least %v, got %v", autoscaling.WindowMin, lc.ScaleToZeroGracePeriod) + } + if lc.TargetBurstCapacity < 0 && lc.TargetBurstCapacity != -1 { + return nil, fmt.Errorf("target-burst-capacity must be non-negative, got %f", lc.TargetBurstCapacity) + } + + if lc.ContainerConcurrencyTargetFraction <= 0 || lc.ContainerConcurrencyTargetFraction > 1 { + return nil, fmt.Errorf("container-concurrency-target-percentage = %f is outside of valid range of (0, 100]", lc.ContainerConcurrencyTargetFraction) + } + + if x := lc.ContainerConcurrencyTargetFraction * lc.ContainerConcurrencyTargetDefault; x < autoscaling.TargetMin { + return nil, fmt.Errorf("container-concurrency-target-percentage and container-concurrency-target-default yield target concurrency of %f, can't be less than 1", x) + } + + if lc.RPSTargetDefault < autoscaling.TargetMin { + return nil, fmt.Errorf("requests-per-second-target-default must be at least %v, got %v", autoscaling.TargetMin, lc.RPSTargetDefault) + } + + if lc.MaxScaleUpRate <= 1.0 { + return nil, fmt.Errorf("max-scale-up-rate = %v, must be greater 
than 1.0", lc.MaxScaleUpRate) + } + + if lc.MaxScaleDownRate <= 1.0 { + return nil, fmt.Errorf("max-scale-down-rate = %v, must be greater than 1.0", lc.MaxScaleDownRate) + } + + // We can't permit stable window be less than our aggregation window for correctness. + if lc.StableWindow < autoscaling.WindowMin { + return nil, fmt.Errorf("stable-window = %v, must be at least %v", lc.StableWindow, autoscaling.WindowMin) + } + if lc.StableWindow.Round(time.Second) != lc.StableWindow { + return nil, fmt.Errorf("stable-window = %v, must be specified with at most second precision", lc.StableWindow) + } + + effPW := time.Duration(lc.PanicWindowPercentage / 100 * float64(lc.StableWindow)) + if effPW < BucketSize || effPW > lc.StableWindow { + return nil, fmt.Errorf("panic-window-percentage = %v, must be in [%v, 100] interval", lc.PanicWindowPercentage, 100*float64(BucketSize)/float64(lc.StableWindow)) + } + + return lc, nil +} + +// NewConfigFromConfigMap creates a Config from the supplied ConfigMap +func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { + return NewConfigFromMap(configMap.Data) +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/config_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/config_test.go new file mode 100644 index 0000000000..cca38e1f56 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/config_test.go @@ -0,0 +1,277 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + + . "knative.dev/pkg/configmap/testing" +) + +var defaultConfig = Config{ + EnableScaleToZero: true, + EnableGracefulScaledown: false, + ContainerConcurrencyTargetFraction: 0.7, + ContainerConcurrencyTargetDefault: 100, + RPSTargetDefault: 200, + TargetUtilization: 0.7, + TargetBurstCapacity: 200, + MaxScaleUpRate: 1000, + MaxScaleDownRate: 2, + StableWindow: time.Minute, + ScaleToZeroGracePeriod: 30 * time.Second, + TickInterval: 2 * time.Second, + PanicWindowPercentage: 10.0, + PanicThresholdPercentage: 200.0, +} + +func TestNewConfig(t *testing.T) { + tests := []struct { + name string + input map[string]string + want *Config + wantErr bool + }{{ + name: "default", + input: map[string]string{}, + want: &defaultConfig, + }, { + name: "minimum", + input: map[string]string{ + "max-scale-up-rate": "1.001", + "container-concurrency-target-percentage": "0.5", + "container-concurrency-target-default": "10.0", + "target-burst-capacity": "0", + "stable-window": "5m", + "tick-interval": "2s", + "panic-window-percentage": "10", + "panic-threshold-percentage": "200", + }, + want: func(c Config) *Config { + c.ContainerConcurrencyTargetFraction = 0.5 + c.ContainerConcurrencyTargetDefault = 10 + c.MaxScaleUpRate = 1.001 + c.TargetBurstCapacity = 0 + c.StableWindow = 5 * time.Minute + return &c + }(defaultConfig), + }, { + name: "concurrencty target percentage as percent", + input: map[string]string{ + "container-concurrency-target-percentage": "55", + }, + want: func(c Config) *Config { + c.ContainerConcurrencyTargetFraction = 0.55 + return &c + }(defaultConfig), + }, { + name: "with -1 tbc", + input: map[string]string{ + "target-burst-capacity": "-1", + }, + want: func(c Config) *Config { + c.TargetBurstCapacity = -1 + return &c + }(defaultConfig), + }, { + name: "with default toggles set", + input: map[string]string{ + "enable-scale-to-zero": "true", + 
"enable-graceful-scaledown": "false", + "max-scale-down-rate": "3.0", + "max-scale-up-rate": "1.01", + "container-concurrency-target-percentage": "0.71", + "container-concurrency-target-default": "10.5", + "requests-per-second-target-default": "10.11", + "target-burst-capacity": "12345", + "stable-window": "5m", + "tick-interval": "2s", + "panic-window-percentage": "10", + "panic-threshold-percentage": "200", + }, + want: func(c Config) *Config { + c.TargetBurstCapacity = 12345 + c.ContainerConcurrencyTargetDefault = 10.5 + c.ContainerConcurrencyTargetFraction = 0.71 + c.RPSTargetDefault = 10.11 + c.MaxScaleDownRate = 3 + c.MaxScaleUpRate = 1.01 + c.StableWindow = 5 * time.Minute + return &c + }(defaultConfig), + }, { + name: "with toggles on strange casing", + input: map[string]string{ + "enable-scale-to-zero": "TRUE", + "enable-graceful-scaledown": "FALSE", + }, + want: &defaultConfig, + }, { + name: "with toggles explicitly flipped", + input: map[string]string{ + "enable-scale-to-zero": "false", + "enable-graceful-scaledown": "true", + }, + want: func(c Config) *Config { + c.EnableScaleToZero = false + c.EnableGracefulScaledown = true + return &c + }(defaultConfig), + }, { + name: "with explicit grace period", + input: map[string]string{ + "enable-scale-to-zero": "false", + "scale-to-zero-grace-period": "33s", + }, + want: func(c Config) *Config { + c.EnableScaleToZero = false + c.ScaleToZeroGracePeriod = 33 * time.Second + return &c + }(defaultConfig), + }, { + name: "malformed float", + input: map[string]string{ + "max-scale-up-rate": "not a float", + }, + wantErr: true, + }, { + name: "malformed duration", + input: map[string]string{ + "stable-window": "not a duration", + }, + wantErr: true, + }, { + name: "invalid target burst capacity", + input: map[string]string{ + "target-burst-capacity": "-11", + }, + wantErr: true, + }, { + name: "invalid target %, too small", + input: map[string]string{ + "container-concurrency-target-percentage": "-42", + }, + 
wantErr: true, + }, { + name: "invalid target %, too big", + input: map[string]string{ + "container-concurrency-target-percentage": "142.4", + }, + wantErr: true, + }, { + name: "invalid RPS target, too small", + input: map[string]string{ + "requests-per-second-target-default": "-5.25", + }, + wantErr: true, + }, { + name: "target capacity less than 1", + input: map[string]string{ + "container-concurrency-target-percentage": "30.0", + "container-concurrency-target-default": "2", + }, + wantErr: true, + }, { + name: "max scale up rate 1.0", + input: map[string]string{ + "max-scale-up-rate": "1", + }, + wantErr: true, + }, { + name: "max down down rate negative", + input: map[string]string{ + "max-scale-down-rate": "-55", + }, + wantErr: true, + }, { + name: "max down down rate 1.0", + input: map[string]string{ + "max-scale-down-rate": "1", + }, + wantErr: true, + }, { + name: "stable window too small", + input: map[string]string{ + "stable-window": "1s", + }, + wantErr: true, + }, { + name: "stable not seconds", + input: map[string]string{ + "stable-window": "61984ms", + }, + wantErr: true, + }, { + name: "panic window percentage too small", + input: map[string]string{ + "stable-window": "12s", + "panic-window-percentage": "5", // 0.6s < BucketSize + }, + wantErr: true, + }, { + name: "panic window percentage too big", + input: map[string]string{ + "stable-window": "12s", + "panic-window": "3s", + "panic-window-percentage": "110", + }, + wantErr: true, + }, { + name: "TU*CC < 1", + input: map[string]string{ + "container-concurrency-target-percentage": "5", + "container-concurrency-target-default": "10.0", + }, + wantErr: true, + }, { + name: "grace window too small", + input: map[string]string{ + "stable-window": "12s", + "scale-to-zero-grace-period": "4s", + }, + wantErr: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := NewConfigFromConfigMap(&corev1.ConfigMap{ + Data: test.input, + }) + t.Logf("Error = %v", err) + 
if (err != nil) != test.wantErr { + t.Errorf("NewConfig() = %v, want %v", err, test.wantErr) + } + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("NewConfig (-want, +got) = %v", diff) + } + }) + } +} + +func TestOurConfig(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, ConfigName) + if _, err := NewConfigFromConfigMap(cm); err != nil { + t.Errorf("NewConfigFromConfigMap(actual) = %v", err) + } + if _, err := NewConfigFromConfigMap(example); err != nil { + t.Errorf("NewConfigFromConfigMap(example) = %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/doc.go b/test/vendor/knative.dev/serving/pkg/autoscaler/doc.go new file mode 100644 index 0000000000..2310795f10 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/doc.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package autoscaler calculates the number of pods necessary for the +desired level of concurrency per pod (stableConcurrencyPerPod). It +operates in two modes, stable mode and panic mode. + +Stable mode calculates the average concurrency observed over the last +60 seconds and adjusts the observed pod count to achieve the target +value. Current observed pod count is the number of unique pod names +which show up in the last 60 seconds. + +Panic mode calculates the average concurrency observed over the last 6 +seconds and adjusts the observed pod count to achieve the stable +target value. 
Panic mode is engaged when the observed 6 second average +concurrency reaches 2x the target stable concurrency. Panic mode will +last at least 60 seconds--longer if the 2x threshold is repeatedly +breached. During panic mode the number of pods is never decreased in +order to prevent flapping. + +Package autoscaler supports both single-tenant (one autoscaler per +revision) and multitenant (one autoscaler for all revisions) autoscalers; +config/controller.yaml determines which kind of autoscaler is used. +*/ +package autoscaler diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/fake/fake_metric_client.go b/test/vendor/knative.dev/serving/pkg/autoscaler/fake/fake_metric_client.go new file mode 100644 index 0000000000..243ed0426b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/fake/fake_metric_client.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "time" + + "k8s.io/apimachinery/pkg/types" +) + +// MetricClient is a fake implementation of autoscaler.MetricClient for testing. +type MetricClient struct { + StableConcurrency float64 + PanicConcurrency float64 + StableRPS float64 + PanicRPS float64 + ErrF func(key types.NamespacedName, now time.Time) error +} + +// StableAndPanicConcurrency returns stable/panic concurrency stored in the object +// and the result of Errf as the error. 
+func (t *MetricClient) StableAndPanicConcurrency(key types.NamespacedName, now time.Time) (float64, float64, error) { + var err error + if t.ErrF != nil { + err = t.ErrF(key, now) + } + return t.StableConcurrency, t.PanicConcurrency, err +} + +// StableAndPanicRPS returns stable/panic RPS stored in the object +// and the result of Errf as the error. +func (t *MetricClient) StableAndPanicRPS(key types.NamespacedName, now time.Time) (float64, float64, error) { + var err error + if t.ErrF != nil { + err = t.ErrF(key, now) + } + return t.StableRPS, t.PanicRPS, err +} + +// StaticMetricClient returns stable/panic concurrency and RPS with static value, i.e. 10. +var StaticMetricClient = MetricClient{ + StableConcurrency: 10.0, + PanicConcurrency: 10.0, + StableRPS: 10.0, + PanicRPS: 10.0, +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client.go b/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client.go new file mode 100644 index 0000000000..9f37ae055a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client.go @@ -0,0 +1,122 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "errors" + "fmt" + "io" + "net/http" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +type httpScrapeClient struct { + httpClient *http.Client +} + +func newHTTPScrapeClient(httpClient *http.Client) (*httpScrapeClient, error) { + if httpClient == nil { + return nil, errors.New("HTTP client must not be nil") + } + + return &httpScrapeClient{ + httpClient: httpClient, + }, nil +} + +func (c *httpScrapeClient) Scrape(url string) (Stat, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return emptyStat, err + } + resp, err := c.httpClient.Do(req) + if err != nil { + return emptyStat, err + } + defer resp.Body.Close() + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices { + return emptyStat, fmt.Errorf("GET request for URL %q returned HTTP status %v", url, resp.StatusCode) + } + + return extractData(resp.Body) +} + +func extractData(body io.Reader) (Stat, error) { + var parser expfmt.TextParser + metricFamilies, err := parser.TextToMetricFamilies(body) + if err != nil { + return emptyStat, fmt.Errorf("reading text format failed: %w", err) + } + + stat := emptyStat + for m, pv := range map[string]*float64{ + "queue_average_concurrent_requests": &stat.AverageConcurrentRequests, + "queue_average_proxied_concurrent_requests": &stat.AverageProxiedConcurrentRequests, + "queue_requests_per_second": &stat.RequestCount, + "queue_proxied_operations_per_second": &stat.ProxiedRequestCount, + } { + pm := prometheusMetric(metricFamilies, m) + if pm == nil { + return emptyStat, fmt.Errorf("could not find value for %s in response", m) + } + *pv = *pm.Gauge.Value + + if stat.PodName == "" { + stat.PodName = prometheusLabel(pm.Label, "destination_pod") + if stat.PodName == "" { + return emptyStat, errors.New("could not find pod name in metric labels") + } + } + } + // Transitional metrics, which older pods won't report. 
+ for m, pv := range map[string]*float64{ + "process_uptime": &stat.ProcessUptime, // Can be removed after 0.15 cuts. + } { + pm := prometheusMetric(metricFamilies, m) + // Ignore if not found. + if pm == nil { + continue + } + *pv = *pm.Gauge.Value + } + return stat, nil +} + +// prometheusMetric returns the point of the first Metric of the MetricFamily +// with the given key from the given map. If there is no such MetricFamily or it +// has no Metrics, then returns nil. +func prometheusMetric(metricFamilies map[string]*dto.MetricFamily, key string) *dto.Metric { + if metric, ok := metricFamilies[key]; ok && len(metric.Metric) > 0 { + return metric.Metric[0] + } + + return nil +} + +// prometheusLabels returns the value of the label with the given key from the +// given slice of labels. Returns an empty string if the label cannot be found. +func prometheusLabel(labels []*dto.LabelPair, key string) string { + for _, label := range labels { + if *label.Name == key { + return *label.Value + } + } + + return "" +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client_test.go new file mode 100644 index 0000000000..55f43f9655 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/http_scrape_client_test.go @@ -0,0 +1,229 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "net/http" + "testing" +) + +const ( + testURL = "http://test-revision-zhudex.test-namespace:9090/metrics" + + // TODO: Use Prometheus lib to generate the following text instead of using text format directly. + testAverageConcurrencyContext = `# HELP queue_average_concurrent_requests Number of requests currently being handled by this pod +# TYPE queue_average_concurrent_requests gauge +queue_average_concurrent_requests{destination_namespace="test-namespace",destination_revision="test-revision",destination_pod="test-revision-1234"} 3.0 +` + testQPSContext = `# HELP queue_requests_per_second Number of requests received since last Stat +# TYPE queue_requests_per_second gauge +queue_requests_per_second{destination_namespace="test-namespace",destination_revision="test-revision",destination_pod="test-revision-1234"} 5 +` + testAverageProxiedConcurrenyContext = `# HELP queue_average_proxied_concurrent_requests Number of proxied requests currently being handled by this pod +# TYPE queue_average_proxied_concurrent_requests gauge +queue_average_proxied_concurrent_requests{destination_namespace="test-namespace",destination_revision="test-revision",destination_pod="test-revision-1234"} 2.0 +` + testProxiedQPSContext = `# HELP queue_proxied_operations_per_second Number of proxied requests received since last Stat +# TYPE queue_proxied_operations_per_second gauge +queue_proxied_operations_per_second{destination_namespace="test-namespace",destination_revision="test-revision",destination_pod="test-revision-1234"} 4 +` + testFullContext = testAverageConcurrencyContext + testQPSContext + testAverageProxiedConcurrenyContext + testProxiedQPSContext + + testUptimeContext = `# HELP process_uptime The number of seconds that the process has been up +# TYPE process_uptime gauge 
+process_uptime{destination_configuration="s1",destination_namespace="default",destination_pod="s1-tdgpn-deployment-86f6459cf8-mc9mw",destination_revision="s1-tdgpn"} 2937.12 +` + + testOptionalContext = testFullContext + testUptimeContext +) + +func TestNewHTTPScrapeClient_ErrorCases(t *testing.T) { + testCases := []struct { + name string + client *http.Client + expectedErr string + }{{ + name: "Empty HTTP client", + expectedErr: "HTTP client must not be nil", + }} + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + if _, err := newHTTPScrapeClient(test.client); err != nil { + got := err.Error() + want := test.expectedErr + if got != want { + t.Errorf("Got error message: %v. Want: %v", got, want) + } + } else { + t.Errorf("Expected error from newHTTPScrapeClient, got nil") + } + }) + } +} + +func TestHTTPScrapeClientScrapeHappyCase(t *testing.T) { + hClient := newTestHTTPClient(getHTTPResponse(http.StatusOK, testFullContext), nil) + sClient, err := newHTTPScrapeClient(hClient) + if err != nil { + t.Fatalf("newHTTPScrapeClient = %v, want no error", err) + } + + stat, err := sClient.Scrape(testURL) + if err != nil { + t.Errorf("scrapeViaURL = %v, want no error", err) + } + if stat.AverageConcurrentRequests != 3.0 { + t.Errorf("stat.AverageConcurrentRequests = %v, want 3.0", stat.AverageConcurrentRequests) + } + if stat.RequestCount != 5 { + t.Errorf("stat.RequestCount = %v, want 5", stat.RequestCount) + } + if stat.AverageProxiedConcurrentRequests != 2.0 { + t.Errorf("stat.AverageProxiedConcurrency = %v, want 2.0", stat.AverageProxiedConcurrentRequests) + } + if stat.ProxiedRequestCount != 4 { + t.Errorf("stat.ProxiedCount = %v, want 4", stat.ProxiedRequestCount) + } + if stat.PodName != "test-revision-1234" { + t.Errorf("stat.PodName = %s, want test-revision-1234", stat.PodName) + } + if stat.ProcessUptime != 0 { + t.Errorf("default/missing stat.ProcessUptime = %v, want: 0", stat.ProcessUptime) + } +} + +func 
TestHTTPScrapeClientScrapeHappyCaseWithOptionals(t *testing.T) { + hClient := newTestHTTPClient(getHTTPResponse(http.StatusOK, testOptionalContext), nil) + sClient, err := newHTTPScrapeClient(hClient) + if err != nil { + t.Fatalf("newHTTPScrapeClient = %v, want no error", err) + } + + stat, err := sClient.Scrape(testURL) + if err != nil { + t.Errorf("scrapeViaURL = %v, want no error", err) + } + if stat.AverageConcurrentRequests != 3.0 { + t.Errorf("stat.AverageConcurrentRequests = %v, want 3.0", stat.AverageConcurrentRequests) + } + if stat.RequestCount != 5 { + t.Errorf("stat.RequestCount = %v, want 5", stat.RequestCount) + } + if stat.AverageProxiedConcurrentRequests != 2.0 { + t.Errorf("stat.AverageProxiedConcurrency = %v, want 2.0", stat.AverageProxiedConcurrentRequests) + } + if stat.ProxiedRequestCount != 4 { + t.Errorf("stat.ProxiedCount = %v, want 4", stat.ProxiedRequestCount) + } + if stat.PodName != "test-revision-1234" { + t.Errorf("stat.PodName = %s, want test-revision-1234", stat.PodName) + } + if got, want := stat.ProcessUptime, 2937.12; got != want { + t.Errorf("stat.ProcessUptime = %v, want: %v", got, want) + } +} + +func TestHTTPScrapeClient_Scrape_ErrorCases(t *testing.T) { + testCases := []struct { + name string + responseCode int + responseErr error + responseContext string + expectedErr string + }{{ + name: "Non 200 return code", + responseCode: http.StatusForbidden, + expectedErr: fmt.Sprintf(`GET request for URL %q returned HTTP status 403`, testURL), + }, { + name: "Error got when sending request", + responseCode: http.StatusOK, + responseErr: errors.New("upstream closed"), + expectedErr: fmt.Sprintf("Get %s: upstream closed", testURL), + }, { + name: "Bad response context format", + responseCode: http.StatusOK, + responseContext: "bad context", + expectedErr: "reading text format failed: text format parsing error in line 1: unexpected end of input stream", + }, { + name: "Missing average concurrency", + responseCode: http.StatusOK, + 
responseContext: testQPSContext + testAverageProxiedConcurrenyContext + testProxiedQPSContext, + expectedErr: "could not find value for queue_average_concurrent_requests in response", + }, { + name: "Missing QPS", + responseCode: http.StatusOK, + responseContext: testAverageConcurrencyContext + testAverageProxiedConcurrenyContext + testProxiedQPSContext, + expectedErr: "could not find value for queue_requests_per_second in response", + }, { + name: "Missing average proxied concurrency", + responseCode: http.StatusOK, + responseContext: testAverageConcurrencyContext + testQPSContext + testProxiedQPSContext, + expectedErr: "could not find value for queue_average_proxied_concurrent_requests in response", + }, { + name: "Missing proxied QPS", + responseCode: http.StatusOK, + responseContext: testAverageConcurrencyContext + testQPSContext + testAverageProxiedConcurrenyContext, + expectedErr: "could not find value for queue_proxied_operations_per_second in response", + }} + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + hClient := newTestHTTPClient(getHTTPResponse(test.responseCode, test.responseContext), test.responseErr) + sClient, err := newHTTPScrapeClient(hClient) + if err != nil { + t.Errorf("newHTTPScrapeClient=%v, want no error", err) + } + if _, err := sClient.Scrape(testURL); err != nil { + if err.Error() != test.expectedErr { + t.Errorf("Got error message: %q, want: %q", err.Error(), test.expectedErr) + } + } else { + t.Errorf("Expected error from newServiceScraperWithClient, got nil") + } + }) + } +} + +func getHTTPResponse(statusCode int, context string) *http.Response { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewBufferString(context)), + } +} + +type fakeRoundTripper struct { + response *http.Response + responseError error +} + +func (frt fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return frt.response, frt.responseError +} + +func 
newTestHTTPClient(response *http.Response, err error) *http.Client { + return &http.Client{ + Transport: fakeRoundTripper{ + response: response, + responseError: err, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider.go b/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider.go new file mode 100644 index 0000000000..e70b3bb2da --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "errors" + "math" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + cmetrics "k8s.io/metrics/pkg/apis/custom_metrics" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +var ( + concurrencyMetricInfo = provider.CustomMetricInfo{ + GroupResource: v1alpha1.Resource("revisions"), + Namespaced: true, + Metric: autoscaling.Concurrency, + } + rpsMetricInfo = provider.CustomMetricInfo{ + GroupResource: v1alpha1.Resource("revisions"), + Namespaced: true, + Metric: autoscaling.RPS, + } + + errMetricNotSupported = errors.New("metric not supported") + errNotImplemented = errors.New("not implemented") +) + +// MetricProvider is a provider to back a custom-metrics API implementation. +type MetricProvider struct { + metricClient MetricClient +} + +var _ provider.CustomMetricsProvider = (*MetricProvider)(nil) + +// NewMetricProvider creates a new MetricProvider. +func NewMetricProvider(metricClient MetricClient) *MetricProvider { + return &MetricProvider{ + metricClient: metricClient, + } +} + +// GetMetricByName implements the interface. 
+func (p *MetricProvider) GetMetricByName(name types.NamespacedName, info provider.CustomMetricInfo, + metricSelector labels.Selector) (*cmetrics.MetricValue, error) { + now := time.Now() + var data float64 + var err error + if cmp.Equal(info, concurrencyMetricInfo) { + data, _, err = p.metricClient.StableAndPanicConcurrency(name, now) + } else if cmp.Equal(info, rpsMetricInfo) { + data, _, err = p.metricClient.StableAndPanicRPS(name, now) + } else { + return nil, errMetricNotSupported + } + if err != nil { + return nil, err + } + + return &cmetrics.MetricValue{ + Metric: cmetrics.MetricIdentifier{ + Name: info.Metric, + }, + Timestamp: metav1.Time{Time: now}, + Value: *resource.NewQuantity(int64(math.Ceil(data)), resource.DecimalSI), + }, nil +} + +// GetMetricBySelector implements the interface. +func (p *MetricProvider) GetMetricBySelector(string, labels.Selector, provider.CustomMetricInfo, labels.Selector) (*cmetrics.MetricValueList, error) { + return nil, errNotImplemented +} + +// ListAllMetrics implements the interface. +func (p *MetricProvider) ListAllMetrics() []provider.CustomMetricInfo { + return []provider.CustomMetricInfo{concurrencyMetricInfo, rpsMetricInfo} +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider_test.go new file mode 100644 index 0000000000..00e4723c24 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/metrics_provider_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "errors" + "testing" + "time" + + "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider" + "knative.dev/pkg/kmp" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/autoscaler/fake" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" +) + +var ( + existingNamespace = "existing" + nonExistingNamespace = "non-existing" +) + +func TestGetMetricByName(t *testing.T) { + type args struct { + name types.NamespacedName + info provider.CustomMetricInfo + } + tests := []struct { + name string + args args + want int64 + wantErr bool + }{{ + name: "all good", + args: args{ + name: types.NamespacedName{Namespace: existingNamespace, Name: "test"}, + info: concurrencyMetricInfo, + }, + want: 11, + }, { + name: "all good (RPS)", + args: args{ + name: types.NamespacedName{Namespace: existingNamespace, Name: "test"}, + info: rpsMetricInfo, + }, + want: 14, + }, { + name: "requesting unsupported metric", + args: args{ + name: types.NamespacedName{Namespace: existingNamespace, Name: "test"}, + info: provider.CustomMetricInfo{ + GroupResource: v1alpha1.Resource("services"), + Namespaced: true, + Metric: autoscaling.Concurrency, + }, + }, + wantErr: true, + }, { + name: "error from metric client", + args: args{ + name: types.NamespacedName{Namespace: nonExistingNamespace, Name: "test"}, + info: concurrencyMetricInfo, + }, + wantErr: true, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := NewMetricProvider(staticMetrics(10.3, 14)) + got, err := p.GetMetricByName(tt.args.name, tt.args.info, labels.Everything()) + if (err != nil) != tt.wantErr { + t.Errorf("GetMetricByName() error = %v, wantErr %v", err, tt.wantErr) + } + if err != nil { + return + } + + gotValue, _ := got.Value.AsInt64() + if gotValue != tt.want { + 
t.Errorf("GetMetricByName() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetMetricBySelector(t *testing.T) { + provider := NewMetricProvider(staticMetrics(10.0, 14)) + _, got := provider.GetMetricBySelector("foo", labels.NewSelector(), concurrencyMetricInfo, labels.Everything()) + if got != errNotImplemented { + t.Errorf("GetMetricBySelector() = %v, want %v", got, errNotImplemented) + } + + _, got = provider.GetMetricBySelector("foo", labels.NewSelector(), rpsMetricInfo, labels.Everything()) + if got != errNotImplemented { + t.Errorf("GetMetricBySelector() = %v, want %v", got, errNotImplemented) + } +} + +func TestListAllMetrics(t *testing.T) { + provider := NewMetricProvider(staticMetrics(10.0, 14)) + gotConcurrency := provider.ListAllMetrics()[0] + + if equal, err := kmp.SafeEqual(gotConcurrency, concurrencyMetricInfo); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if !equal { + t.Errorf("ListAllMetrics() = %v, want %v", gotConcurrency, concurrencyMetricInfo) + } + + gotRPS := provider.ListAllMetrics()[1] + if equal, err := kmp.SafeEqual(gotRPS, rpsMetricInfo); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if !equal { + t.Errorf("ListAllMetrics() = %v, want %v", gotRPS, rpsMetricInfo) + } +} + +func staticMetrics(concurrency, rps float64) MetricClient { + return &fake.MetricClient{ + StableConcurrency: concurrency, + StableRPS: rps, + ErrF: func(key types.NamespacedName, now time.Time) error { + if key.Namespace != existingNamespace { + return errors.New("doesn't exist") + } + return nil + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler.go b/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler.go new file mode 100644 index 0000000000..2133a186a3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler.go @@ -0,0 +1,341 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); 
+you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "context" + "math" + "sync" + "time" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +// Decider is a resource which observes the request load of a Revision and +// recommends a number of replicas to run. +// +k8s:deepcopy-gen=true +type Decider struct { + metav1.ObjectMeta + Spec DeciderSpec + Status DeciderStatus +} + +// DeciderSpec is the parameters in which the Revision should scaled. +type DeciderSpec struct { + TickInterval time.Duration + MaxScaleUpRate float64 + MaxScaleDownRate float64 + // The metric used for scaling, i.e. concurrency, rps. + ScalingMetric string + // The value of scaling metric per pod that we target to maintain. + // TargetValue <= TotalValue. + TargetValue float64 + // The total value of scaling metric that a pod can maintain. + TotalValue float64 + // The burst capacity that user wants to maintain without queuing at the POD level. + // Note, that queueing still might happen due to the non-ideal load balancing. + TargetBurstCapacity float64 + PanicThreshold float64 + // StableWindow is needed to determine when to exit panicmode. + StableWindow time.Duration + // The name of the k8s service for pod information. + ServiceName string +} + +// DeciderStatus is the current scale recommendation. 
+type DeciderStatus struct { + // DesiredScale is the target number of instances that autoscaler + // this revision needs. + DesiredScale int32 + + // ExcessBurstCapacity is the difference between spare capacity + // (how much more load the pods in the revision deployment can take before being + // overloaded) and the configured target burst capacity. + // If this number is negative: Activator will be threaded in + // the request path by the PodAutoscaler controller. + ExcessBurstCapacity int32 +} + +// UniScaler records statistics for a particular Decider and proposes the scale for the Decider's target based on those statistics. +type UniScaler interface { + // Scale either proposes a number of replicas and available excess burst capacity, + // or skips proposing. The proposal is requested at the given time. + // The returned boolean is true if and only if a proposal was returned. + Scale(context.Context, time.Time) (int32, int32, bool) + + // Update reconfigures the UniScaler according to the DeciderSpec. + Update(*DeciderSpec) error +} + +// UniScalerFactory creates a UniScaler for a given PA using the given dynamic configuration. +type UniScalerFactory func(*Decider) (UniScaler, error) + +// scalerRunner wraps a UniScaler and a channel for implementing shutdown behavior. +type scalerRunner struct { + scaler UniScaler + stopCh chan struct{} + pokeCh chan struct{} + + // mux guards access to decider. 
+ mux sync.RWMutex + decider *Decider +} + +func (sr *scalerRunner) latestScale() int32 { + sr.mux.RLock() + defer sr.mux.RUnlock() + return sr.decider.Status.DesiredScale +} + +func sameSign(a, b int32) bool { + return (a&math.MinInt32)^(b&math.MinInt32) == 0 +} + +func (sr *scalerRunner) updateLatestScale(proposed, ebc int32) bool { + ret := false + sr.mux.Lock() + defer sr.mux.Unlock() + if sr.decider.Status.DesiredScale != proposed { + sr.decider.Status.DesiredScale = proposed + ret = true + } + + // If sign has changed -- then we have to update KPA + ret = ret || !sameSign(sr.decider.Status.ExcessBurstCapacity, ebc) + + // Update with the latest calculation anyway. + sr.decider.Status.ExcessBurstCapacity = ebc + return ret +} + +// MultiScaler maintains a collection of Uniscalers. +type MultiScaler struct { + scalers map[types.NamespacedName]*scalerRunner + scalersMutex sync.RWMutex + scalersStopCh <-chan struct{} + + uniScalerFactory UniScalerFactory + + logger *zap.SugaredLogger + + watcher func(types.NamespacedName) + watcherMutex sync.RWMutex + + tickProvider func(time.Duration) *time.Ticker +} + +// NewMultiScaler constructs a MultiScaler. +func NewMultiScaler( + stopCh <-chan struct{}, + uniScalerFactory UniScalerFactory, + logger *zap.SugaredLogger) *MultiScaler { + return &MultiScaler{ + scalers: make(map[types.NamespacedName]*scalerRunner), + scalersStopCh: stopCh, + uniScalerFactory: uniScalerFactory, + logger: logger, + tickProvider: time.NewTicker, + } +} + +// Get returns the copy of the current Decider. +func (m *MultiScaler) Get(ctx context.Context, namespace, name string) (*Decider, error) { + key := types.NamespacedName{Namespace: namespace, Name: name} + m.scalersMutex.RLock() + defer m.scalersMutex.RUnlock() + scaler, exists := m.scalers[key] + if !exists { + // This GroupResource is a lie, but unfortunately this interface requires one. 
+ return nil, errors.NewNotFound(av1alpha1.Resource("Deciders"), key.String()) + } + scaler.mux.RLock() + defer scaler.mux.RUnlock() + return scaler.decider.DeepCopy(), nil +} + +// Create instantiates the desired Decider. +func (m *MultiScaler) Create(ctx context.Context, decider *Decider) (*Decider, error) { + key := types.NamespacedName{Namespace: decider.Namespace, Name: decider.Name} + logger := m.logger.With(zap.String(logkey.Key, key.String())) + ctx = logging.WithLogger(ctx, logger) + m.scalersMutex.Lock() + defer m.scalersMutex.Unlock() + scaler, exists := m.scalers[key] + if !exists { + var err error + scaler, err = m.createScaler(ctx, decider) + if err != nil { + return nil, err + } + m.scalers[key] = scaler + } + scaler.mux.RLock() + defer scaler.mux.RUnlock() + // scaler.decider is already a copy of the original, so just return it. + return scaler.decider, nil +} + +// Update applied the desired DeciderSpec to a currently running Decider. +func (m *MultiScaler) Update(ctx context.Context, decider *Decider) (*Decider, error) { + key := types.NamespacedName{Namespace: decider.Namespace, Name: decider.Name} + logger := m.logger.With(zap.String(logkey.Key, key.String())) + ctx = logging.WithLogger(ctx, logger) + m.scalersMutex.Lock() + defer m.scalersMutex.Unlock() + if scaler, exists := m.scalers[key]; exists { + scaler.mux.Lock() + defer scaler.mux.Unlock() + oldDeciderSpec := scaler.decider.Spec + // Make sure we store the copy. + scaler.decider = decider.DeepCopy() + scaler.scaler.Update(&decider.Spec) + if oldDeciderSpec.TickInterval != decider.Spec.TickInterval { + m.updateRunner(ctx, scaler) + } + return decider, nil + } + // This GroupResource is a lie, but unfortunately this interface requires one. + return nil, errors.NewNotFound(av1alpha1.Resource("Deciders"), key.String()) +} + +// Delete stops and removes a Decider. 
+func (m *MultiScaler) Delete(ctx context.Context, namespace, name string) error { + key := types.NamespacedName{Namespace: namespace, Name: name} + m.scalersMutex.Lock() + defer m.scalersMutex.Unlock() + if scaler, exists := m.scalers[key]; exists { + close(scaler.stopCh) + delete(m.scalers, key) + } + return nil +} + +// Watch registers a singleton function to call when DeciderStatus is updated. +func (m *MultiScaler) Watch(fn func(types.NamespacedName)) { + m.watcherMutex.Lock() + defer m.watcherMutex.Unlock() + + if m.watcher != nil { + m.logger.Fatal("Multiple calls to Watch() not supported") + } + m.watcher = fn +} + +// Inform sends an update to the registered watcher function, if it is set. +func (m *MultiScaler) Inform(event types.NamespacedName) bool { + m.watcherMutex.RLock() + defer m.watcherMutex.RUnlock() + + if m.watcher != nil { + m.watcher(event) + return true + } + return false +} + +func (m *MultiScaler) updateRunner(ctx context.Context, runner *scalerRunner) { + runner.stopCh <- struct{}{} + m.runScalerTicker(ctx, runner) +} + +func (m *MultiScaler) runScalerTicker(ctx context.Context, runner *scalerRunner) { + metricKey := types.NamespacedName{Namespace: runner.decider.Namespace, Name: runner.decider.Name} + ticker := m.tickProvider(runner.decider.Spec.TickInterval) + go func() { + defer ticker.Stop() + for { + select { + case <-m.scalersStopCh: + return + case <-runner.stopCh: + return + case <-ticker.C: + m.tickScaler(ctx, runner.scaler, runner, metricKey) + case <-runner.pokeCh: + m.tickScaler(ctx, runner.scaler, runner, metricKey) + } + } + }() +} + +func (m *MultiScaler) createScaler(ctx context.Context, decider *Decider) (*scalerRunner, error) { + d := decider.DeepCopy() + scaler, err := m.uniScalerFactory(d) + if err != nil { + return nil, err + } + + runner := &scalerRunner{ + scaler: scaler, + stopCh: make(chan struct{}), + decider: d, + pokeCh: make(chan struct{}), + } + d.Status.DesiredScale = -1 + switch tbc := 
d.Spec.TargetBurstCapacity; tbc { + case -1, 0: + d.Status.ExcessBurstCapacity = int32(tbc) + default: + // If TBC > Target * InitialScale (currently 1), then we know initial + // scale won't be enough to cover TBC and we'll be behind activator. + // TODO(autoscale-wg): fix this when we switch to non "1" initial scale. + d.Status.ExcessBurstCapacity = int32(1*d.Spec.TotalValue - tbc) + } + + m.runScalerTicker(ctx, runner) + return runner, nil +} + +func (m *MultiScaler) tickScaler(ctx context.Context, scaler UniScaler, runner *scalerRunner, metricKey types.NamespacedName) { + logger := logging.FromContext(ctx) + desiredScale, excessBC, scaled := scaler.Scale(ctx, time.Now()) + + if !scaled { + return + } + + // Cannot scale negative (nor we can compute burst capacity). + if desiredScale < 0 { + logger.Errorf("Cannot scale: desiredScale %d < 0.", desiredScale) + return + } + + if runner.updateLatestScale(desiredScale, excessBC) { + m.Inform(metricKey) + } +} + +// Poke checks if the autoscaler needs to be run immediately. +func (m *MultiScaler) Poke(key types.NamespacedName, stat Stat) { + m.scalersMutex.RLock() + defer m.scalersMutex.RUnlock() + + scaler, exists := m.scalers[key] + if !exists { + return + } + + if scaler.latestScale() == 0 && stat.AverageConcurrentRequests != 0 { + scaler.pokeCh <- struct{}{} + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler_test.go new file mode 100644 index 0000000000..c0e6d36609 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/multiscaler_test.go @@ -0,0 +1,516 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + . "knative.dev/pkg/logging/testing" +) + +const ( + tickInterval = 5 * time.Millisecond + tickTimeout = 100 * time.Millisecond +) + +// watchFunc generates a function to assert the changes happening in the multiscaler. +func watchFunc(ctx context.Context, ms *MultiScaler, decider *Decider, desiredScale int, errCh chan error) func(key types.NamespacedName) { + metricKey := types.NamespacedName{Namespace: decider.Namespace, Name: decider.Name} + return func(key types.NamespacedName) { + if key != metricKey { + errCh <- fmt.Errorf("Watch() = %v, wanted %v", key, metricKey) + return + } + m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + errCh <- fmt.Errorf("Get() = %w", err) + return + } + if got, want := m.Status.DesiredScale, int32(desiredScale); got != want { + errCh <- fmt.Errorf("Get() = %v, wanted %v", got, want) + return + } + errCh <- nil + } +} + +// verifyTick verifies that we get a tick in a certain amount of time. +func verifyTick(errCh chan error) error { + select { + case err := <-errCh: + return err + case <-time.After(tickTimeout): + return errors.New("timed out waiting for Watch()") + } +} + +// verifyNoTick verifies that we don't get a tick in a certain amount of time. 
+func verifyNoTick(errCh chan error) error { + select { + case err := <-errCh: + if err != nil { + return err + } + return errors.New("Got unexpected tick") + case <-time.After(tickTimeout): + // Got nothing, all good! + return nil + } +} + +func TestMultiScalerScaling(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + mtp := &manualTickProvider{ + ch: make(chan time.Time, 1), + } + ms.tickProvider = mtp.NewTicker + + decider := newDecider() + uniScaler.setScaleResult(1, 1, true) + + // Before it exists, we should get a NotFound. + m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if !apierrors.IsNotFound(err) { + t.Errorf("Get() = (%v, %v), want not found error", m, err) + } + + errCh := make(chan error) + ms.Watch(watchFunc(ctx, ms, decider, 1 /*desired scale*/, errCh)) + + _, err = ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + d, err := ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Fatalf("Get() = %v", err) + } + if got, want := d.Status.DesiredScale, int32(-1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + if got, want := d.Status.ExcessBurstCapacity, int32(0); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + + mtp.ch <- time.Now() + + // Verify that we see a "tick" + if err := verifyTick(errCh); err != nil { + t.Fatal(err) + } + + // Verify new values are propagated. 
+ d, err = ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Fatalf("Get() = %v", err) + } + if got, want := d.Status.DesiredScale, int32(1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + if got, want := d.Status.ExcessBurstCapacity, int32(1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + + // Verify that subsequent "ticks" don't trigger a callback, since + // the desired scale has not changed. + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } + + if err := ms.Delete(ctx, decider.Namespace, decider.Name); err != nil { + t.Errorf("Delete() = %v", err) + } + + // Verify that we stop seeing "ticks" + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } +} + +func TestMultiscalerCreateTBC42(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, _ := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + decider.Spec.TargetBurstCapacity = 42 + decider.Spec.TotalValue = 25 + + _, err := ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + d, err := ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Fatalf("Get() = %v", err) + } + if got, want := d.Status.DesiredScale, int32(-1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + if got, want := d.Status.ExcessBurstCapacity, int32(25-42); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } +} +func TestMultiscalerCreateTBCMinus1(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, _ := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + decider.Spec.TargetBurstCapacity = -1 + + _, err := ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + d, err := ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Fatalf("Get() = %v", err) + } 
+ if got, want := d.Status.DesiredScale, int32(-1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } + if got, want := d.Status.ExcessBurstCapacity, int32(-1); got != want { + t.Errorf("Decider.Status.DesiredScale = %d, want: %d", got, want) + } +} + +func TestMultiScalerOnlyCapacityChange(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + uniScaler.setScaleResult(1, 1, true) + + errCh := make(chan error) + ms.Watch(watchFunc(ctx, ms, decider, 1, errCh)) + + _, err := ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + + // Verify that we see a "tick". + if err := verifyTick(errCh); err != nil { + t.Fatal(err) + } + + // Change the sign of the excess capacity. + uniScaler.setScaleResult(1, -1, true) + + // Verify that subsequent "ticks" don't trigger a callback, since + // the desired scale has not changed. + if err := verifyTick(errCh); err != nil { + t.Fatal(err) + } + + if err := ms.Delete(ctx, decider.Namespace, decider.Name); err != nil { + t.Errorf("Delete() = %v", err) + } + + // Verify that we stop seeing "ticks". + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } +} + +func TestMultiScalerTickUpdate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + decider.Spec.TickInterval = 10 * time.Second + uniScaler.setScaleResult(1, 1, true) + + // Before it exists, we should get a NotFound. 
+ m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if !apierrors.IsNotFound(err) { + t.Errorf("Get() = (%v, %v), want not found error", m, err) + } + + _, err = ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + time.Sleep(50 * time.Millisecond) + + // Expected count to be 0 as the tick interval is 10s and no autoscaling calculation should be triggered + if count := uniScaler.getScaleCount(); count != 0 { + t.Fatalf("Expected count to be 0 but got %d", count) + } + + decider.Spec.TickInterval = tickInterval + + if _, err = ms.Update(ctx, decider); err != nil { + t.Errorf("Update() = %v", err) + } + + if err := wait.PollImmediate(tickInterval, tickTimeout, func() (bool, error) { + // Expected count to be greater than 1 as the tick interval is updated to be 5ms + if uniScaler.getScaleCount() >= 1 { + return true, nil + } + return false, nil + }); err != nil { + t.Fatalf("Expected at least 1 tick but got %d", uniScaler.getScaleCount()) + } +} + +func TestMultiScalerScaleToZero(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + uniScaler.setScaleResult(0, 1, true) + + // Before it exists, we should get a NotFound. 
+ m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if !apierrors.IsNotFound(err) { + t.Errorf("Get() = (%v, %v), want not found error", m, err) + } + + errCh := make(chan error) + ms.Watch(watchFunc(ctx, ms, decider, 0, errCh)) + + _, err = ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + + // Verify that we see a "tick" + if err := verifyTick(errCh); err != nil { + t.Fatal(err) + } + + err = ms.Delete(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Errorf("Delete() = %v", err) + } + + // Verify that we stop seeing "ticks" + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } +} + +func TestMultiScalerScaleFromZero(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + decider.Spec.TickInterval = 60 * time.Second + uniScaler.setScaleResult(1, 1, true) + + errCh := make(chan error) + ms.Watch(watchFunc(ctx, ms, decider, 1, errCh)) + + _, err := ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + metricKey := types.NamespacedName{Namespace: decider.Namespace, Name: decider.Name} + if scaler, exists := ms.scalers[metricKey]; !exists { + t.Errorf("Failed to get scaler for metric %s", metricKey) + } else if !scaler.updateLatestScale(0, 10) { + t.Error("Failed to set scale for metric to 0") + } + + testStat := Stat{ + Time: time.Now(), + PodName: "test-pod", + AverageConcurrentRequests: 1, + RequestCount: 1, + } + ms.Poke(metricKey, testStat) + + // Verify that we see a "tick" + if err := verifyTick(errCh); err != nil { + t.Fatal(err) + } +} + +func TestMultiScalerIgnoreNegativeScale(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + + uniScaler.setScaleResult(-1, 10, true) + + // Before it exists, we should get a NotFound. 
+ m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if !apierrors.IsNotFound(err) { + t.Errorf("Get() = (%v, %v), want not found error", m, err) + } + + errCh := make(chan error) + ms.Watch(func(key types.NamespacedName) { + // Let the main process know when this is called. + errCh <- nil + }) + + _, err = ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + + // Verify that we get no "ticks", because the desired scale is negative + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } + + err = ms.Delete(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Errorf("Delete() = %v", err) + } + + // Verify that we stop seeing "ticks" + if err := verifyNoTick(errCh); err != nil { + t.Fatal(err) + } +} + +func TestMultiScalerUpdate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms, uniScaler := createMultiScaler(ctx, TestLogger(t)) + + decider := newDecider() + decider.Spec.TargetValue = 1.0 + uniScaler.setScaleResult(0, 100, true) + + // Create the decider and verify the Spec + _, err := ms.Create(ctx, decider) + if err != nil { + t.Fatalf("Create() = %v", err) + } + m, err := ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Errorf("Get() = %v", err) + } + if got, want := m.Spec.TargetValue, 1.0; got != want { + t.Errorf("Got target concurrency %v. Wanted %v", got, want) + } + + // Update the target and verify the Spec + decider.Spec.TargetValue = 10.0 + if _, err = ms.Update(ctx, decider); err != nil { + t.Errorf("Update() = %v", err) + } + m, err = ms.Get(ctx, decider.Namespace, decider.Name) + if err != nil { + t.Errorf("Get() = %v", err) + } + if got, want := m.Spec.TargetValue, 10.0; got != want { + t.Errorf("Got target concurrency %v. 
Wanted %v", got, want) + } +} + +func createMultiScaler(ctx context.Context, l *zap.SugaredLogger) (*MultiScaler, *fakeUniScaler) { + uniscaler := &fakeUniScaler{} + + ms := NewMultiScaler(ctx.Done(), uniscaler.fakeUniScalerFactory, l) + + return ms, uniscaler +} + +type fakeUniScaler struct { + mutex sync.RWMutex + replicas int32 + surplus int32 + scaled bool + scaleCount int +} + +func (u *fakeUniScaler) fakeUniScalerFactory(*Decider) (UniScaler, error) { + return u, nil +} + +func (u *fakeUniScaler) Scale(context.Context, time.Time) (int32, int32, bool) { + u.mutex.Lock() + defer u.mutex.Unlock() + u.scaleCount++ + return u.replicas, u.surplus, u.scaled +} + +func (u *fakeUniScaler) getScaleCount() int { + u.mutex.RLock() + defer u.mutex.RUnlock() + return u.scaleCount +} + +func (u *fakeUniScaler) setScaleResult(replicas, surplus int32, scaled bool) { + u.mutex.Lock() + defer u.mutex.Unlock() + + u.surplus = surplus + u.replicas = replicas + u.scaled = scaled +} + +func (u *fakeUniScaler) Update(*DeciderSpec) error { + return nil +} + +func newDecider() *Decider { + return &Decider{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + }, + Spec: DeciderSpec{ + TickInterval: tickInterval, + TargetValue: 1, + }, + Status: DeciderStatus{}, + } +} + +func TestSameSign(t *testing.T) { + tests := []struct { + а, b int32 + want bool + }{{1982, 1984, true}, + {-1984, -1988, true}, + {-1988, 2006, false}, + {-2006, 2009, false}, + {0, 1, true}, // 0 is considered positive for our needs + {0, -42, false}} + for _, test := range tests { + if got, want := sameSign(test.а, test.b), test.want; got != want { + t.Errorf("%d <=> %d: got: %v, want: %v", test.а, test.b, got, want) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size.go b/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size.go new file mode 100644 index 0000000000..0b2da5cf79 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import "math" + +const ( + // criticalValueSquared is the square of the critical value of the Normal distribution + // for a confidence level of 95%. + criticalValueSquared = 1.96 * 1.96 + // marginOfErrorSquared is the square of margin of error. 5 is a usually used value + // for MOE. + marginOfErrorSquared = 5.0 * 5.0 + // σ2 is the population variance. + σ2 = 100.0 +) + +// populationMeanSampleSize uses the following formula for the sample size n: +// +// if N <= 3: +// n = N +// else: +// n = N*X / (N + X – 1), X = C^2 ­* σ^2 / MOE^2, +// +// where N is the population size, C is the critical value of the Normal distribution +// for a given confidence level of 95%, MOE is the margin of error and σ^2 is the +// population variance. 
+func populationMeanSampleSize(population int) int { + if population < 0 { + return 0 + } + if population <= 3 { + return population + } + x := criticalValueSquared * σ2 / marginOfErrorSquared + populationf := float64(population) + return int(math.Ceil(populationf * x / (populationf + x - 1))) +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size_test.go new file mode 100644 index 0000000000..97e2d7281a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/sample_size_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "testing" +) + +func TestPopulationMeanSampleSize(t *testing.T) { + testCases := []struct { + popSize int + wantSampleSize int + }{{ + popSize: 0, + wantSampleSize: 0, + }, { + popSize: 1, + wantSampleSize: 1, + }, { + popSize: 2, + wantSampleSize: 2, + }, { + popSize: 5, + wantSampleSize: 4, + }, { + popSize: 10, + wantSampleSize: 7, + }, { + popSize: 100, + wantSampleSize: 14, + }, { + popSize: 1000, + wantSampleSize: 16, + }} + + for _, testCase := range testCases { + if got, want := populationMeanSampleSize(testCase.popSize), testCase.wantSampleSize; got != want { + t.Errorf("client.SampleSize(%v) = %v, want %v", testCase.popSize, got, want) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter.go b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter.go new file mode 100644 index 0000000000..75a979592a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter.go @@ -0,0 +1,272 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "context" + "errors" + + pkgmetrics "knative.dev/pkg/metrics" + "knative.dev/pkg/metrics/metricskey" + "knative.dev/serving/pkg/metrics" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + desiredPodCountM = stats.Int64( + "desired_pods", + "Number of pods autoscaler wants to allocate", + stats.UnitDimensionless) + requestedPodCountM = stats.Int64( + "requested_pods", + "Number of pods autoscaler requested from Kubernetes", + stats.UnitDimensionless) + actualPodCountM = stats.Int64( + "actual_pods", + "Number of pods that are allocated currently", + stats.UnitDimensionless) + excessBurstCapacityM = stats.Float64( + "excess_burst_capacity", + "Excess burst capacity overserved over the stable window", + stats.UnitDimensionless) + stableRequestConcurrencyM = stats.Float64( + "stable_request_concurrency", + "Average of requests count per observed pod over the stable window", + stats.UnitDimensionless) + panicRequestConcurrencyM = stats.Float64( + "panic_request_concurrency", + "Average of requests count per observed pod over the panic window", + stats.UnitDimensionless) + targetRequestConcurrencyM = stats.Float64( + "target_concurrency_per_pod", + "The desired number of concurrent requests for each pod", + stats.UnitDimensionless) + stableRPSM = stats.Float64( + "stable_requests_per_second", + "Average requests-per-second per observed pod over the stable window", + stats.UnitDimensionless) + panicRPSM = stats.Float64( + "panic_requests_per_second", + "Average requests-per-second per observed pod over the panic window", + stats.UnitDimensionless) + targetRPSM = stats.Float64( + "target_requests_per_second", + "The desired requests-per-second for each pod", + stats.UnitDimensionless) + panicM = stats.Int64( + "panic_mode", + "1 if autoscaler is in panic mode, 0 otherwise", + stats.UnitDimensionless) +) + +func init() { + register() +} + +func register() { + // Create views to see our 
measurements. This can return an error if + // a previously-registered view has the same name with a different value. + // View name defaults to the measure name if unspecified. + if err := view.Register( + &view.View{ + Description: "Number of pods autoscaler wants to allocate", + Measure: desiredPodCountM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Number of pods autoscaler requested from Kubernetes", + Measure: requestedPodCountM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Number of pods that are allocated currently", + Measure: actualPodCountM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Average of requests count over the stable window", + Measure: stableRequestConcurrencyM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Current excess burst capacity over average request count over the stable window", + Measure: excessBurstCapacityM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Average of requests count over the panic window", + Measure: panicRequestConcurrencyM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "The desired number of concurrent requests for each pod", + Measure: targetRequestConcurrencyM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "1 if autoscaler is in panic mode, 0 otherwise", + Measure: panicM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Average requests-per-second over the stable window", + Measure: stableRPSM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "Average requests-per-second over the 
panic window", + Measure: panicRPSM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + &view.View{ + Description: "The desired requests-per-second for each pod", + Measure: targetRPSM, + Aggregation: view.LastValue(), + TagKeys: metrics.CommonRevisionKeys, + }, + ); err != nil { + panic(err) + } +} + +// StatsReporter defines the interface for sending autoscaler metrics +type StatsReporter interface { + ReportDesiredPodCount(v int64) error + ReportRequestedPodCount(v int64) error + ReportActualPodCount(v int64) error + ReportStableRequestConcurrency(v float64) error + ReportPanicRequestConcurrency(v float64) error + ReportTargetRequestConcurrency(v float64) error + ReportStableRPS(v float64) error + ReportPanicRPS(v float64) error + ReportTargetRPS(v float64) error + ReportExcessBurstCapacity(v float64) error + ReportPanic(v int64) error +} + +// Reporter holds cached metric objects to report autoscaler metrics +type Reporter struct { + ctx context.Context + initialized bool +} + +func valueOrUnknown(v string) string { + if v != "" { + return v + } + return metricskey.ValueUnknown +} + +// NewStatsReporter creates a reporter that collects and reports autoscaler metrics +func NewStatsReporter(ns, service, config, revision string) (*Reporter, error) { + r := &Reporter{} + + // Our tags are static. So, we can get away with creating a single context + // and reuse it for reporting all of our metrics. Note that service names + // can be an empty string, so it needs a special treatment. + ctx, err := tag.New( + context.Background(), + tag.Upsert(metrics.NamespaceTagKey, ns), + tag.Upsert(metrics.ServiceTagKey, valueOrUnknown(service)), + tag.Upsert(metrics.ConfigTagKey, config), + tag.Upsert(metrics.RevisionTagKey, revision)) + if err != nil { + return nil, err + } + + r.ctx = ctx + r.initialized = true + return r, nil +} + +// ReportDesiredPodCount captures value v for desired pod count measure. 
+func (r *Reporter) ReportDesiredPodCount(v int64) error { + return r.report(desiredPodCountM.M(v)) +} + +// ReportRequestedPodCount captures value v for requested pod count measure. +func (r *Reporter) ReportRequestedPodCount(v int64) error { + return r.report(requestedPodCountM.M(v)) +} + +// ReportActualPodCount captures value v for actual pod count measure. +func (r *Reporter) ReportActualPodCount(v int64) error { + return r.report(actualPodCountM.M(v)) +} + +// ReportExcessBurstCapacity captures value v for excess target burst capacity. +func (r *Reporter) ReportExcessBurstCapacity(v float64) error { + return r.report(excessBurstCapacityM.M(v)) +} + +// ReportStableRequestConcurrency captures value v for stable request concurrency measure. +func (r *Reporter) ReportStableRequestConcurrency(v float64) error { + return r.report(stableRequestConcurrencyM.M(v)) +} + +// ReportPanicRequestConcurrency captures value v for panic request concurrency measure. +func (r *Reporter) ReportPanicRequestConcurrency(v float64) error { + return r.report(panicRequestConcurrencyM.M(v)) +} + +// ReportTargetRequestConcurrency captures value v for target request concurrency measure. +func (r *Reporter) ReportTargetRequestConcurrency(v float64) error { + return r.report(targetRequestConcurrencyM.M(v)) +} + +// ReportStableRPS captures value v for stable RPS measure. +func (r *Reporter) ReportStableRPS(v float64) error { + return r.report(stableRPSM.M(v)) +} + +// ReportPanicRPS captures value v for panic RPS measure. +func (r *Reporter) ReportPanicRPS(v float64) error { + return r.report(panicRPSM.M(v)) +} + +// ReportTargetRPS captures value v for target requests-per-second measure. +func (r *Reporter) ReportTargetRPS(v float64) error { + return r.report(targetRPSM.M(v)) + +} + +// ReportPanic captures value v for panic mode measure. 
+func (r *Reporter) ReportPanic(v int64) error { + return r.report(panicM.M(v)) +} + +func (r *Reporter) report(m stats.Measurement) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + pkgmetrics.Record(r.ctx, m) + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter_test.go new file mode 100644 index 0000000000..499b5255ae --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_reporter_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "strings" + "testing" + + "knative.dev/pkg/metrics/metricskey" + "knative.dev/pkg/metrics/metricstest" +) + +func TestNewStatsReporterErrors(t *testing.T) { + // These are invalid as defined by the current OpenCensus library. + invalidTagValues := []string{ + "naïve", // Includes non-ASCII character. + strings.Repeat("a", 256), // Longer than 255 characters. + } + + for _, v := range invalidTagValues { + _, err := NewStatsReporter(v, v, v, v) + if err == nil { + t.Errorf("Expected err to not be nil for value %q, got nil", v) + } + } +} + +func TestReporterReport(t *testing.T) { + resetMetrics() + r := &Reporter{} + if err := r.ReportDesiredPodCount(10); err == nil { + t.Error("Reporter.ReportDesiredPodCount() expected an error for Report call before init. 
Got success.") + } + + r, _ = NewStatsReporter("testns", "testsvc", "testconfig", "testrev") + wantTags := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: "testsvc", + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + } + + // Send statistics only once and observe the results + expectSuccess(t, "ReportDesiredPodCount", func() error { return r.ReportDesiredPodCount(10) }) + expectSuccess(t, "ReportRequestedPodCount", func() error { return r.ReportRequestedPodCount(7) }) + expectSuccess(t, "ReportActualPodCount", func() error { return r.ReportActualPodCount(5) }) + expectSuccess(t, "ReportPanic", func() error { return r.ReportPanic(0) }) + expectSuccess(t, "ReportStableRequestConcurrency", func() error { return r.ReportStableRequestConcurrency(2) }) + expectSuccess(t, "ReportPanicRequestConcurrency", func() error { return r.ReportPanicRequestConcurrency(3) }) + expectSuccess(t, "ReportTargetRequestConcurrency", func() error { return r.ReportTargetRequestConcurrency(0.9) }) + expectSuccess(t, "ReportStableRPS", func() error { return r.ReportStableRPS(5) }) + expectSuccess(t, "ReportPanicRPS", func() error { return r.ReportPanicRPS(6) }) + expectSuccess(t, "ReportTargetRPS", func() error { return r.ReportTargetRPS(7) }) + expectSuccess(t, "ReportExcessBurstCapacity", func() error { return r.ReportExcessBurstCapacity(19.84) }) + metricstest.CheckLastValueData(t, "desired_pods", wantTags, 10) + metricstest.CheckLastValueData(t, "requested_pods", wantTags, 7) + metricstest.CheckLastValueData(t, "actual_pods", wantTags, 5) + metricstest.CheckLastValueData(t, "panic_mode", wantTags, 0) + metricstest.CheckLastValueData(t, "stable_request_concurrency", wantTags, 2) + metricstest.CheckLastValueData(t, "excess_burst_capacity", wantTags, 19.84) + metricstest.CheckLastValueData(t, "panic_request_concurrency", wantTags, 3) + metricstest.CheckLastValueData(t, "target_concurrency_per_pod", 
wantTags, 0.9) + metricstest.CheckLastValueData(t, "stable_requests_per_second", wantTags, 5) + metricstest.CheckLastValueData(t, "panic_requests_per_second", wantTags, 6) + metricstest.CheckLastValueData(t, "target_requests_per_second", wantTags, 7) + + // All the stats are gauges - record multiple entries for one stat - last one should stick + expectSuccess(t, "ReportDesiredPodCount", func() error { return r.ReportDesiredPodCount(1) }) + expectSuccess(t, "ReportDesiredPodCount", func() error { return r.ReportDesiredPodCount(2) }) + expectSuccess(t, "ReportDesiredPodCount", func() error { return r.ReportDesiredPodCount(3) }) + metricstest.CheckLastValueData(t, "desired_pods", wantTags, 3) + + expectSuccess(t, "ReportRequestedPodCount", func() error { return r.ReportRequestedPodCount(4) }) + expectSuccess(t, "ReportRequestedPodCount", func() error { return r.ReportRequestedPodCount(5) }) + expectSuccess(t, "ReportRequestedPodCount", func() error { return r.ReportRequestedPodCount(6) }) + metricstest.CheckLastValueData(t, "requested_pods", wantTags, 6) + + expectSuccess(t, "ReportActualPodCount", func() error { return r.ReportActualPodCount(7) }) + expectSuccess(t, "ReportActualPodCount", func() error { return r.ReportActualPodCount(8) }) + expectSuccess(t, "ReportActualPodCount", func() error { return r.ReportActualPodCount(9) }) + metricstest.CheckLastValueData(t, "actual_pods", wantTags, 9) + + expectSuccess(t, "ReportPanic", func() error { return r.ReportPanic(1) }) + expectSuccess(t, "ReportPanic", func() error { return r.ReportPanic(0) }) + expectSuccess(t, "ReportPanic", func() error { return r.ReportPanic(1) }) + metricstest.CheckLastValueData(t, "panic_mode", wantTags, 1) + + expectSuccess(t, "ReportPanic", func() error { return r.ReportPanic(0) }) + metricstest.CheckLastValueData(t, "panic_mode", wantTags, 0) +} + +func TestReporterEmptyServiceName(t *testing.T) { + resetMetrics() + // Metrics reported to an empty service name will be recorded with service 
"unknown" (metricskey.ValueUnknown). + r, _ := NewStatsReporter("testns", "" /*service=*/, "testconfig", "testrev") + wantTags := map[string]string{ + metricskey.LabelNamespaceName: "testns", + metricskey.LabelServiceName: metricskey.ValueUnknown, + metricskey.LabelConfigurationName: "testconfig", + metricskey.LabelRevisionName: "testrev", + } + expectSuccess(t, "ReportDesiredPodCount", func() error { return r.ReportDesiredPodCount(10) }) + metricstest.CheckLastValueData(t, "desired_pods", wantTags, 10) +} + +func expectSuccess(t *testing.T, funcName string, f func() error) { + if err := f(); err != nil { + t.Errorf("Reporter.%v() expected success but got error %v", funcName, err) + } +} + +// Resets global state from the opencensus package +// Required to run at the beginning of tests that check metrics' values +// to make the tests idempotent. +func resetMetrics() { + metricstest.Unregister( + desiredPodCountM.Name(), + requestedPodCountM.Name(), + actualPodCountM.Name(), + stableRequestConcurrencyM.Name(), + panicRequestConcurrencyM.Name(), + excessBurstCapacityM.Name(), + targetRequestConcurrencyM.Name(), + panicM.Name(), + stableRPSM.Name(), + panicRPSM.Name(), + targetRPSM.Name()) + register() +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper.go b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper.go new file mode 100644 index 0000000000..3c2069052f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper.go @@ -0,0 +1,226 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaler + +import ( + "errors" + "fmt" + "net/http" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/resources" +) + +const ( + httpClientTimeout = 3 * time.Second + + // scraperPodName is the name used in all stats sent from the scraper to + // the autoscaler. The actual customer pods are hidden behind the scraper. The + // autoscaler does need to know how many customer pods are reporting metrics. + // Instead, the autoscaler knows the stats it receives are either from the + // scraper or the activator. + scraperPodName = "service-scraper" + + // scraperMaxRetries are retries to be done to the actual Scrape routine. We want + // to retry if a Scrape returns an error or if the Scrape goes to a pod we already + // scraped. + scraperMaxRetries = 10 +) + +var ( + // ErrFailedGetEndpoints specifies the error returned by scraper when it fails to + // get endpoints. + ErrFailedGetEndpoints = errors.New("failed to get endpoints") + + // ErrDidNotReceiveStat specifies the error returned by scraper when it does not receive + // stat from an unscraped pod + ErrDidNotReceiveStat = errors.New("did not receive stat from an unscraped pod") +) + +// StatsScraper defines the interface for collecting Revision metrics +type StatsScraper interface { + // Scrape scrapes the Revision queue metric endpoint. + Scrape() (Stat, error) +} + +// scrapeClient defines the interface for collecting Revision metrics for a given +// URL. Internal used only. +type scrapeClient interface { + // Scrape scrapes the given URL. + Scrape(url string) (Stat, error) +} + +// cacheDisabledClient is a http client with cache disabled. It is shared by +// every goruntime for a revision scraper. 
+var cacheDisabledClient = &http.Client{ + Transport: &http.Transport{ + // Do not use the cached connection + DisableKeepAlives: true, + }, + Timeout: httpClientTimeout, +} + +// ServiceScraper scrapes Revision metrics via a K8S service by sampling. Which +// pod to be picked up to serve the request is decided by K8S. Please see +// https://kubernetes.io/docs/concepts/services-networking/network-policies/ +// for details. +type ServiceScraper struct { + sClient scrapeClient + counter resources.ReadyPodCounter + url string +} + +// NewServiceScraper creates a new StatsScraper for the Revision which +// the given Metric is responsible for. +func NewServiceScraper(metric *av1alpha1.Metric, counter resources.ReadyPodCounter) (*ServiceScraper, error) { + sClient, err := newHTTPScrapeClient(cacheDisabledClient) + if err != nil { + return nil, err + } + return newServiceScraperWithClient(metric, counter, sClient) +} + +func newServiceScraperWithClient( + metric *av1alpha1.Metric, + counter resources.ReadyPodCounter, + sClient scrapeClient) (*ServiceScraper, error) { + if metric == nil { + return nil, errors.New("metric must not be nil") + } + if counter == nil { + return nil, errors.New("counter must not be nil") + } + if sClient == nil { + return nil, errors.New("scrape client must not be nil") + } + revName := metric.Labels[serving.RevisionLabelKey] + if revName == "" { + return nil, fmt.Errorf("no Revision label found for Metric %s", metric.Name) + } + + return &ServiceScraper{ + sClient: sClient, + counter: counter, + url: urlFromTarget(metric.Spec.ScrapeTarget, metric.ObjectMeta.Namespace), + }, nil +} + +func urlFromTarget(t, ns string) string { + return fmt.Sprintf( + "http://%s.%s:%d/metrics", + t, ns, networking.AutoscalingQueueMetricsPort) +} + +// Scrape calls the destination service then sends it +// to the given stats channel. 
+func (s *ServiceScraper) Scrape() (Stat, error) { + readyPodsCount, err := s.counter.ReadyCount() + if err != nil { + return emptyStat, ErrFailedGetEndpoints + } + + if readyPodsCount == 0 { + return emptyStat, nil + } + + sampleSize := populationMeanSampleSize(readyPodsCount) + statCh := make(chan Stat, sampleSize) + scrapedPods := &sync.Map{} + + grp := errgroup.Group{} + for i := 0; i < sampleSize; i++ { + grp.Go(func() error { + for tries := 1; ; tries++ { + stat, err := s.tryScrape(scrapedPods) + if err == nil { + statCh <- stat + return nil + } + + // Return the error if we exhausted our retries. + if tries == scraperMaxRetries { + return err + } + } + }) + } + + // Return the inner error, if any. + if err := grp.Wait(); err != nil { + return emptyStat, fmt.Errorf("unsuccessful scrape, sampleSize=%d: %w", sampleSize, err) + } + close(statCh) + + var ( + avgConcurrency float64 + avgProxiedConcurrency float64 + reqCount float64 + proxiedReqCount float64 + successCount float64 + ) + + for stat := range statCh { + successCount++ + avgConcurrency += stat.AverageConcurrentRequests + avgProxiedConcurrency += stat.AverageProxiedConcurrentRequests + reqCount += stat.RequestCount + proxiedReqCount += stat.ProxiedRequestCount + } + + frpc := float64(readyPodsCount) + avgConcurrency = avgConcurrency / successCount + avgProxiedConcurrency = avgProxiedConcurrency / successCount + reqCount = reqCount / successCount + proxiedReqCount = proxiedReqCount / successCount + + // Assumption: A particular pod can stand for other pods, i.e. other pods + // have similar concurrency and QPS. + // + // Hide the actual pods behind scraper and send only one stat for all the + // customer pods per scraping. The pod name is set to a unique value, i.e. + // scraperPodName so in autoscaler all stats are either from activator or + // scraper. 
+ return Stat{ + Time: time.Now(), + PodName: scraperPodName, + AverageConcurrentRequests: avgConcurrency * frpc, + AverageProxiedConcurrentRequests: avgProxiedConcurrency * frpc, + RequestCount: reqCount * frpc, + ProxiedRequestCount: proxiedReqCount * frpc, + }, nil +} + +// tryScrape runs a single scrape and checks if this pod wasn't already scraped +// against the given already scraped pods. +func (s *ServiceScraper) tryScrape(scrapedPods *sync.Map) (Stat, error) { + stat, err := s.sClient.Scrape(s.url) + if err != nil { + return emptyStat, err + } + + if _, exists := scrapedPods.LoadOrStore(stat.PodName, struct{}{}); exists { + return emptyStat, ErrDidNotReceiveStat + } + + return stat, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper_test.go new file mode 100644 index 0000000000..31aa484d57 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/stats_scraper_test.go @@ -0,0 +1,271 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaler + +import ( + "errors" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/resources" +) + +const ( + testRevision = "test-revision" + testService = "test-revision-metrics" + testNamespace = "test-namespace" +) + +var ( + testStats = []Stat{ + { + PodName: "pod-1", + AverageConcurrentRequests: 3.0, + AverageProxiedConcurrentRequests: 2.0, + RequestCount: 5, + ProxiedRequestCount: 4, + }, { + PodName: "pod-2", + AverageConcurrentRequests: 5.0, + AverageProxiedConcurrentRequests: 4.0, + RequestCount: 7, + ProxiedRequestCount: 6, + }, { + PodName: "pod-3", + AverageConcurrentRequests: 3.0, + AverageProxiedConcurrentRequests: 2.0, + RequestCount: 5, + ProxiedRequestCount: 4, + }, + } +) + +func TestNewServiceScraperWithClientHappyCase(t *testing.T) { + client := newTestScrapeClient(testStats, []error{nil}) + if scraper, err := serviceScraperForTest(client); err != nil { + t.Fatalf("serviceScraperForTest=%v, want no error", err) + } else if scraper.url != testURL { + t.Errorf("scraper.url=%v, want %v", scraper.url, testURL) + } +} + +func TestNewServiceScraperWithClientErrorCases(t *testing.T) { + metric := testMetric() + invalidMetric := testMetric() + invalidMetric.Labels = map[string]string{} + client := newTestScrapeClient(testStats, []error{nil}) + lister := kubeInformer.Core().V1().Endpoints().Lister() + counter := resources.NewScopedEndpointsCounter(lister, testNamespace, testService) + + testCases := []struct { + name string + metric *av1alpha1.Metric + client scrapeClient + counter resources.ReadyPodCounter + expectedErr string + }{{ + name: "Empty Decider", + client: client, + counter: counter, + expectedErr: "metric must not be nil", + }, { + name: "Missing revision label in Decider", + metric: invalidMetric, + client: client, + counter: 
counter, + expectedErr: "no Revision label found for Metric test-revision", + }, { + name: "Empty scrape client", + metric: metric, + counter: counter, + expectedErr: "scrape client must not be nil", + }, { + name: "Empty lister", + metric: metric, + client: client, + counter: nil, + expectedErr: "counter must not be nil", + }} + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + if _, err := newServiceScraperWithClient(test.metric, test.counter, test.client); err != nil { + got := err.Error() + want := test.expectedErr + if got != want { + t.Errorf("Got error message: %v. Want: %v", got, want) + } + } else { + t.Errorf("Expected error from CreateNewServiceScraper, got nil") + } + }) + } +} + +func TestScrapeReportStatWhenAllCallsSucceed(t *testing.T) { + client := newTestScrapeClient(testStats, []error{nil}) + scraper, err := serviceScraperForTest(client) + if err != nil { + t.Fatalf("serviceScraperForTest=%v, want no error", err) + } + + // Make an Endpoints with 3 pods. + endpoints(3, testService) + + // Scrape will set a timestamp bigger than this. 
+ now := time.Now() + got, err := scraper.Scrape() + if err != nil { + t.Fatalf("unexpected error from scraper.Scrape(): %v", err) + } + + if got.Time.Before(now) { + t.Errorf("stat.Time=%v, want bigger than %v", got.Time, now) + } + if got.PodName != scraperPodName { + t.Errorf("stat.PodName=%v, want %v", got.PodName, scraperPodName) + } + // (3.0 + 5.0 + 3.0) / 3.0 * 3 = 11 + if got.AverageConcurrentRequests != 11.0 { + t.Errorf("stat.AverageConcurrentRequests=%v, want %v", + got.AverageConcurrentRequests, 11.0) + } + // ((5 + 7 + 5) / 3.0) * 3 = 17 + if got.RequestCount != 17 { + t.Errorf("stat.RequestCount=%v, want %v", got.RequestCount, 15) + } + // (2.0 + 4.0 + 2.0) / 3.0 * 3 = 8 + if got.AverageProxiedConcurrentRequests != 8.0 { + t.Errorf("stat.AverageProxiedConcurrentRequests=%v, want %v", + got.AverageProxiedConcurrentRequests, 8.0) + } + // ((4 + 6 + 4) / 3.0) * 3 = 14 + if got.ProxiedRequestCount != 14 { + t.Errorf("stat.ProxiedCount=%v, want %v", got.ProxiedRequestCount, 12) + } +} + +func TestScrapeReportErrorCannotFindEnoughPods(t *testing.T) { + client := newTestScrapeClient(testStats[2:], []error{nil}) + scraper, err := serviceScraperForTest(client) + if err != nil { + t.Fatalf("serviceScraperForTest=%v, want no error", err) + } + + // Make an Endpoints with 2 pods. + endpoints(2, testService) + + _, err = scraper.Scrape() + if err == nil { + t.Errorf("scrape.Scrape() = nil, expected an error") + } +} + +func TestScrapeReportErrorIfAnyFails(t *testing.T) { + errTest := errors.New("test") + + // 1 success and 10 failures so one scrape fails permanently through retries. + client := newTestScrapeClient(testStats, []error{nil, + errTest, errTest, errTest, errTest, errTest, errTest, errTest, errTest, errTest, errTest}) + scraper, err := serviceScraperForTest(client) + if err != nil { + t.Fatalf("serviceScraperForTest=%v, want no error", err) + } + + // Make an Endpoints with 2 pods. 
+ endpoints(2, testService) + + _, err = scraper.Scrape() + if !errors.Is(err, errTest) { + t.Errorf("scraper.Scrape() = %v, want %v wrapped", err, errTest) + } +} + +func TestScrapeDoNotScrapeIfNoPodsFound(t *testing.T) { + client := newTestScrapeClient(testStats, nil) + scraper, err := serviceScraperForTest(client) + if err != nil { + t.Fatalf("serviceScraperForTest=%v, want no error", err) + } + + // Make an Endpoints with 0 pods. + endpoints(0, testService) + + stat, err := scraper.Scrape() + if err != nil { + t.Fatalf("scraper.Scrape() returned error: %v", err) + } + if stat != emptyStat { + t.Error("Received unexpected Stat.") + } +} + +func serviceScraperForTest(sClient scrapeClient) (*ServiceScraper, error) { + metric := testMetric() + counter := resources.NewScopedEndpointsCounter(kubeInformer.Core().V1().Endpoints().Lister(), testNamespace, testService) + return newServiceScraperWithClient(metric, counter, sClient) +} + +func testMetric() *av1alpha1.Metric { + return &av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Labels: map[string]string{ + serving.RevisionLabelKey: testRevision, + }, + }, + Spec: av1alpha1.MetricSpec{ + ScrapeTarget: testRevision + "-zhudex", + }, + } +} + +func newTestScrapeClient(stats []Stat, errs []error) scrapeClient { + return &fakeScrapeClient{ + stats: stats, + errs: errs, + } +} + +type fakeScrapeClient struct { + i int + stats []Stat + errs []error + mutex sync.Mutex +} + +// Scrape return the next item in the stats and error array of fakeScrapeClient. 
+func (c *fakeScrapeClient) Scrape(url string) (Stat, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + ans := c.stats[c.i%len(c.stats)] + err := c.errs[c.i%len(c.errs)] + c.i++ + return ans, err +} + +func TestURLFromTarget(t *testing.T) { + if got, want := "http://dance.now:9090/metrics", urlFromTarget("dance", "now"); got != want { + t.Errorf("urlFromTarget = %s, want: %s, diff: %s", got, want, cmp.Diff(got, want)) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/doc.go b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/doc.go new file mode 100644 index 0000000000..ef2b5397c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +Package statserver provides a WebSocket server which receives autoscaler statistics, typically from queue proxy sidecar +containers, and sends them to a channel. + +*/ +package statserver diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go new file mode 100644 index 0000000000..3ccc508c38 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go @@ -0,0 +1,205 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package statserver + +import ( + "bytes" + "context" + "encoding/gob" + "net" + "net/http" + "sync" + "time" + + "github.com/gorilla/websocket" + "go.uber.org/zap" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/network" +) + +const closeCodeServiceRestart = 1012 // See https://www.iana.org/assignments/websocket/websocket.xhtml + +// Server receives autoscaler statistics over WebSocket and sends them to a channel. +type Server struct { + addr string + wsSrv http.Server + servingCh chan struct{} + stopCh chan struct{} + statsCh chan<- autoscaler.StatMessage + openClients sync.WaitGroup + logger *zap.SugaredLogger +} + +// New creates a Server which will receive autoscaler statistics and forward them to statsCh until Shutdown is called. 
+func New(statsServerAddr string, statsCh chan<- autoscaler.StatMessage, logger *zap.SugaredLogger) *Server { + svr := Server{ + addr: statsServerAddr, + servingCh: make(chan struct{}), + stopCh: make(chan struct{}), + statsCh: statsCh, + openClients: sync.WaitGroup{}, + logger: logger.Named("stats-websocket-server").With("address", statsServerAddr), + } + + mux := http.NewServeMux() + mux.HandleFunc("/", svr.Handler) + svr.wsSrv = http.Server{ + Addr: statsServerAddr, + Handler: mux, + ConnState: svr.onConnStateChange, + } + return &svr +} + +func (s *Server) onConnStateChange(conn net.Conn, state http.ConnState) { + if state == http.StateNew { + tcpConn := conn.(*net.TCPConn) + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(3 * time.Minute) + } +} + +// ListenAndServe listens on the address s.addr and handles incoming connections. +// It blocks until the server fails or Shutdown is called. +// It returns an error or, if Shutdown was called, nil. +func (s *Server) ListenAndServe() error { + listener, err := s.listen() + if err != nil { + return err + } + return s.serve(listener) +} + +func (s *Server) listen() (net.Listener, error) { + s.logger.Info("Starting") + return net.Listen("tcp", s.addr) +} + +func (s *Server) serve(l net.Listener) error { + close(s.servingCh) + if err := s.wsSrv.Serve(l); err != http.ErrServerClosed { + return err + } + return nil +} + +func handleHealthz(w http.ResponseWriter, r *http.Request) bool { + if network.IsKubeletProbe(r) { + // As an initial approach, once stats server is up -- return true. + w.WriteHeader(http.StatusOK) + return true + } + return false +} + +// Handler exposes a websocket handler for receiving stats from queue +// sidecar containers. 
+func (s *Server) Handler(w http.ResponseWriter, r *http.Request) { + s.logger.Debug("Handle entered") + if handleHealthz(w, r) { + return + } + var upgrader websocket.Upgrader + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + s.logger.Errorw("error upgrading websocket", zap.Error(err)) + return + } + + handlerCh := make(chan struct{}) + + s.openClients.Add(1) + go func() { + defer s.openClients.Done() + select { + case <-s.stopCh: + // Send a close message to tell the client to immediately reconnect + s.logger.Debug("Sending close message to client") + err := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(closeCodeServiceRestart, "Restarting")) + if err != nil { + s.logger.Errorf("Failed to send close message to client: %#v", err) + } + conn.Close() + case <-handlerCh: + s.logger.Debug("Handler exit complete") + } + }() + + s.logger.Debug("Connection upgraded to WebSocket. Entering receive loop.") + + for { + messageType, msg, err := conn.ReadMessage() + if err != nil { + // We close abnormally, because we're just closing the connection in the client, + // which is okay. There's no value delaying closure of the connection unnecessarily. + if websocket.IsCloseError(err, websocket.CloseAbnormalClosure) { + s.logger.Debug("Handler disconnected") + } else { + s.logger.Errorf("Handler exiting on error: %#v", err) + } + close(handlerCh) + return + } + if messageType != websocket.BinaryMessage { + s.logger.Error("Dropping non-binary message.") + continue + } + dec := gob.NewDecoder(bytes.NewBuffer(msg)) + var sm autoscaler.StatMessage + err = dec.Decode(&sm) + if err != nil { + s.logger.Error(err) + continue + } + sm.Stat.Time = time.Now() + + s.logger.Debugf("Received stat message: %+v", sm) + s.statsCh <- sm + } +} + +// Shutdown terminates the server gracefully for the given timeout period and then returns. 
+func (s *Server) Shutdown(timeout time.Duration) { + <-s.servingCh + s.logger.Info("Shutting down") + + close(s.stopCh) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := s.wsSrv.Shutdown(ctx) + if err != nil { + if err == context.DeadlineExceeded { + s.logger.Warn("Shutdown timed out") + } else { + s.logger.Errorw("Shutdown failed.", zap.Error(err)) + } + } + + done := make(chan struct{}) + go func() { + defer close(done) + s.openClients.Wait() + }() + + // Wait until all client connections have been closed or any remaining timeout expires. + select { + case <-done: + s.logger.Info("Shutdown complete") + case <-ctx.Done(): + s.logger.Warn("Shutdown timed out") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server_test.go b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server_test.go new file mode 100644 index 0000000000..acd1b2aac2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/statserver/server_test.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package statserver + +import ( + "bytes" + "encoding/gob" + "fmt" + "net" + "net/http" + "net/url" + "runtime" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/gorilla/websocket" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "knative.dev/serving/pkg/autoscaler" + + "k8s.io/apimachinery/pkg/types" +) + +func TestServerLifecycle(t *testing.T) { + statsCh := make(chan autoscaler.StatMessage) + server := newTestServer(statsCh) + + eg := errgroup.Group{} + eg.Go(func() error { + return server.listenAndServe() + }) + + server.listenAddr() + server.Shutdown(time.Second) + + if err := eg.Wait(); err != nil { + t.Error("listenAndServe failed.", err) + } +} + +func TestProbe(t *testing.T) { + statsCh := make(chan autoscaler.StatMessage) + server := newTestServer(statsCh) + + defer server.Shutdown(0) + go server.listenAndServe() + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/healthz", server.listenAddr()), nil) + if err != nil { + t.Fatal("Error creating request:", err) + } + req.Header.Set("User-Agent", "kube-probe/1.15.i.wish") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatal("Error roundtripping:", err) + } + defer resp.Body.Close() + if got, want := resp.StatusCode, http.StatusOK; got != want { + t.Errorf("StatusCode: %v, want: %v", got, want) + } +} + +func TestStatsReceived(t *testing.T) { + statsCh := make(chan autoscaler.StatMessage) + server := newTestServer(statsCh) + + defer server.Shutdown(0) + go server.listenAndServe() + + statSink := dialOK(server.listenAddr(), t) + + assertReceivedOK(newStatMessage(types.NamespacedName{Namespace: "test-namespace", Name: "test-revision"}, "activator1", 2.1, 51), statSink, statsCh, t) + assertReceivedOK(newStatMessage(types.NamespacedName{Namespace: "test-namespace", Name: "test-revision2"}, "activator2", 2.2, 30), statSink, statsCh, t) + + closeSink(statSink, t) +} + +func TestServerShutdown(t *testing.T) { + 
statsCh := make(chan autoscaler.StatMessage) + server := newTestServer(statsCh) + + go server.listenAndServe() + + listenAddr := server.listenAddr() + statSink := dialOK(listenAddr, t) + + assertReceivedOK(newStatMessage(types.NamespacedName{Namespace: "test-namespace", Name: "test-revision"}, "activator1", 2.1, 51), statSink, statsCh, t) + + server.Shutdown(time.Second) + // We own the channel. + close(statsCh) + + // Send a statistic to the server + send(statSink, newStatMessage(types.NamespacedName{Namespace: "test-namespace", Name: "test-revision2"}, "activator2", 2.2, 30), t) + + // Check the statistic was not received + _, ok := <-statsCh + if ok { + t.Fatal("Received statistic after shutdown") + } + + // Check connection has been closed with a close control message with a "service restart" close code + if _, _, err := statSink.NextReader(); err == nil { + t.Fatal("Connection not closed") + } else { + err, ok := err.(*websocket.CloseError) + if !ok { + t.Fatal("CloseError not received") + } + if err.Code != 1012 { + t.Fatalf("CloseError with unexpected close code %d received", err.Code) + } + } + + // Check that new connections are refused with some error + if _, err := dial(listenAddr, t); err == nil { + t.Fatal("Connection not refused") + } + + closeSink(statSink, t) +} + +func TestServerDoesNotLeakGoroutines(t *testing.T) { + statsCh := make(chan autoscaler.StatMessage) + server := newTestServer(statsCh) + + go server.listenAndServe() + + originalGoroutines := runtime.NumGoroutine() + + listenAddr := server.listenAddr() + statSink := dialOK(listenAddr, t) + + assertReceivedOK(newStatMessage(types.NamespacedName{Namespace: "test-namespace", Name: "test-revision"}, "activator1", 2.1, 51), statSink, statsCh, t) + + closeSink(statSink, t) + + // Check the number of goroutines eventually reduces to the number there were before the connection was created + for i := 1000; i >= 0; i-- { + currentGoRoutines := runtime.NumGoroutine() + if currentGoRoutines <= 
originalGoroutines { + break + } + time.Sleep(5 * time.Millisecond) + if i == 0 { + t.Fatalf("Current number of goroutines %d is not equal to the original number %d", currentGoRoutines, originalGoroutines) + } + } + + server.Shutdown(time.Second) +} + +func newStatMessage(revKey types.NamespacedName, podName string, averageConcurrentRequests float64, requestCount float64) autoscaler.StatMessage { + return autoscaler.StatMessage{ + Key: revKey, + Stat: autoscaler.Stat{ + PodName: podName, + AverageConcurrentRequests: averageConcurrentRequests, + RequestCount: requestCount, + }, + } +} + +func assertReceivedOK(sm autoscaler.StatMessage, statSink *websocket.Conn, statsCh <-chan autoscaler.StatMessage, t *testing.T) bool { + send(statSink, sm, t) + recv, ok := <-statsCh + if !ok { + t.Fatalf("statistic not received") + } + if recv.Stat.Time == (time.Time{}) { + t.Fatalf("Stat time is nil") + } + ignoreTimeField := cmpopts.IgnoreFields(autoscaler.StatMessage{}, "Stat.Time") + if !cmp.Equal(sm, recv, ignoreTimeField) { + t.Fatalf("StatMessage mismatch: diff (-got, +want) %s", cmp.Diff(recv, sm, ignoreTimeField)) + } + return true +} + +func dialOK(serverURL string, t *testing.T) *websocket.Conn { + statSink, err := dial(serverURL, t) + if err != nil { + t.Fatal("Dial failed:", err) + } + return statSink +} + +func dial(serverURL string, t *testing.T) (*websocket.Conn, error) { + u, err := url.Parse(serverURL) + if err != nil { + t.Fatal(err) + } + u.Scheme = "ws" + + dialer := &websocket.Dialer{ + HandshakeTimeout: time.Second, + } + statSink, _, err := dialer.Dial(u.String(), nil) + return statSink, err +} + +func send(statSink *websocket.Conn, sm autoscaler.StatMessage, t *testing.T) { + var b bytes.Buffer + enc := gob.NewEncoder(&b) + + if err := enc.Encode(sm); err != nil { + t.Fatal("Failed to encode data from stats channel:", err) + } + if err := statSink.WriteMessage(websocket.BinaryMessage, b.Bytes()); err != nil { + t.Fatal("Failed to write to stat sink:", err) 
+ } +} + +func closeSink(statSink *websocket.Conn, t *testing.T) { + if err := statSink.Close(); err != nil { + t.Fatal("Failed to close", err) + } +} + +const testAddress = "127.0.0.1:0" + +type testServer struct { + *Server + listenAddrCh chan string +} + +func newTestServer(statsCh chan<- autoscaler.StatMessage) *testServer { + return &testServer{ + Server: New(testAddress, statsCh, zap.NewNop().Sugar()), + listenAddrCh: make(chan string, 1), + } +} + +// listenAddr returns the address on which the server is listening. Blocks until listenAndServe is called. +func (s *testServer) listenAddr() string { + return <-s.listenAddrCh +} + +func (s *testServer) listenAndServe() error { + listener, err := s.listen() + if err != nil { + return err + } + return s.serve(&testListener{listener, s.listenAddrCh}) +} + +type testListener struct { + net.Listener + listenAddr chan string +} + +func (t *testListener) Accept() (net.Conn, error) { + t.listenAddr <- "http://" + t.Listener.Addr().String() + return t.Listener.Accept() +} diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/testdata/config-autoscaler.yaml b/test/vendor/knative.dev/serving/pkg/autoscaler/testdata/config-autoscaler.yaml new file mode 120000 index 0000000000..a57d715c10 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/testdata/config-autoscaler.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/autoscaler/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/autoscaler/zz_generated.deepcopy.go new file mode 100644 index 0000000000..b6e745ab0a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/autoscaler/zz_generated.deepcopy.go @@ -0,0 +1,56 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package autoscaler + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Decider) DeepCopyInto(out *Decider) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Decider. +func (in *Decider) DeepCopy() *Decider { + if in == nil { + return nil + } + out := new(Decider) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/clientset.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/clientset.go new file mode 100644 index 0000000000..dead602a48 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/clientset.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + acmev1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2" + certmanagerv1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AcmeV1alpha2() acmev1alpha2.AcmeV1alpha2Interface + CertmanagerV1alpha2() certmanagerv1alpha2.CertmanagerV1alpha2Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + acmeV1alpha2 *acmev1alpha2.AcmeV1alpha2Client + certmanagerV1alpha2 *certmanagerv1alpha2.CertmanagerV1alpha2Client +} + +// AcmeV1alpha2 retrieves the AcmeV1alpha2Client +func (c *Clientset) AcmeV1alpha2() acmev1alpha2.AcmeV1alpha2Interface { + return c.acmeV1alpha2 +} + +// CertmanagerV1alpha2 retrieves the CertmanagerV1alpha2Client +func (c *Clientset) CertmanagerV1alpha2() certmanagerv1alpha2.CertmanagerV1alpha2Interface { + return c.certmanagerV1alpha2 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.acmeV1alpha2, err = acmev1alpha2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.certmanagerV1alpha2, err = certmanagerv1alpha2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.acmeV1alpha2 = acmev1alpha2.NewForConfigOrDie(c) + cs.certmanagerV1alpha2 = certmanagerv1alpha2.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.acmeV1alpha2 = acmev1alpha2.New(c) + cs.certmanagerV1alpha2 = certmanagerv1alpha2.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/doc.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/doc.go rename to test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/doc.go index 1122e50bfc..e48c2aa446 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..0b2d7df4b0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + acmev1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2" + fakeacmev1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake" + certmanagerv1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2" + fakecertmanagerv1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. 
Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// AcmeV1alpha2 retrieves the AcmeV1alpha2Client +func (c *Clientset) AcmeV1alpha2() acmev1alpha2.AcmeV1alpha2Interface { + return &fakeacmev1alpha2.FakeAcmeV1alpha2{Fake: &c.Fake} +} + +// CertmanagerV1alpha2 retrieves the CertmanagerV1alpha2Client +func (c *Clientset) CertmanagerV1alpha2() certmanagerv1alpha2.CertmanagerV1alpha2Interface { + return &fakecertmanagerv1alpha2.FakeCertmanagerV1alpha2{Fake: &c.Fake} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..2c4903250c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
+package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/register.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..3b5495b2f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + acmev1alpha2.AddToScheme, + certmanagerv1alpha2.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/doc.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/doc.go rename to test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/doc.go index 7d76538485..7acc2dcf25 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/register.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..fd43e5eb84 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + acmev1alpha2.AddToScheme, + certmanagerv1alpha2.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/acme_client.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/acme_client.go new file mode 100644 index 0000000000..497a49e8bb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/acme_client.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + rest "k8s.io/client-go/rest" + "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +type AcmeV1alpha2Interface interface { + RESTClient() rest.Interface + ChallengesGetter + OrdersGetter +} + +// AcmeV1alpha2Client is used to interact with features provided by the acme.cert-manager.io group. +type AcmeV1alpha2Client struct { + restClient rest.Interface +} + +func (c *AcmeV1alpha2Client) Challenges(namespace string) ChallengeInterface { + return newChallenges(c, namespace) +} + +func (c *AcmeV1alpha2Client) Orders(namespace string) OrderInterface { + return newOrders(c, namespace) +} + +// NewForConfig creates a new AcmeV1alpha2Client for the given config. +func NewForConfig(c *rest.Config) (*AcmeV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AcmeV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new AcmeV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AcmeV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AcmeV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *AcmeV1alpha2Client { + return &AcmeV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AcmeV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/challenge.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/challenge.go new file mode 100644 index 0000000000..fb2c560533 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/challenge.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// ChallengesGetter has a method to return a ChallengeInterface. +// A group's client should implement this interface. +type ChallengesGetter interface { + Challenges(namespace string) ChallengeInterface +} + +// ChallengeInterface has methods to work with Challenge resources. 
+type ChallengeInterface interface { + Create(*v1alpha2.Challenge) (*v1alpha2.Challenge, error) + Update(*v1alpha2.Challenge) (*v1alpha2.Challenge, error) + UpdateStatus(*v1alpha2.Challenge) (*v1alpha2.Challenge, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.Challenge, error) + List(opts v1.ListOptions) (*v1alpha2.ChallengeList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Challenge, err error) + ChallengeExpansion +} + +// challenges implements ChallengeInterface +type challenges struct { + client rest.Interface + ns string +} + +// newChallenges returns a Challenges +func newChallenges(c *AcmeV1alpha2Client, namespace string) *challenges { + return &challenges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the challenge, and returns the corresponding challenge object, and an error if there is any. +func (c *challenges) Get(name string, options v1.GetOptions) (result *v1alpha2.Challenge, err error) { + result = &v1alpha2.Challenge{} + err = c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Challenges that match those selectors. +func (c *challenges) List(opts v1.ListOptions) (result *v1alpha2.ChallengeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ChallengeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested challenges. +func (c *challenges) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a challenge and creates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *challenges) Create(challenge *v1alpha2.Challenge) (result *v1alpha2.Challenge, err error) { + result = &v1alpha2.Challenge{} + err = c.client.Post(). + Namespace(c.ns). + Resource("challenges"). + Body(challenge). + Do(). + Into(result) + return +} + +// Update takes the representation of a challenge and updates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *challenges) Update(challenge *v1alpha2.Challenge) (result *v1alpha2.Challenge, err error) { + result = &v1alpha2.Challenge{} + err = c.client.Put(). + Namespace(c.ns). + Resource("challenges"). + Name(challenge.Name). + Body(challenge). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *challenges) UpdateStatus(challenge *v1alpha2.Challenge) (result *v1alpha2.Challenge, err error) { + result = &v1alpha2.Challenge{} + err = c.client.Put(). + Namespace(c.ns). + Resource("challenges"). + Name(challenge.Name). + SubResource("status"). + Body(challenge). + Do(). + Into(result) + return +} + +// Delete takes name of the challenge and deletes it. Returns an error if one occurs. 
+func (c *challenges) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("challenges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *challenges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("challenges"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched challenge. +func (c *challenges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Challenge, err error) { + result = &v1alpha2.Challenge{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("challenges"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/doc.go new file mode 100644 index 0000000000..1488f82d14 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_acme_client.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_acme_client.go new file mode 100644 index 0000000000..cd5fb0a5c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_acme_client.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2" +) + +type FakeAcmeV1alpha2 struct { + *testing.Fake +} + +func (c *FakeAcmeV1alpha2) Challenges(namespace string) v1alpha2.ChallengeInterface { + return &FakeChallenges{c, namespace} +} + +func (c *FakeAcmeV1alpha2) Orders(namespace string) v1alpha2.OrderInterface { + return &FakeOrders{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAcmeV1alpha2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_challenge.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_challenge.go new file mode 100644 index 0000000000..9f0aff4a4a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_challenge.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeChallenges implements ChallengeInterface +type FakeChallenges struct { + Fake *FakeAcmeV1alpha2 + ns string +} + +var challengesResource = schema.GroupVersionResource{Group: "acme.cert-manager.io", Version: "v1alpha2", Resource: "challenges"} + +var challengesKind = schema.GroupVersionKind{Group: "acme.cert-manager.io", Version: "v1alpha2", Kind: "Challenge"} + +// Get takes name of the challenge, and returns the corresponding challenge object, and an error if there is any. +func (c *FakeChallenges) Get(name string, options v1.GetOptions) (result *v1alpha2.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(challengesResource, c.ns, name), &v1alpha2.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Challenge), err +} + +// List takes label and field selectors, and returns the list of Challenges that match those selectors. +func (c *FakeChallenges) List(opts v1.ListOptions) (result *v1alpha2.ChallengeList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(challengesResource, challengesKind, c.ns, opts), &v1alpha2.ChallengeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.ChallengeList{ListMeta: obj.(*v1alpha2.ChallengeList).ListMeta} + for _, item := range obj.(*v1alpha2.ChallengeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested challenges. +func (c *FakeChallenges) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(challengesResource, c.ns, opts)) + +} + +// Create takes the representation of a challenge and creates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *FakeChallenges) Create(challenge *v1alpha2.Challenge) (result *v1alpha2.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(challengesResource, c.ns, challenge), &v1alpha2.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Challenge), err +} + +// Update takes the representation of a challenge and updates it. Returns the server's representation of the challenge, and an error, if there is any. +func (c *FakeChallenges) Update(challenge *v1alpha2.Challenge) (result *v1alpha2.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(challengesResource, c.ns, challenge), &v1alpha2.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Challenge), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeChallenges) UpdateStatus(challenge *v1alpha2.Challenge) (*v1alpha2.Challenge, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(challengesResource, "status", c.ns, challenge), &v1alpha2.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Challenge), err +} + +// Delete takes name of the challenge and deletes it. Returns an error if one occurs. +func (c *FakeChallenges) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(challengesResource, c.ns, name), &v1alpha2.Challenge{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeChallenges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(challengesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.ChallengeList{}) + return err +} + +// Patch applies the patch and returns the patched challenge. +func (c *FakeChallenges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Challenge, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(challengesResource, c.ns, name, pt, data, subresources...), &v1alpha2.Challenge{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Challenge), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_order.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_order.go new file mode 100644 index 0000000000..9d9caedd29 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/fake/fake_order.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOrders implements OrderInterface +type FakeOrders struct { + Fake *FakeAcmeV1alpha2 + ns string +} + +var ordersResource = schema.GroupVersionResource{Group: "acme.cert-manager.io", Version: "v1alpha2", Resource: "orders"} + +var ordersKind = schema.GroupVersionKind{Group: "acme.cert-manager.io", Version: "v1alpha2", Kind: "Order"} + +// Get takes name of the order, and returns the corresponding order object, and an error if there is any. +func (c *FakeOrders) Get(name string, options v1.GetOptions) (result *v1alpha2.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ordersResource, c.ns, name), &v1alpha2.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Order), err +} + +// List takes label and field selectors, and returns the list of Orders that match those selectors. +func (c *FakeOrders) List(opts v1.ListOptions) (result *v1alpha2.OrderList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(ordersResource, ordersKind, c.ns, opts), &v1alpha2.OrderList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.OrderList{ListMeta: obj.(*v1alpha2.OrderList).ListMeta} + for _, item := range obj.(*v1alpha2.OrderList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested orders. +func (c *FakeOrders) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ordersResource, c.ns, opts)) + +} + +// Create takes the representation of a order and creates it. Returns the server's representation of the order, and an error, if there is any. +func (c *FakeOrders) Create(order *v1alpha2.Order) (result *v1alpha2.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ordersResource, c.ns, order), &v1alpha2.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Order), err +} + +// Update takes the representation of a order and updates it. Returns the server's representation of the order, and an error, if there is any. +func (c *FakeOrders) Update(order *v1alpha2.Order) (result *v1alpha2.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ordersResource, c.ns, order), &v1alpha2.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Order), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOrders) UpdateStatus(order *v1alpha2.Order) (*v1alpha2.Order, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(ordersResource, "status", c.ns, order), &v1alpha2.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Order), err +} + +// Delete takes name of the order and deletes it. Returns an error if one occurs. +func (c *FakeOrders) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ordersResource, c.ns, name), &v1alpha2.Order{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOrders) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ordersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.OrderList{}) + return err +} + +// Patch applies the patch and returns the patched order. +func (c *FakeOrders) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Order, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ordersResource, c.ns, name, pt, data, subresources...), &v1alpha2.Order{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Order), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/generated_expansion.go new file mode 100644 index 0000000000..4345d326b7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +type ChallengeExpansion interface{} + +type OrderExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/order.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/order.go new file mode 100644 index 0000000000..3fd2cf3c1b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/acme/v1alpha2/order.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// OrdersGetter has a method to return a OrderInterface. +// A group's client should implement this interface. +type OrdersGetter interface { + Orders(namespace string) OrderInterface +} + +// OrderInterface has methods to work with Order resources. +type OrderInterface interface { + Create(*v1alpha2.Order) (*v1alpha2.Order, error) + Update(*v1alpha2.Order) (*v1alpha2.Order, error) + UpdateStatus(*v1alpha2.Order) (*v1alpha2.Order, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.Order, error) + List(opts v1.ListOptions) (*v1alpha2.OrderList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Order, err error) + OrderExpansion +} + +// orders implements OrderInterface +type orders struct { + client rest.Interface + ns string +} + +// newOrders returns a Orders +func newOrders(c *AcmeV1alpha2Client, namespace string) *orders { + return &orders{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the order, and returns the corresponding order object, and an error if there is any. +func (c *orders) Get(name string, options v1.GetOptions) (result *v1alpha2.Order, err error) { + result = &v1alpha2.Order{} + err = c.client.Get(). + Namespace(c.ns). + Resource("orders"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of Orders that match those selectors. +func (c *orders) List(opts v1.ListOptions) (result *v1alpha2.OrderList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.OrderList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested orders. +func (c *orders) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a order and creates it. Returns the server's representation of the order, and an error, if there is any. +func (c *orders) Create(order *v1alpha2.Order) (result *v1alpha2.Order, err error) { + result = &v1alpha2.Order{} + err = c.client.Post(). + Namespace(c.ns). + Resource("orders"). + Body(order). + Do(). + Into(result) + return +} + +// Update takes the representation of a order and updates it. Returns the server's representation of the order, and an error, if there is any. +func (c *orders) Update(order *v1alpha2.Order) (result *v1alpha2.Order, err error) { + result = &v1alpha2.Order{} + err = c.client.Put(). + Namespace(c.ns). + Resource("orders"). + Name(order.Name). + Body(order). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *orders) UpdateStatus(order *v1alpha2.Order) (result *v1alpha2.Order, err error) { + result = &v1alpha2.Order{} + err = c.client.Put(). + Namespace(c.ns). + Resource("orders"). + Name(order.Name). + SubResource("status"). + Body(order). + Do(). + Into(result) + return +} + +// Delete takes name of the order and deletes it. Returns an error if one occurs. +func (c *orders) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("orders"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *orders) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("orders"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched order. +func (c *orders) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Order, err error) { + result = &v1alpha2.Order{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("orders"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificate.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificate.go new file mode 100644 index 0000000000..42aac812fa --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificate.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// CertificatesGetter has a method to return a CertificateInterface. +// A group's client should implement this interface. +type CertificatesGetter interface { + Certificates(namespace string) CertificateInterface +} + +// CertificateInterface has methods to work with Certificate resources. 
+type CertificateInterface interface { + Create(*v1alpha2.Certificate) (*v1alpha2.Certificate, error) + Update(*v1alpha2.Certificate) (*v1alpha2.Certificate, error) + UpdateStatus(*v1alpha2.Certificate) (*v1alpha2.Certificate, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.Certificate, error) + List(opts v1.ListOptions) (*v1alpha2.CertificateList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Certificate, err error) + CertificateExpansion +} + +// certificates implements CertificateInterface +type certificates struct { + client rest.Interface + ns string +} + +// newCertificates returns a Certificates +func newCertificates(c *CertmanagerV1alpha2Client, namespace string) *certificates { + return &certificates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificate, and returns the corresponding certificate object, and an error if there is any. +func (c *certificates) Get(name string, options v1.GetOptions) (result *v1alpha2.Certificate, err error) { + result = &v1alpha2.Certificate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Certificates that match those selectors. +func (c *certificates) List(opts v1.ListOptions) (result *v1alpha2.CertificateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.CertificateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificates. +func (c *certificates) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a certificate and creates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *certificates) Create(certificate *v1alpha2.Certificate) (result *v1alpha2.Certificate, err error) { + result = &v1alpha2.Certificate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificates"). + Body(certificate). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificate and updates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *certificates) Update(certificate *v1alpha2.Certificate) (result *v1alpha2.Certificate, err error) { + result = &v1alpha2.Certificate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificates"). + Name(certificate.Name). + Body(certificate). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *certificates) UpdateStatus(certificate *v1alpha2.Certificate) (result *v1alpha2.Certificate, err error) { + result = &v1alpha2.Certificate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificates"). + Name(certificate.Name). + SubResource("status"). + Body(certificate). + Do(). + Into(result) + return +} + +// Delete takes name of the certificate and deletes it. Returns an error if one occurs. 
+func (c *certificates) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched certificate. +func (c *certificates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Certificate, err error) { + result = &v1alpha2.Certificate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificaterequest.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificaterequest.go new file mode 100644 index 0000000000..f5a276e730 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certificaterequest.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// CertificateRequestsGetter has a method to return a CertificateRequestInterface. +// A group's client should implement this interface. +type CertificateRequestsGetter interface { + CertificateRequests(namespace string) CertificateRequestInterface +} + +// CertificateRequestInterface has methods to work with CertificateRequest resources. 
+type CertificateRequestInterface interface { + Create(*v1alpha2.CertificateRequest) (*v1alpha2.CertificateRequest, error) + Update(*v1alpha2.CertificateRequest) (*v1alpha2.CertificateRequest, error) + UpdateStatus(*v1alpha2.CertificateRequest) (*v1alpha2.CertificateRequest, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.CertificateRequest, error) + List(opts v1.ListOptions) (*v1alpha2.CertificateRequestList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.CertificateRequest, err error) + CertificateRequestExpansion +} + +// certificateRequests implements CertificateRequestInterface +type certificateRequests struct { + client rest.Interface + ns string +} + +// newCertificateRequests returns a CertificateRequests +func newCertificateRequests(c *CertmanagerV1alpha2Client, namespace string) *certificateRequests { + return &certificateRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificateRequest, and returns the corresponding certificateRequest object, and an error if there is any. +func (c *certificateRequests) Get(name string, options v1.GetOptions) (result *v1alpha2.CertificateRequest, err error) { + result = &v1alpha2.CertificateRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateRequests that match those selectors. 
+func (c *certificateRequests) List(opts v1.ListOptions) (result *v1alpha2.CertificateRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.CertificateRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateRequests. +func (c *certificateRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a certificateRequest and creates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *certificateRequests) Create(certificateRequest *v1alpha2.CertificateRequest) (result *v1alpha2.CertificateRequest, err error) { + result = &v1alpha2.CertificateRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificaterequests"). + Body(certificateRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateRequest and updates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *certificateRequests) Update(certificateRequest *v1alpha2.CertificateRequest) (result *v1alpha2.CertificateRequest, err error) { + result = &v1alpha2.CertificateRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(certificateRequest.Name). + Body(certificateRequest). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *certificateRequests) UpdateStatus(certificateRequest *v1alpha2.CertificateRequest) (result *v1alpha2.CertificateRequest, err error) { + result = &v1alpha2.CertificateRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(certificateRequest.Name). + SubResource("status"). + Body(certificateRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateRequest and deletes it. Returns an error if one occurs. +func (c *certificateRequests) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterequests"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched certificateRequest. +func (c *certificateRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.CertificateRequest, err error) { + result = &v1alpha2.CertificateRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificaterequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certmanager_client.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certmanager_client.go new file mode 100644 index 0000000000..9fcaa81777 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/certmanager_client.go @@ -0,0 +1,104 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + rest "k8s.io/client-go/rest" + "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +type CertmanagerV1alpha2Interface interface { + RESTClient() rest.Interface + CertificatesGetter + CertificateRequestsGetter + ClusterIssuersGetter + IssuersGetter +} + +// CertmanagerV1alpha2Client is used to interact with features provided by the cert-manager.io group. 
+type CertmanagerV1alpha2Client struct { + restClient rest.Interface +} + +func (c *CertmanagerV1alpha2Client) Certificates(namespace string) CertificateInterface { + return newCertificates(c, namespace) +} + +func (c *CertmanagerV1alpha2Client) CertificateRequests(namespace string) CertificateRequestInterface { + return newCertificateRequests(c, namespace) +} + +func (c *CertmanagerV1alpha2Client) ClusterIssuers() ClusterIssuerInterface { + return newClusterIssuers(c) +} + +func (c *CertmanagerV1alpha2Client) Issuers(namespace string) IssuerInterface { + return newIssuers(c, namespace) +} + +// NewForConfig creates a new CertmanagerV1alpha2Client for the given config. +func NewForConfig(c *rest.Config) (*CertmanagerV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertmanagerV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new CertmanagerV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertmanagerV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertmanagerV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *CertmanagerV1alpha2Client { + return &CertmanagerV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CertmanagerV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/clusterissuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/clusterissuer.go new file mode 100644 index 0000000000..728471b0ef --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/clusterissuer.go @@ -0,0 +1,180 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// ClusterIssuersGetter has a method to return a ClusterIssuerInterface. +// A group's client should implement this interface. +type ClusterIssuersGetter interface { + ClusterIssuers() ClusterIssuerInterface +} + +// ClusterIssuerInterface has methods to work with ClusterIssuer resources. 
+type ClusterIssuerInterface interface { + Create(*v1alpha2.ClusterIssuer) (*v1alpha2.ClusterIssuer, error) + Update(*v1alpha2.ClusterIssuer) (*v1alpha2.ClusterIssuer, error) + UpdateStatus(*v1alpha2.ClusterIssuer) (*v1alpha2.ClusterIssuer, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.ClusterIssuer, error) + List(opts v1.ListOptions) (*v1alpha2.ClusterIssuerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.ClusterIssuer, err error) + ClusterIssuerExpansion +} + +// clusterIssuers implements ClusterIssuerInterface +type clusterIssuers struct { + client rest.Interface +} + +// newClusterIssuers returns a ClusterIssuers +func newClusterIssuers(c *CertmanagerV1alpha2Client) *clusterIssuers { + return &clusterIssuers{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterIssuer, and returns the corresponding clusterIssuer object, and an error if there is any. +func (c *clusterIssuers) Get(name string, options v1.GetOptions) (result *v1alpha2.ClusterIssuer, err error) { + result = &v1alpha2.ClusterIssuer{} + err = c.client.Get(). + Resource("clusterissuers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterIssuers that match those selectors. +func (c *clusterIssuers) List(opts v1.ListOptions) (result *v1alpha2.ClusterIssuerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.ClusterIssuerList{} + err = c.client.Get(). + Resource("clusterissuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterIssuers. +func (c *clusterIssuers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterissuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterIssuer and creates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *clusterIssuers) Create(clusterIssuer *v1alpha2.ClusterIssuer) (result *v1alpha2.ClusterIssuer, err error) { + result = &v1alpha2.ClusterIssuer{} + err = c.client.Post(). + Resource("clusterissuers"). + Body(clusterIssuer). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterIssuer and updates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *clusterIssuers) Update(clusterIssuer *v1alpha2.ClusterIssuer) (result *v1alpha2.ClusterIssuer, err error) { + result = &v1alpha2.ClusterIssuer{} + err = c.client.Put(). + Resource("clusterissuers"). + Name(clusterIssuer.Name). + Body(clusterIssuer). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *clusterIssuers) UpdateStatus(clusterIssuer *v1alpha2.ClusterIssuer) (result *v1alpha2.ClusterIssuer, err error) { + result = &v1alpha2.ClusterIssuer{} + err = c.client.Put(). + Resource("clusterissuers"). + Name(clusterIssuer.Name). + SubResource("status"). + Body(clusterIssuer). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterIssuer and deletes it. Returns an error if one occurs. 
+func (c *clusterIssuers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterissuers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterIssuers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterissuers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterIssuer. +func (c *clusterIssuers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.ClusterIssuer, err error) { + result = &v1alpha2.ClusterIssuer{} + err = c.client.Patch(pt). + Resource("clusterissuers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/doc.go new file mode 100644 index 0000000000..1488f82d14 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificate.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificate.go new file mode 100644 index 0000000000..052c8719ac --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificate.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificates implements CertificateInterface +type FakeCertificates struct { + Fake *FakeCertmanagerV1alpha2 + ns string +} + +var certificatesResource = schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1alpha2", Resource: "certificates"} + +var certificatesKind = schema.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha2", Kind: "Certificate"} + +// Get takes name of the certificate, and returns the corresponding certificate object, and an error if there is any. 
+func (c *FakeCertificates) Get(name string, options v1.GetOptions) (result *v1alpha2.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificatesResource, c.ns, name), &v1alpha2.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Certificate), err +} + +// List takes label and field selectors, and returns the list of Certificates that match those selectors. +func (c *FakeCertificates) List(opts v1.ListOptions) (result *v1alpha2.CertificateList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(certificatesResource, certificatesKind, c.ns, opts), &v1alpha2.CertificateList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.CertificateList{ListMeta: obj.(*v1alpha2.CertificateList).ListMeta} + for _, item := range obj.(*v1alpha2.CertificateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificates. +func (c *FakeCertificates) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificatesResource, c.ns, opts)) + +} + +// Create takes the representation of a certificate and creates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *FakeCertificates) Create(certificate *v1alpha2.Certificate) (result *v1alpha2.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificatesResource, c.ns, certificate), &v1alpha2.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Certificate), err +} + +// Update takes the representation of a certificate and updates it. Returns the server's representation of the certificate, and an error, if there is any. 
+func (c *FakeCertificates) Update(certificate *v1alpha2.Certificate) (result *v1alpha2.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificatesResource, c.ns, certificate), &v1alpha2.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Certificate), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificates) UpdateStatus(certificate *v1alpha2.Certificate) (*v1alpha2.Certificate, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificatesResource, "status", c.ns, certificate), &v1alpha2.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Certificate), err +} + +// Delete takes name of the certificate and deletes it. Returns an error if one occurs. +func (c *FakeCertificates) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(certificatesResource, c.ns, name), &v1alpha2.Certificate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.CertificateList{}) + return err +} + +// Patch applies the patch and returns the patched certificate. +func (c *FakeCertificates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Certificate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Certificate), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificaterequest.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificaterequest.go new file mode 100644 index 0000000000..9327821d6a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certificaterequest.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateRequests implements CertificateRequestInterface +type FakeCertificateRequests struct { + Fake *FakeCertmanagerV1alpha2 + ns string +} + +var certificaterequestsResource = schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1alpha2", Resource: "certificaterequests"} + +var certificaterequestsKind = schema.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha2", Kind: "CertificateRequest"} + +// Get takes name of the certificateRequest, and returns the corresponding certificateRequest object, and an error if there is any. +func (c *FakeCertificateRequests) Get(name string, options v1.GetOptions) (result *v1alpha2.CertificateRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificaterequestsResource, c.ns, name), &v1alpha2.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.CertificateRequest), err +} + +// List takes label and field selectors, and returns the list of CertificateRequests that match those selectors. +func (c *FakeCertificateRequests) List(opts v1.ListOptions) (result *v1alpha2.CertificateRequestList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(certificaterequestsResource, certificaterequestsKind, c.ns, opts), &v1alpha2.CertificateRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.CertificateRequestList{ListMeta: obj.(*v1alpha2.CertificateRequestList).ListMeta} + for _, item := range obj.(*v1alpha2.CertificateRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateRequests. +func (c *FakeCertificateRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificaterequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a certificateRequest and creates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *FakeCertificateRequests) Create(certificateRequest *v1alpha2.CertificateRequest) (result *v1alpha2.CertificateRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificaterequestsResource, c.ns, certificateRequest), &v1alpha2.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.CertificateRequest), err +} + +// Update takes the representation of a certificateRequest and updates it. Returns the server's representation of the certificateRequest, and an error, if there is any. +func (c *FakeCertificateRequests) Update(certificateRequest *v1alpha2.CertificateRequest) (result *v1alpha2.CertificateRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(certificaterequestsResource, c.ns, certificateRequest), &v1alpha2.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.CertificateRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateRequests) UpdateStatus(certificateRequest *v1alpha2.CertificateRequest) (*v1alpha2.CertificateRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificaterequestsResource, "status", c.ns, certificateRequest), &v1alpha2.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.CertificateRequest), err +} + +// Delete takes name of the certificateRequest and deletes it. Returns an error if one occurs. +func (c *FakeCertificateRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(certificaterequestsResource, c.ns, name), &v1alpha2.CertificateRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificateRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificaterequestsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.CertificateRequestList{}) + return err +} + +// Patch applies the patch and returns the patched certificateRequest. +func (c *FakeCertificateRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.CertificateRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificaterequestsResource, c.ns, name, pt, data, subresources...), &v1alpha2.CertificateRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.CertificateRequest), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certmanager_client.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certmanager_client.go new file mode 100644 index 0000000000..a47cc490d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_certmanager_client.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2" +) + +type FakeCertmanagerV1alpha2 struct { + *testing.Fake +} + +func (c *FakeCertmanagerV1alpha2) Certificates(namespace string) v1alpha2.CertificateInterface { + return &FakeCertificates{c, namespace} +} + +func (c *FakeCertmanagerV1alpha2) CertificateRequests(namespace string) v1alpha2.CertificateRequestInterface { + return &FakeCertificateRequests{c, namespace} +} + +func (c *FakeCertmanagerV1alpha2) ClusterIssuers() v1alpha2.ClusterIssuerInterface { + return &FakeClusterIssuers{c} +} + +func (c *FakeCertmanagerV1alpha2) Issuers(namespace string) v1alpha2.IssuerInterface { + return &FakeIssuers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCertmanagerV1alpha2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_clusterissuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_clusterissuer.go new file mode 100644 index 0000000000..c3adc64ef8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_clusterissuer.go @@ -0,0 +1,131 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterIssuers implements ClusterIssuerInterface +type FakeClusterIssuers struct { + Fake *FakeCertmanagerV1alpha2 +} + +var clusterissuersResource = schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1alpha2", Resource: "clusterissuers"} + +var clusterissuersKind = schema.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha2", Kind: "ClusterIssuer"} + +// Get takes name of the clusterIssuer, and returns the corresponding clusterIssuer object, and an error if there is any. +func (c *FakeClusterIssuers) Get(name string, options v1.GetOptions) (result *v1alpha2.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterissuersResource, name), &v1alpha2.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ClusterIssuer), err +} + +// List takes label and field selectors, and returns the list of ClusterIssuers that match those selectors. +func (c *FakeClusterIssuers) List(opts v1.ListOptions) (result *v1alpha2.ClusterIssuerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(clusterissuersResource, clusterissuersKind, opts), &v1alpha2.ClusterIssuerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.ClusterIssuerList{ListMeta: obj.(*v1alpha2.ClusterIssuerList).ListMeta} + for _, item := range obj.(*v1alpha2.ClusterIssuerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterIssuers. +func (c *FakeClusterIssuers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterissuersResource, opts)) +} + +// Create takes the representation of a clusterIssuer and creates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *FakeClusterIssuers) Create(clusterIssuer *v1alpha2.ClusterIssuer) (result *v1alpha2.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterissuersResource, clusterIssuer), &v1alpha2.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ClusterIssuer), err +} + +// Update takes the representation of a clusterIssuer and updates it. Returns the server's representation of the clusterIssuer, and an error, if there is any. +func (c *FakeClusterIssuers) Update(clusterIssuer *v1alpha2.ClusterIssuer) (result *v1alpha2.ClusterIssuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterissuersResource, clusterIssuer), &v1alpha2.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ClusterIssuer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeClusterIssuers) UpdateStatus(clusterIssuer *v1alpha2.ClusterIssuer) (*v1alpha2.ClusterIssuer, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clusterissuersResource, "status", clusterIssuer), &v1alpha2.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ClusterIssuer), err +} + +// Delete takes name of the clusterIssuer and deletes it. Returns an error if one occurs. +func (c *FakeClusterIssuers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterissuersResource, name), &v1alpha2.ClusterIssuer{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterIssuers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterissuersResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.ClusterIssuerList{}) + return err +} + +// Patch applies the patch and returns the patched clusterIssuer. +func (c *FakeClusterIssuers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.ClusterIssuer, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(clusterissuersResource, name, pt, data, subresources...), &v1alpha2.ClusterIssuer{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.ClusterIssuer), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_issuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_issuer.go new file mode 100644 index 0000000000..da9df2c255 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/fake/fake_issuer.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIssuers implements IssuerInterface +type FakeIssuers struct { + Fake *FakeCertmanagerV1alpha2 + ns string +} + +var issuersResource = schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1alpha2", Resource: "issuers"} + +var issuersKind = schema.GroupVersionKind{Group: "cert-manager.io", Version: "v1alpha2", Kind: "Issuer"} + +// Get takes name of the issuer, and returns the corresponding issuer object, and an error if there is any. +func (c *FakeIssuers) Get(name string, options v1.GetOptions) (result *v1alpha2.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(issuersResource, c.ns, name), &v1alpha2.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Issuer), err +} + +// List takes label and field selectors, and returns the list of Issuers that match those selectors. +func (c *FakeIssuers) List(opts v1.ListOptions) (result *v1alpha2.IssuerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(issuersResource, issuersKind, c.ns, opts), &v1alpha2.IssuerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.IssuerList{ListMeta: obj.(*v1alpha2.IssuerList).ListMeta} + for _, item := range obj.(*v1alpha2.IssuerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested issuers. +func (c *FakeIssuers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(issuersResource, c.ns, opts)) + +} + +// Create takes the representation of a issuer and creates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *FakeIssuers) Create(issuer *v1alpha2.Issuer) (result *v1alpha2.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(issuersResource, c.ns, issuer), &v1alpha2.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Issuer), err +} + +// Update takes the representation of a issuer and updates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *FakeIssuers) Update(issuer *v1alpha2.Issuer) (result *v1alpha2.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(issuersResource, c.ns, issuer), &v1alpha2.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Issuer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIssuers) UpdateStatus(issuer *v1alpha2.Issuer) (*v1alpha2.Issuer, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(issuersResource, "status", c.ns, issuer), &v1alpha2.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Issuer), err +} + +// Delete takes name of the issuer and deletes it. Returns an error if one occurs. +func (c *FakeIssuers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(issuersResource, c.ns, name), &v1alpha2.Issuer{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeIssuers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(issuersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.IssuerList{}) + return err +} + +// Patch applies the patch and returns the patched issuer. +func (c *FakeIssuers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Issuer, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(issuersResource, c.ns, name, pt, data, subresources...), &v1alpha2.Issuer{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Issuer), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/generated_expansion.go new file mode 100644 index 0000000000..ff439ff652 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha2 + +type CertificateExpansion interface{} + +type CertificateRequestExpansion interface{} + +type ClusterIssuerExpansion interface{} + +type IssuerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/issuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/issuer.go new file mode 100644 index 0000000000..55d9329bb2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/clientset/versioned/typed/certmanager/v1alpha2/issuer.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/certmanager/clientset/versioned/scheme" +) + +// IssuersGetter has a method to return a IssuerInterface. +// A group's client should implement this interface. +type IssuersGetter interface { + Issuers(namespace string) IssuerInterface +} + +// IssuerInterface has methods to work with Issuer resources. 
+type IssuerInterface interface { + Create(*v1alpha2.Issuer) (*v1alpha2.Issuer, error) + Update(*v1alpha2.Issuer) (*v1alpha2.Issuer, error) + UpdateStatus(*v1alpha2.Issuer) (*v1alpha2.Issuer, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.Issuer, error) + List(opts v1.ListOptions) (*v1alpha2.IssuerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Issuer, err error) + IssuerExpansion +} + +// issuers implements IssuerInterface +type issuers struct { + client rest.Interface + ns string +} + +// newIssuers returns a Issuers +func newIssuers(c *CertmanagerV1alpha2Client, namespace string) *issuers { + return &issuers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the issuer, and returns the corresponding issuer object, and an error if there is any. +func (c *issuers) Get(name string, options v1.GetOptions) (result *v1alpha2.Issuer, err error) { + result = &v1alpha2.Issuer{} + err = c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Issuers that match those selectors. +func (c *issuers) List(opts v1.ListOptions) (result *v1alpha2.IssuerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.IssuerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested issuers. 
+func (c *issuers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a issuer and creates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *issuers) Create(issuer *v1alpha2.Issuer) (result *v1alpha2.Issuer, err error) { + result = &v1alpha2.Issuer{} + err = c.client.Post(). + Namespace(c.ns). + Resource("issuers"). + Body(issuer). + Do(). + Into(result) + return +} + +// Update takes the representation of a issuer and updates it. Returns the server's representation of the issuer, and an error, if there is any. +func (c *issuers) Update(issuer *v1alpha2.Issuer) (result *v1alpha2.Issuer, err error) { + result = &v1alpha2.Issuer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("issuers"). + Name(issuer.Name). + Body(issuer). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *issuers) UpdateStatus(issuer *v1alpha2.Issuer) (result *v1alpha2.Issuer, err error) { + result = &v1alpha2.Issuer{} + err = c.client.Put(). + Namespace(c.ns). + Resource("issuers"). + Name(issuer.Name). + SubResource("status"). + Body(issuer). + Do(). + Into(result) + return +} + +// Delete takes name of the issuer and deletes it. Returns an error if one occurs. +func (c *issuers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("issuers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *issuers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("issuers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched issuer. +func (c *issuers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Issuer, err error) { + result = &v1alpha2.Issuer{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("issuers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/interface.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/interface.go new file mode 100644 index 0000000000..79f756340e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package acme + +import ( + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/challenge.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/challenge.go new file mode 100644 index 0000000000..485106723a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/challenge.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2" +) + +// ChallengeInformer provides access to a shared informer and lister for +// Challenges. +type ChallengeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ChallengeLister +} + +type challengeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewChallengeInformer constructs a new informer for Challenge type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewChallengeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredChallengeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredChallengeInformer constructs a new informer for Challenge type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredChallengeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1alpha2().Challenges(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1alpha2().Challenges(namespace).Watch(options) + }, + }, + &acmev1alpha2.Challenge{}, + resyncPeriod, + indexers, + ) +} + +func (f *challengeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredChallengeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *challengeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acmev1alpha2.Challenge{}, f.defaultInformer) +} + +func (f *challengeInformer) Lister() v1alpha2.ChallengeLister { + return v1alpha2.NewChallengeLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/interface.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/interface.go new file mode 100644 index 0000000000..7a69b1a2cc --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Challenges returns a ChallengeInformer. + Challenges() ChallengeInformer + // Orders returns a OrderInformer. + Orders() OrderInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Challenges returns a ChallengeInformer. +func (v *version) Challenges() ChallengeInformer { + return &challengeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Orders returns a OrderInformer. 
+func (v *version) Orders() OrderInformer { + return &orderInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/order.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/order.go new file mode 100644 index 0000000000..93771f3fba --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2/order.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2" +) + +// OrderInformer provides access to a shared informer and lister for +// Orders. 
+type OrderInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.OrderLister +} + +type orderInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewOrderInformer constructs a new informer for Order type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOrderInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOrderInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredOrderInformer constructs a new informer for Order type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredOrderInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1alpha2().Orders(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcmeV1alpha2().Orders(namespace).Watch(options) + }, + }, + &acmev1alpha2.Order{}, + resyncPeriod, + indexers, + ) +} + +func (f *orderInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOrderInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *orderInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acmev1alpha2.Order{}, f.defaultInformer) +} + +func (f *orderInformer) Lister() v1alpha2.OrderLister { + return v1alpha2.NewOrderLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/interface.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/interface.go new file mode 100644 index 0000000000..870ec7f63f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package certmanager + +import ( + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha2 returns a new v1alpha2.Interface. 
+func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificate.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificate.go new file mode 100644 index 0000000000..0f2d58b6d7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificate.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" +) + +// CertificateInformer provides access to a shared informer and lister for +// Certificates. 
+type CertificateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.CertificateLister +} + +type certificateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().Certificates(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().Certificates(namespace).Watch(options) + }, + }, + &certmanagerv1alpha2.Certificate{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1alpha2.Certificate{}, f.defaultInformer) +} + +func (f *certificateInformer) Lister() v1alpha2.CertificateLister { + return v1alpha2.NewCertificateLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificaterequest.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificaterequest.go new file mode 100644 index 0000000000..872b1b5c61 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/certificaterequest.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" +) + +// CertificateRequestInformer provides access to a shared informer and lister for +// CertificateRequests. +type CertificateRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.CertificateRequestLister +} + +type certificateRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateRequestInformer constructs a new informer for CertificateRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCertificateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateRequestInformer constructs a new informer for CertificateRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().CertificateRequests(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().CertificateRequests(namespace).Watch(options) + }, + }, + &certmanagerv1alpha2.CertificateRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1alpha2.CertificateRequest{}, f.defaultInformer) +} + +func (f *certificateRequestInformer) Lister() v1alpha2.CertificateRequestLister { + return 
v1alpha2.NewCertificateRequestLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/clusterissuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/clusterissuer.go new file mode 100644 index 0000000000..fe09c686e1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/clusterissuer.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" +) + +// ClusterIssuerInformer provides access to a shared informer and lister for +// ClusterIssuers. 
+type ClusterIssuerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.ClusterIssuerLister +} + +type clusterIssuerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterIssuerInformer constructs a new informer for ClusterIssuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterIssuerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterIssuerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterIssuerInformer constructs a new informer for ClusterIssuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterIssuerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().ClusterIssuers().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().ClusterIssuers().Watch(options) + }, + }, + &certmanagerv1alpha2.ClusterIssuer{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterIssuerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterIssuerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterIssuerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1alpha2.ClusterIssuer{}, f.defaultInformer) +} + +func (f *clusterIssuerInformer) Lister() v1alpha2.ClusterIssuerLister { + return v1alpha2.NewClusterIssuerLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/interface.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/interface.go new file mode 100644 index 0000000000..4c9d651534 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Certificates returns a CertificateInformer. + Certificates() CertificateInformer + // CertificateRequests returns a CertificateRequestInformer. + CertificateRequests() CertificateRequestInformer + // ClusterIssuers returns a ClusterIssuerInformer. + ClusterIssuers() ClusterIssuerInformer + // Issuers returns a IssuerInformer. + Issuers() IssuerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Certificates returns a CertificateInformer. +func (v *version) Certificates() CertificateInformer { + return &certificateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// CertificateRequests returns a CertificateRequestInformer. 
+func (v *version) CertificateRequests() CertificateRequestInformer { + return &certificateRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ClusterIssuers returns a ClusterIssuerInformer. +func (v *version) ClusterIssuers() ClusterIssuerInformer { + return &clusterIssuerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Issuers returns a IssuerInformer. +func (v *version) Issuers() IssuerInformer { + return &issuerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/issuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/issuer.go new file mode 100644 index 0000000000..3a33c1bf69 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2/issuer.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + time "time" + + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" +) + +// IssuerInformer provides access to a shared informer and lister for +// Issuers. +type IssuerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.IssuerLister +} + +type issuerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIssuerInformer constructs a new informer for Issuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIssuerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIssuerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIssuerInformer constructs a new informer for Issuer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIssuerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().Issuers(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertmanagerV1alpha2().Issuers(namespace).Watch(options) + }, + }, + &certmanagerv1alpha2.Issuer{}, + resyncPeriod, + indexers, + ) +} + +func (f *issuerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIssuerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *issuerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certmanagerv1alpha2.Issuer{}, f.defaultInformer) +} + +func (f *issuerInformer) Lister() v1alpha2.IssuerLister { + return v1alpha2.NewIssuerLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/factory.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/factory.go new file mode 100644 index 0000000000..2c152b1b2f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/factory.go @@ -0,0 +1,186 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + acme "knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme" + certmanager "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager" + internalinterfaces "knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. 
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Acme() acme.Interface + Certmanager() certmanager.Interface +} + +func (f *sharedInformerFactory) Acme() acme.Interface { + return acme.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Certmanager() certmanager.Interface { + return certmanager.New(f, f.namespace, f.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/generic.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/generic.go new file mode 100644 index 0000000000..de782d2222 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/generic.go @@ -0,0 +1,75 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + certmanagerv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=acme.cert-manager.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("challenges"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acme().V1alpha2().Challenges().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("orders"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acme().V1alpha2().Orders().Informer()}, nil + + // Group=cert-manager.io, Version=v1alpha2 + case certmanagerv1alpha2.SchemeGroupVersion.WithResource("certificates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1alpha2().Certificates().Informer()}, nil + case certmanagerv1alpha2.SchemeGroupVersion.WithResource("certificaterequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1alpha2().CertificateRequests().Informer()}, nil + case certmanagerv1alpha2.SchemeGroupVersion.WithResource("clusterissuers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1alpha2().ClusterIssuers().Informer()}, nil + case certmanagerv1alpha2.SchemeGroupVersion.WithResource("issuers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certmanager().V1alpha2().Issuers().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go 
b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..736507b184 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
+type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/client.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/client.go new file mode 100644 index 0000000000..c2952d90b1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package client + +import ( + "context" + + rest "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + versioned "knative.dev/serving/pkg/client/certmanager/clientset/versioned" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. 
+func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/clientset/versioned.Interface from context.") + } + return untyped.(versioned.Interface) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/fake/fake.go new file mode 100644 index 0000000000..a3f7871f19 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/client/fake/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + fake "knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake" + client "knative.dev/serving/pkg/client/certmanager/injection/client" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) 
+ return context.WithValue(ctx, client.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(client.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/clientset/versioned/fake.Clientset from context.") + } + return untyped.(*fake.Clientset) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/challenge.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/challenge.go new file mode 100644 index 0000000000..5a10d430d1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/challenge.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package challenge + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Acme().V1alpha2().Challenges() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha2.ChallengeInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2.ChallengeInformer from context.") + } + return untyped.(v1alpha2.ChallengeInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/fake/fake.go new file mode 100644 index 0000000000..58c20b6e1a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + challenge "knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = challenge.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Acme().V1alpha2().Challenges() + return context.WithValue(ctx, challenge.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/fake/fake.go new file mode 100644 index 0000000000..11ea36212d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + order "knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = order.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Acme().V1alpha2().Orders() + return context.WithValue(ctx, order.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/order.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/order.go new file mode 100644 index 0000000000..f5c36ffeb7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/order/order.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package order + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Acme().V1alpha2().Orders() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha2.OrderInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/acme/v1alpha2.OrderInformer from context.") + } + return untyped.(v1alpha2.OrderInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/certificate.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/certificate.go new file mode 100644 index 0000000000..297c4218f7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/certificate.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package certificate + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1alpha2().Certificates() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha2.CertificateInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2.CertificateInformer from context.") + } + return untyped.(v1alpha2.CertificateInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/fake/fake.go new file mode 100644 index 0000000000..135e5ae9fe --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + certificate "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = certificate.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1alpha2().Certificates() + return context.WithValue(ctx, certificate.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/certificaterequest.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/certificaterequest.go new file mode 100644 index 0000000000..683e942744 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/certificaterequest.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package certificaterequest + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1alpha2().CertificateRequests() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha2.CertificateRequestInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2.CertificateRequestInformer from context.") + } + return untyped.(v1alpha2.CertificateRequestInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/fake/fake.go new file mode 100644 index 0000000000..935af15d8d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + certificaterequest "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificaterequest" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = certificaterequest.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1alpha2().CertificateRequests() + return context.WithValue(ctx, certificaterequest.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/clusterissuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/clusterissuer.go new file mode 100644 index 0000000000..ab7647370f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/clusterissuer.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package clusterissuer + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1alpha2().ClusterIssuers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha2.ClusterIssuerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2.ClusterIssuerInformer from context.") + } + return untyped.(v1alpha2.ClusterIssuerInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/fake/fake.go new file mode 100644 index 0000000000..060f255521 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + clusterissuer "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = clusterissuer.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1alpha2().ClusterIssuers() + return context.WithValue(ctx, clusterissuer.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/fake/fake.go new file mode 100644 index 0000000000..4f72f4d4bd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + issuer "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer" + fake "knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake" +) + +var Get = issuer.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Certmanager().V1alpha2().Issuers() + return context.WithValue(ctx, issuer.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/issuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/issuer.go new file mode 100644 index 0000000000..af9559e120 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/issuer/issuer.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package issuer + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha2 "knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Certmanager().V1alpha2().Issuers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha2.IssuerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions/certmanager/v1alpha2.IssuerInformer from context.") + } + return untyped.(v1alpha2.IssuerInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/factory.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/factory.go new file mode 100644 index 0000000000..338b86442f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package factory + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + externalversions "knative.dev/serving/pkg/client/certmanager/informers/externalversions" + client "knative.dev/serving/pkg/client/certmanager/injection/client" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} + +// Get extracts the InformerFactory from the context. 
+func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/certmanager/informers/externalversions.SharedInformerFactory from context.") + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake/fake.go new file mode 100644 index 0000000000..6c15aa625f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/injection/informers/factory/fake/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + externalversions "knative.dev/serving/pkg/client/certmanager/informers/externalversions" + fake "knative.dev/serving/pkg/client/certmanager/injection/client/fake" + factory "knative.dev/serving/pkg/client/certmanager/injection/informers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/challenge.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/challenge.go new file mode 100644 index 0000000000..6347b77631 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/challenge.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ChallengeLister helps list Challenges. +type ChallengeLister interface { + // List lists all Challenges in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.Challenge, err error) + // Challenges returns an object that can list and get Challenges. + Challenges(namespace string) ChallengeNamespaceLister + ChallengeListerExpansion +} + +// challengeLister implements the ChallengeLister interface. +type challengeLister struct { + indexer cache.Indexer +} + +// NewChallengeLister returns a new ChallengeLister. +func NewChallengeLister(indexer cache.Indexer) ChallengeLister { + return &challengeLister{indexer: indexer} +} + +// List lists all Challenges in the indexer. +func (s *challengeLister) List(selector labels.Selector) (ret []*v1alpha2.Challenge, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Challenge)) + }) + return ret, err +} + +// Challenges returns an object that can list and get Challenges. +func (s *challengeLister) Challenges(namespace string) ChallengeNamespaceLister { + return challengeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ChallengeNamespaceLister helps list and get Challenges. +type ChallengeNamespaceLister interface { + // List lists all Challenges in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.Challenge, err error) + // Get retrieves the Challenge from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.Challenge, error) + ChallengeNamespaceListerExpansion +} + +// challengeNamespaceLister implements the ChallengeNamespaceLister +// interface. 
+type challengeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Challenges in the indexer for a given namespace. +func (s challengeNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Challenge, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Challenge)) + }) + return ret, err +} + +// Get retrieves the Challenge from the indexer for a given namespace and name. +func (s challengeNamespaceLister) Get(name string) (*v1alpha2.Challenge, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("challenge"), name) + } + return obj.(*v1alpha2.Challenge), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/expansion_generated.go new file mode 100644 index 0000000000..e1444b242b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// ChallengeListerExpansion allows custom methods to be added to +// ChallengeLister. 
+type ChallengeListerExpansion interface{} + +// ChallengeNamespaceListerExpansion allows custom methods to be added to +// ChallengeNamespaceLister. +type ChallengeNamespaceListerExpansion interface{} + +// OrderListerExpansion allows custom methods to be added to +// OrderLister. +type OrderListerExpansion interface{} + +// OrderNamespaceListerExpansion allows custom methods to be added to +// OrderNamespaceLister. +type OrderNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/order.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/order.go new file mode 100644 index 0000000000..d713d9e0fe --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2/order.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// OrderLister helps list Orders. +type OrderLister interface { + // List lists all Orders in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.Order, err error) + // Orders returns an object that can list and get Orders. 
+ Orders(namespace string) OrderNamespaceLister + OrderListerExpansion +} + +// orderLister implements the OrderLister interface. +type orderLister struct { + indexer cache.Indexer +} + +// NewOrderLister returns a new OrderLister. +func NewOrderLister(indexer cache.Indexer) OrderLister { + return &orderLister{indexer: indexer} +} + +// List lists all Orders in the indexer. +func (s *orderLister) List(selector labels.Selector) (ret []*v1alpha2.Order, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Order)) + }) + return ret, err +} + +// Orders returns an object that can list and get Orders. +func (s *orderLister) Orders(namespace string) OrderNamespaceLister { + return orderNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// OrderNamespaceLister helps list and get Orders. +type OrderNamespaceLister interface { + // List lists all Orders in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.Order, err error) + // Get retrieves the Order from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.Order, error) + OrderNamespaceListerExpansion +} + +// orderNamespaceLister implements the OrderNamespaceLister +// interface. +type orderNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Orders in the indexer for a given namespace. +func (s orderNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Order, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Order)) + }) + return ret, err +} + +// Get retrieves the Order from the indexer for a given namespace and name. 
+func (s orderNamespaceLister) Get(name string) (*v1alpha2.Order, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("order"), name) + } + return obj.(*v1alpha2.Order), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificate.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificate.go new file mode 100644 index 0000000000..7084d75be3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificate.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateLister helps list Certificates. +type CertificateLister interface { + // List lists all Certificates in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.Certificate, err error) + // Certificates returns an object that can list and get Certificates. + Certificates(namespace string) CertificateNamespaceLister + CertificateListerExpansion +} + +// certificateLister implements the CertificateLister interface. 
+type certificateLister struct { + indexer cache.Indexer +} + +// NewCertificateLister returns a new CertificateLister. +func NewCertificateLister(indexer cache.Indexer) CertificateLister { + return &certificateLister{indexer: indexer} +} + +// List lists all Certificates in the indexer. +func (s *certificateLister) List(selector labels.Selector) (ret []*v1alpha2.Certificate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Certificate)) + }) + return ret, err +} + +// Certificates returns an object that can list and get Certificates. +func (s *certificateLister) Certificates(namespace string) CertificateNamespaceLister { + return certificateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateNamespaceLister helps list and get Certificates. +type CertificateNamespaceLister interface { + // List lists all Certificates in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.Certificate, err error) + // Get retrieves the Certificate from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.Certificate, error) + CertificateNamespaceListerExpansion +} + +// certificateNamespaceLister implements the CertificateNamespaceLister +// interface. +type certificateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Certificates in the indexer for a given namespace. +func (s certificateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Certificate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Certificate)) + }) + return ret, err +} + +// Get retrieves the Certificate from the indexer for a given namespace and name. 
+func (s certificateNamespaceLister) Get(name string) (*v1alpha2.Certificate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("certificate"), name) + } + return obj.(*v1alpha2.Certificate), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificaterequest.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificaterequest.go new file mode 100644 index 0000000000..0543ff25fa --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/certificaterequest.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateRequestLister helps list CertificateRequests. +type CertificateRequestLister interface { + // List lists all CertificateRequests in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.CertificateRequest, err error) + // CertificateRequests returns an object that can list and get CertificateRequests. 
+ CertificateRequests(namespace string) CertificateRequestNamespaceLister + CertificateRequestListerExpansion +} + +// certificateRequestLister implements the CertificateRequestLister interface. +type certificateRequestLister struct { + indexer cache.Indexer +} + +// NewCertificateRequestLister returns a new CertificateRequestLister. +func NewCertificateRequestLister(indexer cache.Indexer) CertificateRequestLister { + return &certificateRequestLister{indexer: indexer} +} + +// List lists all CertificateRequests in the indexer. +func (s *certificateRequestLister) List(selector labels.Selector) (ret []*v1alpha2.CertificateRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.CertificateRequest)) + }) + return ret, err +} + +// CertificateRequests returns an object that can list and get CertificateRequests. +func (s *certificateRequestLister) CertificateRequests(namespace string) CertificateRequestNamespaceLister { + return certificateRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateRequestNamespaceLister helps list and get CertificateRequests. +type CertificateRequestNamespaceLister interface { + // List lists all CertificateRequests in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.CertificateRequest, err error) + // Get retrieves the CertificateRequest from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.CertificateRequest, error) + CertificateRequestNamespaceListerExpansion +} + +// certificateRequestNamespaceLister implements the CertificateRequestNamespaceLister +// interface. +type certificateRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CertificateRequests in the indexer for a given namespace. 
+func (s certificateRequestNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.CertificateRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.CertificateRequest)) + }) + return ret, err +} + +// Get retrieves the CertificateRequest from the indexer for a given namespace and name. +func (s certificateRequestNamespaceLister) Get(name string) (*v1alpha2.CertificateRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("certificaterequest"), name) + } + return obj.(*v1alpha2.CertificateRequest), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/clusterissuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/clusterissuer.go new file mode 100644 index 0000000000..12c4d1731f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/clusterissuer.go @@ -0,0 +1,65 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterIssuerLister helps list ClusterIssuers. +type ClusterIssuerLister interface { + // List lists all ClusterIssuers in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.ClusterIssuer, err error) + // Get retrieves the ClusterIssuer from the index for a given name. + Get(name string) (*v1alpha2.ClusterIssuer, error) + ClusterIssuerListerExpansion +} + +// clusterIssuerLister implements the ClusterIssuerLister interface. +type clusterIssuerLister struct { + indexer cache.Indexer +} + +// NewClusterIssuerLister returns a new ClusterIssuerLister. +func NewClusterIssuerLister(indexer cache.Indexer) ClusterIssuerLister { + return &clusterIssuerLister{indexer: indexer} +} + +// List lists all ClusterIssuers in the indexer. +func (s *clusterIssuerLister) List(selector labels.Selector) (ret []*v1alpha2.ClusterIssuer, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.ClusterIssuer)) + }) + return ret, err +} + +// Get retrieves the ClusterIssuer from the index for a given name. 
+func (s *clusterIssuerLister) Get(name string) (*v1alpha2.ClusterIssuer, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("clusterissuer"), name) + } + return obj.(*v1alpha2.ClusterIssuer), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/expansion_generated.go new file mode 100644 index 0000000000..f46d39de85 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/expansion_generated.go @@ -0,0 +1,47 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// CertificateListerExpansion allows custom methods to be added to +// CertificateLister. +type CertificateListerExpansion interface{} + +// CertificateNamespaceListerExpansion allows custom methods to be added to +// CertificateNamespaceLister. +type CertificateNamespaceListerExpansion interface{} + +// CertificateRequestListerExpansion allows custom methods to be added to +// CertificateRequestLister. +type CertificateRequestListerExpansion interface{} + +// CertificateRequestNamespaceListerExpansion allows custom methods to be added to +// CertificateRequestNamespaceLister. 
+type CertificateRequestNamespaceListerExpansion interface{} + +// ClusterIssuerListerExpansion allows custom methods to be added to +// ClusterIssuerLister. +type ClusterIssuerListerExpansion interface{} + +// IssuerListerExpansion allows custom methods to be added to +// IssuerLister. +type IssuerListerExpansion interface{} + +// IssuerNamespaceListerExpansion allows custom methods to be added to +// IssuerNamespaceLister. +type IssuerNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/issuer.go b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/issuer.go new file mode 100644 index 0000000000..3668aee71b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2/issuer.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + v1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IssuerLister helps list Issuers. +type IssuerLister interface { + // List lists all Issuers in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.Issuer, err error) + // Issuers returns an object that can list and get Issuers. 
+ Issuers(namespace string) IssuerNamespaceLister + IssuerListerExpansion +} + +// issuerLister implements the IssuerLister interface. +type issuerLister struct { + indexer cache.Indexer +} + +// NewIssuerLister returns a new IssuerLister. +func NewIssuerLister(indexer cache.Indexer) IssuerLister { + return &issuerLister{indexer: indexer} +} + +// List lists all Issuers in the indexer. +func (s *issuerLister) List(selector labels.Selector) (ret []*v1alpha2.Issuer, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Issuer)) + }) + return ret, err +} + +// Issuers returns an object that can list and get Issuers. +func (s *issuerLister) Issuers(namespace string) IssuerNamespaceLister { + return issuerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IssuerNamespaceLister helps list and get Issuers. +type IssuerNamespaceLister interface { + // List lists all Issuers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.Issuer, err error) + // Get retrieves the Issuer from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.Issuer, error) + IssuerNamespaceListerExpansion +} + +// issuerNamespaceLister implements the IssuerNamespaceLister +// interface. +type issuerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Issuers in the indexer for a given namespace. +func (s issuerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Issuer, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Issuer)) + }) + return ret, err +} + +// Get retrieves the Issuer from the indexer for a given namespace and name. 
+func (s issuerNamespaceLister) Get(name string) (*v1alpha2.Issuer, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("issuer"), name) + } + return obj.(*v1alpha2.Issuer), nil +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/clientset.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/clientset.go similarity index 74% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/clientset.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/clientset.go index 6707ffd1b9..6c637a6c0c 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/clientset.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,27 +19,25 @@ limitations under the License. 
package versioned import ( - autoscalingv1alpha1 "github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1" - networkingv1alpha1 "github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1" - servingv1alpha1 "github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1" - servingv1beta1 "github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1" + "fmt" + discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" + autoscalingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1" + networkingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1" + servingv1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1" + servingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1" + servingv1beta1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1" ) type Interface interface { Discovery() discovery.DiscoveryInterface AutoscalingV1alpha1() autoscalingv1alpha1.AutoscalingV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1alpha1.AutoscalingV1alpha1Interface NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Networking() networkingv1alpha1.NetworkingV1alpha1Interface ServingV1alpha1() servingv1alpha1.ServingV1alpha1Interface ServingV1beta1() servingv1beta1.ServingV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Serving() servingv1beta1.ServingV1beta1Interface + ServingV1() servingv1.ServingV1Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -50,6 +48,7 @@ type Clientset struct { networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client servingV1alpha1 *servingv1alpha1.ServingV1alpha1Client servingV1beta1 *servingv1beta1.ServingV1beta1Client + servingV1 *servingv1.ServingV1Client } // AutoscalingV1alpha1 retrieves the AutoscalingV1alpha1Client @@ -57,23 +56,11 @@ func (c *Clientset) AutoscalingV1alpha1() autoscalingv1alpha1.AutoscalingV1alpha return c.autoscalingV1alpha1 } -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. -func (c *Clientset) Autoscaling() autoscalingv1alpha1.AutoscalingV1alpha1Interface { - return c.autoscalingV1alpha1 -} - // NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { return c.networkingV1alpha1 } -// Deprecated: Networking retrieves the default version of NetworkingClient. -// Please explicitly pick a version. -func (c *Clientset) Networking() networkingv1alpha1.NetworkingV1alpha1Interface { - return c.networkingV1alpha1 -} - // ServingV1alpha1 retrieves the ServingV1alpha1Client func (c *Clientset) ServingV1alpha1() servingv1alpha1.ServingV1alpha1Interface { return c.servingV1alpha1 @@ -84,10 +71,9 @@ func (c *Clientset) ServingV1beta1() servingv1beta1.ServingV1beta1Interface { return c.servingV1beta1 } -// Deprecated: Serving retrieves the default version of ServingClient. -// Please explicitly pick a version. -func (c *Clientset) Serving() servingv1beta1.ServingV1beta1Interface { - return c.servingV1beta1 +// ServingV1 retrieves the ServingV1Client +func (c *Clientset) ServingV1() servingv1.ServingV1Interface { + return c.servingV1 } // Discovery retrieves the DiscoveryClient @@ -99,9 +85,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset @@ -122,6 +113,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.servingV1, err = servingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -138,6 +133,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.networkingV1alpha1 = networkingv1alpha1.NewForConfigOrDie(c) cs.servingV1alpha1 = servingv1alpha1.NewForConfigOrDie(c) cs.servingV1beta1 = servingv1beta1.NewForConfigOrDie(c) + cs.servingV1 = servingv1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -150,6 +146,7 @@ func New(c rest.Interface) *Clientset { cs.networkingV1alpha1 = networkingv1alpha1.New(c) cs.servingV1alpha1 = servingv1alpha1.New(c) cs.servingV1beta1 = servingv1beta1.New(c) + cs.servingV1 = servingv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/doc.go new file mode 100644 index 0000000000..e48c2aa446 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/clientset_generated.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..9a071bf4f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,110 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + autoscalingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1" + fakeautoscalingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake" + networkingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1" + fakenetworkingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake" + servingv1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1" + fakeservingv1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake" + servingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1" + fakeservingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake" + servingv1beta1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1" + fakeservingv1beta1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// AutoscalingV1alpha1 retrieves the AutoscalingV1alpha1Client +func (c *Clientset) AutoscalingV1alpha1() autoscalingv1alpha1.AutoscalingV1alpha1Interface { + return &fakeautoscalingv1alpha1.FakeAutoscalingV1alpha1{Fake: &c.Fake} +} + +// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client +func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { + return &fakenetworkingv1alpha1.FakeNetworkingV1alpha1{Fake: &c.Fake} +} + +// ServingV1alpha1 retrieves the ServingV1alpha1Client +func (c *Clientset) ServingV1alpha1() servingv1alpha1.ServingV1alpha1Interface { + return &fakeservingv1alpha1.FakeServingV1alpha1{Fake: &c.Fake} +} + +// ServingV1beta1 retrieves the ServingV1beta1Client +func (c *Clientset) ServingV1beta1() servingv1beta1.ServingV1beta1Interface { + return 
&fakeservingv1beta1.FakeServingV1beta1{Fake: &c.Fake} +} + +// ServingV1 retrieves the ServingV1Client +func (c *Clientset) ServingV1() servingv1.ServingV1Interface { + return &fakeservingv1.FakeServingV1{Fake: &c.Fake} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..2c4903250c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/register.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..c00c329ed3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,64 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + autoscalingv1alpha1.AddToScheme, + networkingv1alpha1.AddToScheme, + servingv1alpha1.AddToScheme, + servingv1beta1.AddToScheme, + servingv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7acc2dcf25 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/register.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/register.go similarity index 83% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/register.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/register.go index b7aed539d6..d35b6c429d 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/scheme/register.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,15 +19,16 @@ limitations under the License. package scheme import ( - autoscalingv1alpha1 "github.com/knative/serving/pkg/apis/autoscaling/v1alpha1" - networkingv1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - servingv1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - servingv1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" ) var Scheme = runtime.NewScheme() @@ -38,6 +39,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ networkingv1alpha1.AddToScheme, servingv1alpha1.AddToScheme, servingv1beta1.AddToScheme, + 
servingv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go similarity index 86% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go index 8d0ce8489e..03344f58c1 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/autoscaling_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/autoscaling/v1alpha1" - "github.com/knative/serving/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) type AutoscalingV1alpha1Interface interface { RESTClient() rest.Interface + MetricsGetter PodAutoscalersGetter } @@ -35,6 +35,10 @@ type AutoscalingV1alpha1Client struct { restClient rest.Interface } +func (c *AutoscalingV1alpha1Client) Metrics(namespace string) MetricInterface { + return newMetrics(c, namespace) +} + func (c *AutoscalingV1alpha1Client) PodAutoscalers(namespace string) PodAutoscalerInterface { return newPodAutoscalers(c, namespace) } @@ -71,7 +75,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go index a1c6bb9fe8..41e872fe9a 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The 
Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_autoscaling_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_autoscaling_client.go new file mode 100644 index 0000000000..1a172fd0c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_autoscaling_client.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1" +) + +type FakeAutoscalingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeAutoscalingV1alpha1) Metrics(namespace string) v1alpha1.MetricInterface { + return &FakeMetrics{c, namespace} +} + +func (c *FakeAutoscalingV1alpha1) PodAutoscalers(namespace string) v1alpha1.PodAutoscalerInterface { + return &FakePodAutoscalers{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAutoscalingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_metric.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_metric.go new file mode 100644 index 0000000000..41497c9f24 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_metric.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +// FakeMetrics implements MetricInterface +type FakeMetrics struct { + Fake *FakeAutoscalingV1alpha1 + ns string +} + +var metricsResource = schema.GroupVersionResource{Group: "autoscaling.internal.knative.dev", Version: "v1alpha1", Resource: "metrics"} + +var metricsKind = schema.GroupVersionKind{Group: "autoscaling.internal.knative.dev", Version: "v1alpha1", Kind: "Metric"} + +// Get takes name of the metric, and returns the corresponding metric object, and an error if there is any. +func (c *FakeMetrics) Get(name string, options v1.GetOptions) (result *v1alpha1.Metric, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(metricsResource, c.ns, name), &v1alpha1.Metric{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Metric), err +} + +// List takes label and field selectors, and returns the list of Metrics that match those selectors. +func (c *FakeMetrics) List(opts v1.ListOptions) (result *v1alpha1.MetricList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(metricsResource, metricsKind, c.ns, opts), &v1alpha1.MetricList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.MetricList{ListMeta: obj.(*v1alpha1.MetricList).ListMeta} + for _, item := range obj.(*v1alpha1.MetricList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested metrics. +func (c *FakeMetrics) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(metricsResource, c.ns, opts)) + +} + +// Create takes the representation of a metric and creates it. Returns the server's representation of the metric, and an error, if there is any. +func (c *FakeMetrics) Create(metric *v1alpha1.Metric) (result *v1alpha1.Metric, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(metricsResource, c.ns, metric), &v1alpha1.Metric{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Metric), err +} + +// Update takes the representation of a metric and updates it. Returns the server's representation of the metric, and an error, if there is any. +func (c *FakeMetrics) Update(metric *v1alpha1.Metric) (result *v1alpha1.Metric, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(metricsResource, c.ns, metric), &v1alpha1.Metric{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Metric), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMetrics) UpdateStatus(metric *v1alpha1.Metric) (*v1alpha1.Metric, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(metricsResource, "status", c.ns, metric), &v1alpha1.Metric{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Metric), err +} + +// Delete takes name of the metric and deletes it. Returns an error if one occurs. +func (c *FakeMetrics) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(metricsResource, c.ns, name), &v1alpha1.Metric{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMetrics) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(metricsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.MetricList{}) + return err +} + +// Patch applies the patch and returns the patched metric. +func (c *FakeMetrics) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Metric, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(metricsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Metric{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Metric), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_podautoscaler.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_podautoscaler.go new file mode 100644 index 0000000000..8f79c262b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/fake/fake_podautoscaler.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +// FakePodAutoscalers implements PodAutoscalerInterface +type FakePodAutoscalers struct { + Fake *FakeAutoscalingV1alpha1 + ns string +} + +var podautoscalersResource = schema.GroupVersionResource{Group: "autoscaling.internal.knative.dev", Version: "v1alpha1", Resource: "podautoscalers"} + +var podautoscalersKind = schema.GroupVersionKind{Group: "autoscaling.internal.knative.dev", Version: "v1alpha1", Kind: "PodAutoscaler"} + +// Get takes name of the podAutoscaler, and returns the corresponding podAutoscaler object, and an error if there is any. +func (c *FakePodAutoscalers) Get(name string, options v1.GetOptions) (result *v1alpha1.PodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podautoscalersResource, c.ns, name), &v1alpha1.PodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodAutoscaler), err +} + +// List takes label and field selectors, and returns the list of PodAutoscalers that match those selectors. +func (c *FakePodAutoscalers) List(opts v1.ListOptions) (result *v1alpha1.PodAutoscalerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(podautoscalersResource, podautoscalersKind, c.ns, opts), &v1alpha1.PodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PodAutoscalerList{ListMeta: obj.(*v1alpha1.PodAutoscalerList).ListMeta} + for _, item := range obj.(*v1alpha1.PodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podAutoscalers. +func (c *FakePodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podautoscalersResource, c.ns, opts)) + +} + +// Create takes the representation of a podAutoscaler and creates it. Returns the server's representation of the podAutoscaler, and an error, if there is any. +func (c *FakePodAutoscalers) Create(podAutoscaler *v1alpha1.PodAutoscaler) (result *v1alpha1.PodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podautoscalersResource, c.ns, podAutoscaler), &v1alpha1.PodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodAutoscaler), err +} + +// Update takes the representation of a podAutoscaler and updates it. Returns the server's representation of the podAutoscaler, and an error, if there is any. +func (c *FakePodAutoscalers) Update(podAutoscaler *v1alpha1.PodAutoscaler) (result *v1alpha1.PodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podautoscalersResource, c.ns, podAutoscaler), &v1alpha1.PodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodAutoscaler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakePodAutoscalers) UpdateStatus(podAutoscaler *v1alpha1.PodAutoscaler) (*v1alpha1.PodAutoscaler, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podautoscalersResource, "status", c.ns, podAutoscaler), &v1alpha1.PodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodAutoscaler), err +} + +// Delete takes name of the podAutoscaler and deletes it. Returns an error if one occurs. +func (c *FakePodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podautoscalersResource, c.ns, name), &v1alpha1.PodAutoscaler{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PodAutoscalerList{}) + return err +} + +// Patch applies the patch and returns the patched podAutoscaler. +func (c *FakePodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(podautoscalersResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodAutoscaler), err +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go index 66c99940e1..a9b63f8e07 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,4 +18,6 @@ limitations under the License. package v1alpha1 +type MetricExpansion interface{} + type PodAutoscalerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/metric.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/metric.go new file mode 100644 index 0000000000..5441e307ee --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/metric.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +// MetricsGetter has a method to return a MetricInterface. +// A group's client should implement this interface. +type MetricsGetter interface { + Metrics(namespace string) MetricInterface +} + +// MetricInterface has methods to work with Metric resources. 
+type MetricInterface interface { + Create(*v1alpha1.Metric) (*v1alpha1.Metric, error) + Update(*v1alpha1.Metric) (*v1alpha1.Metric, error) + UpdateStatus(*v1alpha1.Metric) (*v1alpha1.Metric, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Metric, error) + List(opts v1.ListOptions) (*v1alpha1.MetricList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Metric, err error) + MetricExpansion +} + +// metrics implements MetricInterface +type metrics struct { + client rest.Interface + ns string +} + +// newMetrics returns a Metrics +func newMetrics(c *AutoscalingV1alpha1Client, namespace string) *metrics { + return &metrics{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the metric, and returns the corresponding metric object, and an error if there is any. +func (c *metrics) Get(name string, options v1.GetOptions) (result *v1alpha1.Metric, err error) { + result = &v1alpha1.Metric{} + err = c.client.Get(). + Namespace(c.ns). + Resource("metrics"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Metrics that match those selectors. +func (c *metrics) List(opts v1.ListOptions) (result *v1alpha1.MetricList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.MetricList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("metrics"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested metrics. 
+func (c *metrics) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("metrics"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a metric and creates it. Returns the server's representation of the metric, and an error, if there is any. +func (c *metrics) Create(metric *v1alpha1.Metric) (result *v1alpha1.Metric, err error) { + result = &v1alpha1.Metric{} + err = c.client.Post(). + Namespace(c.ns). + Resource("metrics"). + Body(metric). + Do(). + Into(result) + return +} + +// Update takes the representation of a metric and updates it. Returns the server's representation of the metric, and an error, if there is any. +func (c *metrics) Update(metric *v1alpha1.Metric) (result *v1alpha1.Metric, err error) { + result = &v1alpha1.Metric{} + err = c.client.Put(). + Namespace(c.ns). + Resource("metrics"). + Name(metric.Name). + Body(metric). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *metrics) UpdateStatus(metric *v1alpha1.Metric) (result *v1alpha1.Metric, err error) { + result = &v1alpha1.Metric{} + err = c.client.Put(). + Namespace(c.ns). + Resource("metrics"). + Name(metric.Name). + SubResource("status"). + Body(metric). + Do(). + Into(result) + return +} + +// Delete takes name of the metric and deletes it. Returns an error if one occurs. +func (c *metrics) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("metrics"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *metrics) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("metrics"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched metric. +func (c *metrics) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Metric, err error) { + result = &v1alpha1.Metric{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("metrics"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go index 81411091f2..64fcbe2137 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/podautoscaler.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/autoscaling/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // PodAutoscalersGetter has a method to return a PodAutoscalerInterface. @@ -76,11 +78,16 @@ func (c *podAutoscalers) Get(name string, options v1.GetOptions) (result *v1alph // List takes label and field selectors, and returns the list of PodAutoscalers that match those selectors. func (c *podAutoscalers) List(opts v1.ListOptions) (result *v1alpha1.PodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.PodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("podautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *podAutoscalers) List(opts v1.ListOptions) (result *v1alpha1.PodAutoscal // Watch returns a watch.Interface that watches the requested podAutoscalers. func (c *podAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *podAutoscalers) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *podAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("podautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go index ee8cc2719b..a75c439c56 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/certificate.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // CertificatesGetter has a method to return a CertificateInterface. 
@@ -76,11 +78,16 @@ func (c *certificates) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of Certificates that match those selectors. func (c *certificates) List(opts v1.ListOptions) (result *v1alpha1.CertificateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.CertificateList{} err = c.client.Get(). Namespace(c.ns). Resource("certificates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *certificates) List(opts v1.ListOptions) (result *v1alpha1.CertificateLi // Watch returns a watch.Interface that watches the requested certificates. func (c *certificates) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("certificates"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *certificates) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *certificates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("certificates"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go index a1c6bb9fe8..41e872fe9a 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/autoscaling/v1alpha1/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_certificate.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_certificate.go new file mode 100644 index 0000000000..b216a354c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_certificate.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// FakeCertificates implements CertificateInterface +type FakeCertificates struct { + Fake *FakeNetworkingV1alpha1 + ns string +} + +var certificatesResource = schema.GroupVersionResource{Group: "networking.internal.knative.dev", Version: "v1alpha1", Resource: "certificates"} + +var certificatesKind = schema.GroupVersionKind{Group: "networking.internal.knative.dev", Version: "v1alpha1", Kind: "Certificate"} + +// Get takes name of the certificate, and returns the corresponding certificate object, and an error if there is any. +func (c *FakeCertificates) Get(name string, options v1.GetOptions) (result *v1alpha1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificatesResource, c.ns, name), &v1alpha1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Certificate), err +} + +// List takes label and field selectors, and returns the list of Certificates that match those selectors. +func (c *FakeCertificates) List(opts v1.ListOptions) (result *v1alpha1.CertificateList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(certificatesResource, certificatesKind, c.ns, opts), &v1alpha1.CertificateList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.CertificateList{ListMeta: obj.(*v1alpha1.CertificateList).ListMeta} + for _, item := range obj.(*v1alpha1.CertificateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificates. +func (c *FakeCertificates) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificatesResource, c.ns, opts)) + +} + +// Create takes the representation of a certificate and creates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *FakeCertificates) Create(certificate *v1alpha1.Certificate) (result *v1alpha1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificatesResource, c.ns, certificate), &v1alpha1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Certificate), err +} + +// Update takes the representation of a certificate and updates it. Returns the server's representation of the certificate, and an error, if there is any. +func (c *FakeCertificates) Update(certificate *v1alpha1.Certificate) (result *v1alpha1.Certificate, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificatesResource, c.ns, certificate), &v1alpha1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Certificate), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeCertificates) UpdateStatus(certificate *v1alpha1.Certificate) (*v1alpha1.Certificate, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificatesResource, "status", c.ns, certificate), &v1alpha1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Certificate), err +} + +// Delete takes name of the certificate and deletes it. Returns an error if one occurs. +func (c *FakeCertificates) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(certificatesResource, c.ns, name), &v1alpha1.Certificate{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.CertificateList{}) + return err +} + +// Patch applies the patch and returns the patched certificate. +func (c *FakeCertificates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Certificate, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Certificate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Certificate), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_ingress.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_ingress.go new file mode 100644 index 0000000000..dd20595fc3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_ingress.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// FakeIngresses implements IngressInterface +type FakeIngresses struct { + Fake *FakeNetworkingV1alpha1 + ns string +} + +var ingressesResource = schema.GroupVersionResource{Group: "networking.internal.knative.dev", Version: "v1alpha1", Resource: "ingresses"} + +var ingressesKind = schema.GroupVersionKind{Group: "networking.internal.knative.dev", Version: "v1alpha1", Kind: "Ingress"} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1alpha1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1alpha1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Ingress), err +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1alpha1.IngressList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1alpha1.IngressList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.IngressList{ListMeta: obj.(*v1alpha1.IngressList).ListMeta} + for _, item := range obj.(*v1alpha1.IngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingresses. 
+func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) + +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Create(ingress *v1alpha1.Ingress) (result *v1alpha1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1alpha1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Ingress), err +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Update(ingress *v1alpha1.Ingress) (result *v1alpha1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1alpha1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Ingress), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIngresses) UpdateStatus(ingress *v1alpha1.Ingress) (*v1alpha1.Ingress, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1alpha1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Ingress), err +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1alpha1.Ingress{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.IngressList{}) + return err +} + +// Patch applies the patch and returns the patched ingress. +func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Ingress), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_networking_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_networking_client.go new file mode 100644 index 0000000000..439ee16a55 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -0,0 +1,48 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1" +) + +type FakeNetworkingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1alpha1) Certificates(namespace string) v1alpha1.CertificateInterface { + return &FakeCertificates{c, namespace} +} + +func (c *FakeNetworkingV1alpha1) Ingresses(namespace string) v1alpha1.IngressInterface { + return &FakeIngresses{c, namespace} +} + +func (c *FakeNetworkingV1alpha1) ServerlessServices(namespace string) v1alpha1.ServerlessServiceInterface { + return &FakeServerlessServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_serverlessservice.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_serverlessservice.go new file mode 100644 index 0000000000..92a9fc106e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/fake/fake_serverlessservice.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// FakeServerlessServices implements ServerlessServiceInterface +type FakeServerlessServices struct { + Fake *FakeNetworkingV1alpha1 + ns string +} + +var serverlessservicesResource = schema.GroupVersionResource{Group: "networking.internal.knative.dev", Version: "v1alpha1", Resource: "serverlessservices"} + +var serverlessservicesKind = schema.GroupVersionKind{Group: "networking.internal.knative.dev", Version: "v1alpha1", Kind: "ServerlessService"} + +// Get takes name of the serverlessService, and returns the corresponding serverlessService object, and an error if there is any. +func (c *FakeServerlessServices) Get(name string, options v1.GetOptions) (result *v1alpha1.ServerlessService, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(serverlessservicesResource, c.ns, name), &v1alpha1.ServerlessService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServerlessService), err +} + +// List takes label and field selectors, and returns the list of ServerlessServices that match those selectors. +func (c *FakeServerlessServices) List(opts v1.ListOptions) (result *v1alpha1.ServerlessServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(serverlessservicesResource, serverlessservicesKind, c.ns, opts), &v1alpha1.ServerlessServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ServerlessServiceList{ListMeta: obj.(*v1alpha1.ServerlessServiceList).ListMeta} + for _, item := range obj.(*v1alpha1.ServerlessServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serverlessServices. +func (c *FakeServerlessServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(serverlessservicesResource, c.ns, opts)) + +} + +// Create takes the representation of a serverlessService and creates it. Returns the server's representation of the serverlessService, and an error, if there is any. +func (c *FakeServerlessServices) Create(serverlessService *v1alpha1.ServerlessService) (result *v1alpha1.ServerlessService, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(serverlessservicesResource, c.ns, serverlessService), &v1alpha1.ServerlessService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServerlessService), err +} + +// Update takes the representation of a serverlessService and updates it. Returns the server's representation of the serverlessService, and an error, if there is any. +func (c *FakeServerlessServices) Update(serverlessService *v1alpha1.ServerlessService) (result *v1alpha1.ServerlessService, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(serverlessservicesResource, c.ns, serverlessService), &v1alpha1.ServerlessService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServerlessService), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServerlessServices) UpdateStatus(serverlessService *v1alpha1.ServerlessService) (*v1alpha1.ServerlessService, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(serverlessservicesResource, "status", c.ns, serverlessService), &v1alpha1.ServerlessService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServerlessService), err +} + +// Delete takes name of the serverlessService and deletes it. Returns an error if one occurs. +func (c *FakeServerlessServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(serverlessservicesResource, c.ns, name), &v1alpha1.ServerlessService{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServerlessServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(serverlessservicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ServerlessServiceList{}) + return err +} + +// Patch applies the patch and returns the patched serverlessService. +func (c *FakeServerlessServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ServerlessService, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(serverlessservicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.ServerlessService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ServerlessService), err +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go index 808a1cd98c..de5c9dac5b 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,8 +20,6 @@ package v1alpha1 type CertificateExpansion interface{} -type ClusterIngressExpansion interface{} - type IngressExpansion interface{} type ServerlessServiceExpansion interface{} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go index 0158983bfb..1610ffc7e4 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/ingress.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // IngressesGetter has a method to return a IngressInterface. @@ -76,11 +78,16 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1alpha1.In // List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
func (c *ingresses) List(opts v1.ListOptions) (result *v1alpha1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1alpha1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go similarity index 85% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go index 31681c7caa..fc7fd0617b 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/networking_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,16 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - "github.com/knative/serving/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface CertificatesGetter - ClusterIngressesGetter IngressesGetter ServerlessServicesGetter } @@ -42,10 +40,6 @@ func (c *NetworkingV1alpha1Client) Certificates(namespace string) CertificateInt return newCertificates(c, namespace) } -func (c *NetworkingV1alpha1Client) ClusterIngresses() ClusterIngressInterface { - return newClusterIngresses(c) -} - func (c *NetworkingV1alpha1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } @@ -86,7 +80,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go index c728b95a00..7fbd71d4e8 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go +++ 
b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/serverlessservice.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/networking/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // ServerlessServicesGetter has a method to return a ServerlessServiceInterface. @@ -76,11 +78,16 @@ func (c *serverlessServices) Get(name string, options v1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of ServerlessServices that match those selectors. func (c *serverlessServices) List(opts v1.ListOptions) (result *v1alpha1.ServerlessServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ServerlessServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("serverlessservices"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *serverlessServices) List(opts v1.ListOptions) (result *v1alpha1.Serverl // Watch returns a watch.Interface that watches the requested serverlessServices. 
func (c *serverlessServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serverlessservices"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *serverlessServices) Delete(name string, options *v1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *serverlessServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("serverlessservices"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/configuration.go new file mode 100644 index 0000000000..2bff52a806 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/configuration.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/serving/pkg/apis/serving/v1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +// ConfigurationsGetter has a method to return a ConfigurationInterface. +// A group's client should implement this interface. +type ConfigurationsGetter interface { + Configurations(namespace string) ConfigurationInterface +} + +// ConfigurationInterface has methods to work with Configuration resources. +type ConfigurationInterface interface { + Create(*v1.Configuration) (*v1.Configuration, error) + Update(*v1.Configuration) (*v1.Configuration, error) + UpdateStatus(*v1.Configuration) (*v1.Configuration, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Configuration, error) + List(opts metav1.ListOptions) (*v1.ConfigurationList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Configuration, err error) + ConfigurationExpansion +} + +// configurations implements ConfigurationInterface +type configurations struct { + client rest.Interface + ns string +} + +// newConfigurations returns a Configurations +func newConfigurations(c *ServingV1Client, namespace string) *configurations { + return &configurations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the configuration, and returns the corresponding configuration object, and an error if there is any. +func (c *configurations) Get(name string, options metav1.GetOptions) (result *v1.Configuration, err error) { + result = &v1.Configuration{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configurations"). 
+ Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Configurations that match those selectors. +func (c *configurations) List(opts metav1.ListOptions) (result *v1.ConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ConfigurationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configurations. +func (c *configurations) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a configuration and creates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *configurations) Create(configuration *v1.Configuration) (result *v1.Configuration, err error) { + result = &v1.Configuration{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configurations"). + Body(configuration). + Do(). + Into(result) + return +} + +// Update takes the representation of a configuration and updates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *configurations) Update(configuration *v1.Configuration) (result *v1.Configuration, err error) { + result = &v1.Configuration{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configurations"). + Name(configuration.Name). + Body(configuration). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *configurations) UpdateStatus(configuration *v1.Configuration) (result *v1.Configuration, err error) { + result = &v1.Configuration{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configurations"). + Name(configuration.Name). + SubResource("status"). + Body(configuration). + Do(). + Into(result) + return +} + +// Delete takes name of the configuration and deletes it. Returns an error if one occurs. +func (c *configurations) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("configurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched configuration. +func (c *configurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Configuration, err error) { + result = &v1.Configuration{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("configurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/doc.go new file mode 100644 index 0000000000..5b83bd1f41 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_configuration.go new file mode 100644 index 0000000000..7a55655155 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_configuration.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// FakeConfigurations implements ConfigurationInterface +type FakeConfigurations struct { + Fake *FakeServingV1 + ns string +} + +var configurationsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1", Resource: "configurations"} + +var configurationsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1", Kind: "Configuration"} + +// Get takes name of the configuration, and returns the corresponding configuration object, and an error if there is any. +func (c *FakeConfigurations) Get(name string, options v1.GetOptions) (result *servingv1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(configurationsResource, c.ns, name), &servingv1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Configuration), err +} + +// List takes label and field selectors, and returns the list of Configurations that match those selectors. +func (c *FakeConfigurations) List(opts v1.ListOptions) (result *servingv1.ConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(configurationsResource, configurationsKind, c.ns, opts), &servingv1.ConfigurationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &servingv1.ConfigurationList{ListMeta: obj.(*servingv1.ConfigurationList).ListMeta} + for _, item := range obj.(*servingv1.ConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configurations. +func (c *FakeConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(configurationsResource, c.ns, opts)) + +} + +// Create takes the representation of a configuration and creates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Create(configuration *servingv1.Configuration) (result *servingv1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(configurationsResource, c.ns, configuration), &servingv1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Configuration), err +} + +// Update takes the representation of a configuration and updates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Update(configuration *servingv1.Configuration) (result *servingv1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(configurationsResource, c.ns, configuration), &servingv1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Configuration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeConfigurations) UpdateStatus(configuration *servingv1.Configuration) (*servingv1.Configuration, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(configurationsResource, "status", c.ns, configuration), &servingv1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Configuration), err +} + +// Delete takes name of the configuration and deletes it. Returns an error if one occurs. +func (c *FakeConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(configurationsResource, c.ns, name), &servingv1.Configuration{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(configurationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &servingv1.ConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched configuration. +func (c *FakeConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *servingv1.Configuration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(configurationsResource, c.ns, name, pt, data, subresources...), &servingv1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Configuration), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_revision.go new file mode 100644 index 0000000000..2d57ce7b5a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_revision.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// FakeRevisions implements RevisionInterface +type FakeRevisions struct { + Fake *FakeServingV1 + ns string +} + +var revisionsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1", Resource: "revisions"} + +var revisionsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1", Kind: "Revision"} + +// Get takes name of the revision, and returns the corresponding revision object, and an error if there is any. +func (c *FakeRevisions) Get(name string, options v1.GetOptions) (result *servingv1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(revisionsResource, c.ns, name), &servingv1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Revision), err +} + +// List takes label and field selectors, and returns the list of Revisions that match those selectors. +func (c *FakeRevisions) List(opts v1.ListOptions) (result *servingv1.RevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(revisionsResource, revisionsKind, c.ns, opts), &servingv1.RevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &servingv1.RevisionList{ListMeta: obj.(*servingv1.RevisionList).ListMeta} + for _, item := range obj.(*servingv1.RevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested revisions. 
+func (c *FakeRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(revisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a revision and creates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Create(revision *servingv1.Revision) (result *servingv1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(revisionsResource, c.ns, revision), &servingv1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Revision), err +} + +// Update takes the representation of a revision and updates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Update(revision *servingv1.Revision) (result *servingv1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(revisionsResource, c.ns, revision), &servingv1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Revision), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRevisions) UpdateStatus(revision *servingv1.Revision) (*servingv1.Revision, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(revisionsResource, "status", c.ns, revision), &servingv1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Revision), err +} + +// Delete takes name of the revision and deletes it. Returns an error if one occurs. +func (c *FakeRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(revisionsResource, c.ns, name), &servingv1.Revision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(revisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &servingv1.RevisionList{}) + return err +} + +// Patch applies the patch and returns the patched revision. +func (c *FakeRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *servingv1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(revisionsResource, c.ns, name, pt, data, subresources...), &servingv1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Revision), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_route.go new file mode 100644 index 0000000000..00692ab987 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_route.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// FakeRoutes implements RouteInterface +type FakeRoutes struct { + Fake *FakeServingV1 + ns string +} + +var routesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1", Resource: "routes"} + +var routesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1", Kind: "Route"} + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *FakeRoutes) Get(name string, options v1.GetOptions) (result *servingv1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(routesResource, c.ns, name), &servingv1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Route), err +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. +func (c *FakeRoutes) List(opts v1.ListOptions) (result *servingv1.RouteList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(routesResource, routesKind, c.ns, opts), &servingv1.RouteList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &servingv1.RouteList{ListMeta: obj.(*servingv1.RouteList).ListMeta} + for _, item := range obj.(*servingv1.RouteList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *FakeRoutes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(routesResource, c.ns, opts)) + +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Create(route *servingv1.Route) (result *servingv1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(routesResource, c.ns, route), &servingv1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Route), err +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Update(route *servingv1.Route) (result *servingv1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(routesResource, c.ns, route), &servingv1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Route), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRoutes) UpdateStatus(route *servingv1.Route) (*servingv1.Route, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(routesResource, "status", c.ns, route), &servingv1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Route), err +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *FakeRoutes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(routesResource, c.ns, name), &servingv1.Route{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRoutes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(routesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &servingv1.RouteList{}) + return err +} + +// Patch applies the patch and returns the patched route. +func (c *FakeRoutes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *servingv1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(routesResource, c.ns, name, pt, data, subresources...), &servingv1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Route), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_service.go new file mode 100644 index 0000000000..f9f335e282 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_service.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeServingV1 + ns string +} + +var servicesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1", Resource: "services"} + +var servicesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1", Kind: "Service"} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *FakeServices) Get(name string, options v1.GetOptions) (result *servingv1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &servingv1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Service), err +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *FakeServices) List(opts v1.ListOptions) (result *servingv1.ServiceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &servingv1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &servingv1.ServiceList{ListMeta: obj.(*servingv1.ServiceList).ListMeta} + for _, item := range obj.(*servingv1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Create(service *servingv1.Service) (result *servingv1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &servingv1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Service), err +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Update(service *servingv1.Service) (result *servingv1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &servingv1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Service), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServices) UpdateStatus(service *servingv1.Service) (*servingv1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &servingv1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Service), err +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &servingv1.Service{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &servingv1.ServiceList{}) + return err +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *servingv1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &servingv1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*servingv1.Service), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_serving_client.go new file mode 100644 index 0000000000..617587c4c2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/fake/fake_serving_client.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1" +) + +type FakeServingV1 struct { + *testing.Fake +} + +func (c *FakeServingV1) Configurations(namespace string) v1.ConfigurationInterface { + return &FakeConfigurations{c, namespace} +} + +func (c *FakeServingV1) Revisions(namespace string) v1.RevisionInterface { + return &FakeRevisions{c, namespace} +} + +func (c *FakeServingV1) Routes(namespace string) v1.RouteInterface { + return &FakeRoutes{c, namespace} +} + +func (c *FakeServingV1) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeServingV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/generated_expansion.go new file mode 100644 index 0000000000..4c0460b13c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type ConfigurationExpansion interface{} + +type RevisionExpansion interface{} + +type RouteExpansion interface{} + +type ServiceExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/revision.go new file mode 100644 index 0000000000..e6d5120437 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/revision.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/serving/pkg/apis/serving/v1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +// RevisionsGetter has a method to return a RevisionInterface. +// A group's client should implement this interface. +type RevisionsGetter interface { + Revisions(namespace string) RevisionInterface +} + +// RevisionInterface has methods to work with Revision resources. 
+type RevisionInterface interface { + Create(*v1.Revision) (*v1.Revision, error) + Update(*v1.Revision) (*v1.Revision, error) + UpdateStatus(*v1.Revision) (*v1.Revision, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Revision, error) + List(opts metav1.ListOptions) (*v1.RevisionList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Revision, err error) + RevisionExpansion +} + +// revisions implements RevisionInterface +type revisions struct { + client rest.Interface + ns string +} + +// newRevisions returns a Revisions +func newRevisions(c *ServingV1Client, namespace string) *revisions { + return &revisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the revision, and returns the corresponding revision object, and an error if there is any. +func (c *revisions) Get(name string, options metav1.GetOptions) (result *v1.Revision, err error) { + result = &v1.Revision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("revisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Revisions that match those selectors. +func (c *revisions) List(opts metav1.ListOptions) (result *v1.RevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("revisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested revisions. 
+func (c *revisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("revisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a revision and creates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *revisions) Create(revision *v1.Revision) (result *v1.Revision, err error) { + result = &v1.Revision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("revisions"). + Body(revision). + Do(). + Into(result) + return +} + +// Update takes the representation of a revision and updates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *revisions) Update(revision *v1.Revision) (result *v1.Revision, err error) { + result = &v1.Revision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("revisions"). + Name(revision.Name). + Body(revision). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *revisions) UpdateStatus(revision *v1.Revision) (result *v1.Revision, err error) { + result = &v1.Revision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("revisions"). + Name(revision.Name). + SubResource("status"). + Body(revision). + Do(). + Into(result) + return +} + +// Delete takes name of the revision and deletes it. Returns an error if one occurs. +func (c *revisions) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("revisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *revisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("revisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched revision. +func (c *revisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Revision, err error) { + result = &v1.Revision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("revisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/route.go new file mode 100644 index 0000000000..b3105f3a95 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/route.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/serving/pkg/apis/serving/v1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +// RoutesGetter has a method to return a RouteInterface. +// A group's client should implement this interface. +type RoutesGetter interface { + Routes(namespace string) RouteInterface +} + +// RouteInterface has methods to work with Route resources. +type RouteInterface interface { + Create(*v1.Route) (*v1.Route, error) + Update(*v1.Route) (*v1.Route, error) + UpdateStatus(*v1.Route) (*v1.Route, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Route, error) + List(opts metav1.ListOptions) (*v1.RouteList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Route, err error) + RouteExpansion +} + +// routes implements RouteInterface +type routes struct { + client rest.Interface + ns string +} + +// newRoutes returns a Routes +func newRoutes(c *ServingV1Client, namespace string) *routes { + return &routes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *routes) Get(name string, options metav1.GetOptions) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Get(). + Namespace(c.ns). + Resource("routes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. 
+func (c *routes) List(opts metav1.ListOptions) (result *v1.RouteList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RouteList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *routes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *routes) Create(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Post(). + Namespace(c.ns). + Resource("routes"). + Body(route). + Do(). + Into(result) + return +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. +func (c *routes) Update(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Put(). + Namespace(c.ns). + Resource("routes"). + Name(route.Name). + Body(route). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *routes) UpdateStatus(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Put(). + Namespace(c.ns). + Resource("routes"). + Name(route.Name). + SubResource("status"). + Body(route). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *routes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("routes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *routes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched route. +func (c *routes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("routes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/service.go new file mode 100644 index 0000000000..a2148ae012 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/service.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/serving/pkg/apis/serving/v1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. 
+type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Service, error) + List(opts metav1.ListOptions) (*v1.ServiceList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *ServingV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. 
+func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *services) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/serving_client.go new file mode 100644 index 0000000000..25925ba2d0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1/serving_client.go @@ -0,0 +1,104 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + rest "k8s.io/client-go/rest" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/client/clientset/versioned/scheme" +) + +type ServingV1Interface interface { + RESTClient() rest.Interface + ConfigurationsGetter + RevisionsGetter + RoutesGetter + ServicesGetter +} + +// ServingV1Client is used to interact with features provided by the serving.knative.dev group. +type ServingV1Client struct { + restClient rest.Interface +} + +func (c *ServingV1Client) Configurations(namespace string) ConfigurationInterface { + return newConfigurations(c, namespace) +} + +func (c *ServingV1Client) Revisions(namespace string) RevisionInterface { + return newRevisions(c, namespace) +} + +func (c *ServingV1Client) Routes(namespace string) RouteInterface { + return newRoutes(c, namespace) +} + +func (c *ServingV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +// NewForConfig creates a new ServingV1Client for the given config. +func NewForConfig(c *rest.Config) (*ServingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ServingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ServingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ServingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ServingV1Client for the given RESTClient. 
+func New(c rest.Interface) *ServingV1Client { + return &ServingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ServingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go index 17c02afdc8..1d583fdbaf 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/configuration.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // ConfigurationsGetter has a method to return a ConfigurationInterface. @@ -76,11 +78,16 @@ func (c *configurations) Get(name string, options v1.GetOptions) (result *v1alph // List takes label and field selectors, and returns the list of Configurations that match those selectors. func (c *configurations) List(opts v1.ListOptions) (result *v1alpha1.ConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ConfigurationList{} err = c.client.Get(). Namespace(c.ns). Resource("configurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *configurations) List(opts v1.ListOptions) (result *v1alpha1.Configurati // Watch returns a watch.Interface that watches the requested configurations. func (c *configurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *configurations) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *configurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go index a1c6bb9fe8..41e872fe9a 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_configuration.go new file mode 100644 index 0000000000..670989b3f5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_configuration.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// FakeConfigurations implements ConfigurationInterface +type FakeConfigurations struct { + Fake *FakeServingV1alpha1 + ns string +} + +var configurationsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1alpha1", Resource: "configurations"} + +var configurationsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Configuration"} + +// Get takes name of the configuration, and returns the corresponding configuration object, and an error if there is any. +func (c *FakeConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(configurationsResource, c.ns, name), &v1alpha1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Configuration), err +} + +// List takes label and field selectors, and returns the list of Configurations that match those selectors. +func (c *FakeConfigurations) List(opts v1.ListOptions) (result *v1alpha1.ConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(configurationsResource, configurationsKind, c.ns, opts), &v1alpha1.ConfigurationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ConfigurationList{ListMeta: obj.(*v1alpha1.ConfigurationList).ListMeta} + for _, item := range obj.(*v1alpha1.ConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configurations. +func (c *FakeConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(configurationsResource, c.ns, opts)) + +} + +// Create takes the representation of a configuration and creates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Create(configuration *v1alpha1.Configuration) (result *v1alpha1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(configurationsResource, c.ns, configuration), &v1alpha1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Configuration), err +} + +// Update takes the representation of a configuration and updates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Update(configuration *v1alpha1.Configuration) (result *v1alpha1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(configurationsResource, c.ns, configuration), &v1alpha1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Configuration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeConfigurations) UpdateStatus(configuration *v1alpha1.Configuration) (*v1alpha1.Configuration, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(configurationsResource, "status", c.ns, configuration), &v1alpha1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Configuration), err +} + +// Delete takes name of the configuration and deletes it. Returns an error if one occurs. +func (c *FakeConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(configurationsResource, c.ns, name), &v1alpha1.Configuration{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(configurationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched configuration. +func (c *FakeConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Configuration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(configurationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Configuration), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_revision.go new file mode 100644 index 0000000000..113ab1aab9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_revision.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// FakeRevisions implements RevisionInterface +type FakeRevisions struct { + Fake *FakeServingV1alpha1 + ns string +} + +var revisionsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1alpha1", Resource: "revisions"} + +var revisionsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Revision"} + +// Get takes name of the revision, and returns the corresponding revision object, and an error if there is any. +func (c *FakeRevisions) Get(name string, options v1.GetOptions) (result *v1alpha1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(revisionsResource, c.ns, name), &v1alpha1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Revision), err +} + +// List takes label and field selectors, and returns the list of Revisions that match those selectors. +func (c *FakeRevisions) List(opts v1.ListOptions) (result *v1alpha1.RevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(revisionsResource, revisionsKind, c.ns, opts), &v1alpha1.RevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RevisionList{ListMeta: obj.(*v1alpha1.RevisionList).ListMeta} + for _, item := range obj.(*v1alpha1.RevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested revisions. 
+func (c *FakeRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(revisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a revision and creates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Create(revision *v1alpha1.Revision) (result *v1alpha1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(revisionsResource, c.ns, revision), &v1alpha1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Revision), err +} + +// Update takes the representation of a revision and updates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Update(revision *v1alpha1.Revision) (result *v1alpha1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(revisionsResource, c.ns, revision), &v1alpha1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Revision), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRevisions) UpdateStatus(revision *v1alpha1.Revision) (*v1alpha1.Revision, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(revisionsResource, "status", c.ns, revision), &v1alpha1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Revision), err +} + +// Delete takes name of the revision and deletes it. Returns an error if one occurs. +func (c *FakeRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(revisionsResource, c.ns, name), &v1alpha1.Revision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(revisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RevisionList{}) + return err +} + +// Patch applies the patch and returns the patched revision. +func (c *FakeRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(revisionsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Revision), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_route.go new file mode 100644 index 0000000000..a972e90cf4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_route.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// FakeRoutes implements RouteInterface +type FakeRoutes struct { + Fake *FakeServingV1alpha1 + ns string +} + +var routesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1alpha1", Resource: "routes"} + +var routesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Route"} + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *FakeRoutes) Get(name string, options v1.GetOptions) (result *v1alpha1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(routesResource, c.ns, name), &v1alpha1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Route), err +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. +func (c *FakeRoutes) List(opts v1.ListOptions) (result *v1alpha1.RouteList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(routesResource, routesKind, c.ns, opts), &v1alpha1.RouteList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RouteList{ListMeta: obj.(*v1alpha1.RouteList).ListMeta} + for _, item := range obj.(*v1alpha1.RouteList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *FakeRoutes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(routesResource, c.ns, opts)) + +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Create(route *v1alpha1.Route) (result *v1alpha1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(routesResource, c.ns, route), &v1alpha1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Route), err +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Update(route *v1alpha1.Route) (result *v1alpha1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(routesResource, c.ns, route), &v1alpha1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Route), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRoutes) UpdateStatus(route *v1alpha1.Route) (*v1alpha1.Route, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(routesResource, "status", c.ns, route), &v1alpha1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Route), err +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *FakeRoutes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(routesResource, c.ns, name), &v1alpha1.Route{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRoutes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(routesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RouteList{}) + return err +} + +// Patch applies the patch and returns the patched route. +func (c *FakeRoutes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(routesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Route), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_service.go new file mode 100644 index 0000000000..c319e904d8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_service.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeServingV1alpha1 + ns string +} + +var servicesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1alpha1", Resource: "services"} + +var servicesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1alpha1", Kind: "Service"} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *FakeServices) Get(name string, options v1.GetOptions) (result *v1alpha1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1alpha1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Service), err +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *FakeServices) List(opts v1.ListOptions) (result *v1alpha1.ServiceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1alpha1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ServiceList{ListMeta: obj.(*v1alpha1.ServiceList).ListMeta} + for _, item := range obj.(*v1alpha1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. 
+func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Create(service *v1alpha1.Service) (result *v1alpha1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1alpha1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Service), err +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Update(service *v1alpha1.Service) (result *v1alpha1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1alpha1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Service), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServices) UpdateStatus(service *v1alpha1.Service) (*v1alpha1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1alpha1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Service), err +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &v1alpha1.Service{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ServiceList{}) + return err +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Service), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_serving_client.go new file mode 100644 index 0000000000..dfe37782ed --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/fake/fake_serving_client.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1" +) + +type FakeServingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeServingV1alpha1) Configurations(namespace string) v1alpha1.ConfigurationInterface { + return &FakeConfigurations{c, namespace} +} + +func (c *FakeServingV1alpha1) Revisions(namespace string) v1alpha1.RevisionInterface { + return &FakeRevisions{c, namespace} +} + +func (c *FakeServingV1alpha1) Routes(namespace string) v1alpha1.RouteInterface { + return &FakeRoutes{c, namespace} +} + +func (c *FakeServingV1alpha1) Services(namespace string) v1alpha1.ServiceInterface { + return &FakeServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeServingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go index 6ce17decf7..1e9bae5365 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go index 4299306610..efac4f465a 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/revision.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // RevisionsGetter has a method to return a RevisionInterface. @@ -76,11 +78,16 @@ func (c *revisions) Get(name string, options v1.GetOptions) (result *v1alpha1.Re // List takes label and field selectors, and returns the list of Revisions that match those selectors. func (c *revisions) List(opts v1.ListOptions) (result *v1alpha1.RevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RevisionList{} err = c.client.Get(). 
Namespace(c.ns). Resource("revisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *revisions) List(opts v1.ListOptions) (result *v1alpha1.RevisionList, er // Watch returns a watch.Interface that watches the requested revisions. func (c *revisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("revisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *revisions) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *revisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("revisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go index 320477113f..82e9d2373b 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/route.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // RoutesGetter has a method to return a RouteInterface. @@ -76,11 +78,16 @@ func (c *routes) Get(name string, options v1.GetOptions) (result *v1alpha1.Route // List takes label and field selectors, and returns the list of Routes that match those selectors. func (c *routes) List(opts v1.ListOptions) (result *v1alpha1.RouteList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.RouteList{} err = c.client.Get(). Namespace(c.ns). Resource("routes"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *routes) List(opts v1.ListOptions) (result *v1alpha1.RouteList, err erro // Watch returns a watch.Interface that watches the requested routes. func (c *routes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("routes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *routes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *routes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("routes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go index 1309f2fb37..a5b669ab38 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // ServicesGetter has a method to return a ServiceInterface. @@ -76,11 +78,16 @@ func (c *services) Get(name string, options v1.GetOptions) (result *v1alpha1.Ser // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts v1.ListOptions) (result *v1alpha1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1alpha1.ServiceList{} err = c.client.Get(). Namespace(c.ns). 
Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *services) List(opts v1.ListOptions) (result *v1alpha1.ServiceList, err // Watch returns a watch.Interface that watches the requested services. func (c *services) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *services) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *services) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("services"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go index 0a75f4a864..e6ade911be 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1/serving_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" - "github.com/knative/serving/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) type ServingV1alpha1Interface interface { @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go similarity index 90% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go index d065962d65..abc50b8a1b 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/configuration.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // ConfigurationsGetter has a method to return a ConfigurationInterface. @@ -76,11 +78,16 @@ func (c *configurations) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of Configurations that match those selectors. func (c *configurations) List(opts v1.ListOptions) (result *v1beta1.ConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ConfigurationList{} err = c.client.Get(). Namespace(c.ns). Resource("configurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *configurations) List(opts v1.ListOptions) (result *v1beta1.Configuratio // Watch returns a watch.Interface that watches the requested configurations. func (c *configurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configurations"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *configurations) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *configurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("configurations"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go similarity index 94% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go index ede2d4a287..b641cb3730 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_configuration.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_configuration.go new file mode 100644 index 0000000000..c0aa1d2510 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_configuration.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// FakeConfigurations implements ConfigurationInterface +type FakeConfigurations struct { + Fake *FakeServingV1beta1 + ns string +} + +var configurationsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1beta1", Resource: "configurations"} + +var configurationsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1beta1", Kind: "Configuration"} + +// Get takes name of the configuration, and returns the corresponding configuration object, and an error if there is any. +func (c *FakeConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(configurationsResource, c.ns, name), &v1beta1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Configuration), err +} + +// List takes label and field selectors, and returns the list of Configurations that match those selectors. +func (c *FakeConfigurations) List(opts v1.ListOptions) (result *v1beta1.ConfigurationList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(configurationsResource, configurationsKind, c.ns, opts), &v1beta1.ConfigurationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ConfigurationList{ListMeta: obj.(*v1beta1.ConfigurationList).ListMeta} + for _, item := range obj.(*v1beta1.ConfigurationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configurations. +func (c *FakeConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(configurationsResource, c.ns, opts)) + +} + +// Create takes the representation of a configuration and creates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Create(configuration *v1beta1.Configuration) (result *v1beta1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(configurationsResource, c.ns, configuration), &v1beta1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Configuration), err +} + +// Update takes the representation of a configuration and updates it. Returns the server's representation of the configuration, and an error, if there is any. +func (c *FakeConfigurations) Update(configuration *v1beta1.Configuration) (result *v1beta1.Configuration, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(configurationsResource, c.ns, configuration), &v1beta1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Configuration), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeConfigurations) UpdateStatus(configuration *v1beta1.Configuration) (*v1beta1.Configuration, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(configurationsResource, "status", c.ns, configuration), &v1beta1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Configuration), err +} + +// Delete takes name of the configuration and deletes it. Returns an error if one occurs. +func (c *FakeConfigurations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(configurationsResource, c.ns, name), &v1beta1.Configuration{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(configurationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ConfigurationList{}) + return err +} + +// Patch applies the patch and returns the patched configuration. +func (c *FakeConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Configuration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(configurationsResource, c.ns, name, pt, data, subresources...), &v1beta1.Configuration{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Configuration), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_revision.go new file mode 100644 index 0000000000..4025c07594 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_revision.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// FakeRevisions implements RevisionInterface +type FakeRevisions struct { + Fake *FakeServingV1beta1 + ns string +} + +var revisionsResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1beta1", Resource: "revisions"} + +var revisionsKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1beta1", Kind: "Revision"} + +// Get takes name of the revision, and returns the corresponding revision object, and an error if there is any. +func (c *FakeRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(revisionsResource, c.ns, name), &v1beta1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Revision), err +} + +// List takes label and field selectors, and returns the list of Revisions that match those selectors. +func (c *FakeRevisions) List(opts v1.ListOptions) (result *v1beta1.RevisionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(revisionsResource, revisionsKind, c.ns, opts), &v1beta1.RevisionList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.RevisionList{ListMeta: obj.(*v1beta1.RevisionList).ListMeta} + for _, item := range obj.(*v1beta1.RevisionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested revisions. 
+func (c *FakeRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(revisionsResource, c.ns, opts)) + +} + +// Create takes the representation of a revision and creates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Create(revision *v1beta1.Revision) (result *v1beta1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(revisionsResource, c.ns, revision), &v1beta1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Revision), err +} + +// Update takes the representation of a revision and updates it. Returns the server's representation of the revision, and an error, if there is any. +func (c *FakeRevisions) Update(revision *v1beta1.Revision) (result *v1beta1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(revisionsResource, c.ns, revision), &v1beta1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Revision), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRevisions) UpdateStatus(revision *v1beta1.Revision) (*v1beta1.Revision, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(revisionsResource, "status", c.ns, revision), &v1beta1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Revision), err +} + +// Delete takes name of the revision and deletes it. Returns an error if one occurs. +func (c *FakeRevisions) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(revisionsResource, c.ns, name), &v1beta1.Revision{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(revisionsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RevisionList{}) + return err +} + +// Patch applies the patch and returns the patched revision. +func (c *FakeRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Revision, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(revisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.Revision{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Revision), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_route.go new file mode 100644 index 0000000000..4bb9dc913d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_route.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// FakeRoutes implements RouteInterface +type FakeRoutes struct { + Fake *FakeServingV1beta1 + ns string +} + +var routesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1beta1", Resource: "routes"} + +var routesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1beta1", Kind: "Route"} + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *FakeRoutes) Get(name string, options v1.GetOptions) (result *v1beta1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(routesResource, c.ns, name), &v1beta1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Route), err +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. +func (c *FakeRoutes) List(opts v1.ListOptions) (result *v1beta1.RouteList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(routesResource, routesKind, c.ns, opts), &v1beta1.RouteList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.RouteList{ListMeta: obj.(*v1beta1.RouteList).ListMeta} + for _, item := range obj.(*v1beta1.RouteList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *FakeRoutes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(routesResource, c.ns, opts)) + +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Create(route *v1beta1.Route) (result *v1beta1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(routesResource, c.ns, route), &v1beta1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Route), err +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Update(route *v1beta1.Route) (result *v1beta1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(routesResource, c.ns, route), &v1beta1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Route), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRoutes) UpdateStatus(route *v1beta1.Route) (*v1beta1.Route, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(routesResource, "status", c.ns, route), &v1beta1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Route), err +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *FakeRoutes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(routesResource, c.ns, name), &v1beta1.Route{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeRoutes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(routesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.RouteList{}) + return err +} + +// Patch applies the patch and returns the patched route. +func (c *FakeRoutes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Route, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(routesResource, c.ns, name, pt, data, subresources...), &v1beta1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Route), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_service.go new file mode 100644 index 0000000000..593f70c17a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_service.go @@ -0,0 +1,140 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeServingV1beta1 + ns string +} + +var servicesResource = schema.GroupVersionResource{Group: "serving.knative.dev", Version: "v1beta1", Resource: "services"} + +var servicesKind = schema.GroupVersionKind{Group: "serving.knative.dev", Version: "v1beta1", Kind: "Service"} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *FakeServices) Get(name string, options v1.GetOptions) (result *v1beta1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1beta1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Service), err +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *FakeServices) List(opts v1.ListOptions) (result *v1beta1.ServiceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1beta1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ServiceList{ListMeta: obj.(*v1beta1.ServiceList).ListMeta} + for _, item := range obj.(*v1beta1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) + +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Create(service *v1beta1.Service) (result *v1beta1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1beta1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Service), err +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *FakeServices) Update(service *v1beta1.Service) (result *v1beta1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1beta1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Service), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServices) UpdateStatus(service *v1beta1.Service) (*v1beta1.Service, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1beta1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Service), err +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &v1beta1.Service{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ServiceList{}) + return err +} + +// Patch applies the patch and returns the patched service. +func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Service, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1beta1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Service), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_serving_client.go new file mode 100644 index 0000000000..74b9670397 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/fake/fake_serving_client.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1beta1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1" +) + +type FakeServingV1beta1 struct { + *testing.Fake +} + +func (c *FakeServingV1beta1) Configurations(namespace string) v1beta1.ConfigurationInterface { + return &FakeConfigurations{c, namespace} +} + +func (c *FakeServingV1beta1) Revisions(namespace string) v1beta1.RevisionInterface { + return &FakeRevisions{c, namespace} +} + +func (c *FakeServingV1beta1) Routes(namespace string) v1beta1.RouteInterface { + return &FakeRoutes{c, namespace} +} + +func (c *FakeServingV1beta1) Services(namespace string) v1beta1.ServiceInterface { + return &FakeServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeServingV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go similarity index 95% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go index 58d005da44..b7a7ae310c 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go index b76df83b7c..a8ca29f396 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/revision.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // RevisionsGetter has a method to return a RevisionInterface. @@ -76,11 +78,16 @@ func (c *revisions) Get(name string, options v1.GetOptions) (result *v1beta1.Rev // List takes label and field selectors, and returns the list of Revisions that match those selectors. func (c *revisions) List(opts v1.ListOptions) (result *v1beta1.RevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RevisionList{} err = c.client.Get(). Namespace(c.ns). 
Resource("revisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *revisions) List(opts v1.ListOptions) (result *v1beta1.RevisionList, err // Watch returns a watch.Interface that watches the requested revisions. func (c *revisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("revisions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *revisions) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *revisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("revisions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go index f278511fab..b893abcf27 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/route.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // RoutesGetter has a method to return a RouteInterface. @@ -76,11 +78,16 @@ func (c *routes) Get(name string, options v1.GetOptions) (result *v1beta1.Route, // List takes label and field selectors, and returns the list of Routes that match those selectors. func (c *routes) List(opts v1.ListOptions) (result *v1beta1.RouteList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.RouteList{} err = c.client.Get(). Namespace(c.ns). Resource("routes"). 
VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *routes) List(opts v1.ListOptions) (result *v1beta1.RouteList, err error // Watch returns a watch.Interface that watches the requested routes. func (c *routes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("routes"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *routes) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *routes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("routes"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go index 5fe1555b3d..12985f2c77 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/service.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,14 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" - scheme "github.com/knative/serving/pkg/client/clientset/versioned/scheme" + "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + scheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) // ServicesGetter has a method to return a ServiceInterface. @@ -76,11 +78,16 @@ func (c *services) Get(name string, options v1.GetOptions) (result *v1beta1.Serv // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts v1.ListOptions) (result *v1beta1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.ServiceList{} err = c.client.Get(). Namespace(c.ns). 
Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -88,11 +95,16 @@ func (c *services) List(opts v1.ListOptions) (result *v1beta1.ServiceList, err e // Watch returns a watch.Interface that watches the requested services. func (c *services) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -150,10 +162,15 @@ func (c *services) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *services) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Namespace(c.ns). Resource("services"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). 
Error() diff --git a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go similarity index 89% rename from test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go rename to test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go index 20d6cbf084..235b173914 100644 --- a/test/vendor/github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go +++ b/test/vendor/knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1/serving_client.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "github.com/knative/serving/pkg/apis/serving/v1beta1" - "github.com/knative/serving/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/pkg/client/clientset/versioned/scheme" ) type ServingV1beta1Interface interface { @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/interface.go new file mode 100644 index 0000000000..a21a2a0bde --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package autoscaling + +import ( + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/interface.go new file mode 100644 index 0000000000..fa410aa8d3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Metrics returns a MetricInformer. + Metrics() MetricInformer + // PodAutoscalers returns a PodAutoscalerInformer. + PodAutoscalers() PodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Metrics returns a MetricInformer. +func (v *version) Metrics() MetricInformer { + return &metricInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodAutoscalers returns a PodAutoscalerInformer. +func (v *version) PodAutoscalers() PodAutoscalerInformer { + return &podAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/metric.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/metric.go new file mode 100644 index 0000000000..5a5a8f4515 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/metric.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" +) + +// MetricInformer provides access to a shared informer and lister for +// Metrics. +type MetricInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.MetricLister +} + +type metricInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewMetricInformer constructs a new informer for Metric type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMetricInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMetricInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredMetricInformer constructs a new informer for Metric type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMetricInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1alpha1().Metrics(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1alpha1().Metrics(namespace).Watch(options) + }, + }, + &autoscalingv1alpha1.Metric{}, + resyncPeriod, + indexers, + ) +} + +func (f *metricInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMetricInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *metricInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscalingv1alpha1.Metric{}, f.defaultInformer) +} + +func (f *metricInformer) Lister() v1alpha1.MetricLister { + return v1alpha1.NewMetricLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/podautoscaler.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/podautoscaler.go new file mode 100644 index 0000000000..eb872c4f55 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1/podautoscaler.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under 
the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" +) + +// PodAutoscalerInformer provides access to a shared informer and lister for +// PodAutoscalers. +type PodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PodAutoscalerLister +} + +type podAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodAutoscalerInformer constructs a new informer for PodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPodAutoscalerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodAutoscalerInformer constructs a new informer for PodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodAutoscalerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1alpha1().PodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1alpha1().PodAutoscalers(namespace).Watch(options) + }, + }, + &autoscalingv1alpha1.PodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *podAutoscalerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscalingv1alpha1.PodAutoscaler{}, f.defaultInformer) +} + +func (f *podAutoscalerInformer) Lister() v1alpha1.PodAutoscalerLister { + return v1alpha1.NewPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git 
a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/factory.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/factory.go new file mode 100644 index 0000000000..1d42b4c3e7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,192 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + autoscaling "knative.dev/serving/pkg/client/informers/externalversions/autoscaling" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + networking "knative.dev/serving/pkg/client/informers/externalversions/networking" + serving "knative.dev/serving/pkg/client/informers/externalversions/serving" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. 
+// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Autoscaling() autoscaling.Interface + Networking() networking.Interface + Serving() serving.Interface +} + +func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { + return autoscaling.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Networking() networking.Interface { + return networking.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Serving() serving.Interface { + return serving.New(f, f.namespace, f.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/generic.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/generic.go new file mode 100644 index 0000000000..00313b24f5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=autoscaling.internal.knative.dev, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("metrics"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1alpha1().Metrics().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("podautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1alpha1().PodAutoscalers().Informer()}, nil + + // Group=networking.internal.knative.dev, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithResource("certificates"): + return &genericInformer{resource: 
resource.GroupResource(), informer: f.Networking().V1alpha1().Certificates().Informer()}, nil + case networkingv1alpha1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().Ingresses().Informer()}, nil + case networkingv1alpha1.SchemeGroupVersion.WithResource("serverlessservices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServerlessServices().Informer()}, nil + + // Group=serving.knative.dev, Version=v1 + case v1.SchemeGroupVersion.WithResource("configurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1().Configurations().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("revisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1().Revisions().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("routes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1().Routes().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("services"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1().Services().Informer()}, nil + + // Group=serving.knative.dev, Version=v1alpha1 + case servingv1alpha1.SchemeGroupVersion.WithResource("configurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1alpha1().Configurations().Informer()}, nil + case servingv1alpha1.SchemeGroupVersion.WithResource("revisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1alpha1().Revisions().Informer()}, nil + case servingv1alpha1.SchemeGroupVersion.WithResource("routes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1alpha1().Routes().Informer()}, nil + case servingv1alpha1.SchemeGroupVersion.WithResource("services"): + return &genericInformer{resource: 
resource.GroupResource(), informer: f.Serving().V1alpha1().Services().Informer()}, nil + + // Group=serving.knative.dev, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("configurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1beta1().Configurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("revisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1beta1().Revisions().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("routes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1beta1().Routes().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("services"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Serving().V1beta1().Services().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..94e406ae77 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. 
DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/interface.go new file mode 100644 index 0000000000..1a234e1c13 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package networking + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/certificate.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/certificate.go new file mode 100644 index 0000000000..b53730e218 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/certificate.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/networking/v1alpha1" +) + +// CertificateInformer provides access to a shared informer and lister for +// Certificates. +type CertificateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.CertificateLister +} + +type certificateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateInformer constructs a new informer for Certificate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCertificateInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().Certificates(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().Certificates(namespace).Watch(options) + }, + }, + &networkingv1alpha1.Certificate{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.Certificate{}, f.defaultInformer) +} + +func (f *certificateInformer) Lister() v1alpha1.CertificateLister { + return v1alpha1.NewCertificateLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/ingress.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/ingress.go new file mode 100644 index 0000000000..a43875dfb5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/ingress.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/networking/v1alpha1" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. +type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIngressInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().Ingresses(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().Ingresses(namespace).Watch(options) + }, + }, + &networkingv1alpha1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() v1alpha1.IngressLister { + return v1alpha1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/interface.go new file mode 100644 index 0000000000..55d1bf1b57 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the 
Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Certificates returns a CertificateInformer. + Certificates() CertificateInformer + // Ingresses returns a IngressInformer. + Ingresses() IngressInformer + // ServerlessServices returns a ServerlessServiceInformer. + ServerlessServices() ServerlessServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Certificates returns a CertificateInformer. +func (v *version) Certificates() CertificateInformer { + return &certificateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServerlessServices returns a ServerlessServiceInformer. 
+func (v *version) ServerlessServices() ServerlessServiceInformer { + return &serverlessServiceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/serverlessservice.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/serverlessservice.go new file mode 100644 index 0000000000..e346aca70b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1/serverlessservice.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/networking/v1alpha1" +) + +// ServerlessServiceInformer provides access to a shared informer and lister for +// ServerlessServices. 
+type ServerlessServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ServerlessServiceLister +} + +type serverlessServiceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServerlessServiceInformer constructs a new informer for ServerlessService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServerlessServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServerlessServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServerlessServiceInformer constructs a new informer for ServerlessService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServerlessServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ServerlessServices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ServerlessServices(namespace).Watch(options) + }, + }, + &networkingv1alpha1.ServerlessService{}, + resyncPeriod, + indexers, + ) +} + +func (f *serverlessServiceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServerlessServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serverlessServiceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.ServerlessService{}, f.defaultInformer) +} + +func (f *serverlessServiceInformer) Lister() v1alpha1.ServerlessServiceLister { + return v1alpha1.NewServerlessServiceLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/interface.go new file mode 100644 index 0000000000..be69bb69dd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package serving + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + v1beta1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. 
+func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/configuration.go new file mode 100644 index 0000000000..6e5b5e5e32 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/configuration.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/serving/pkg/client/listers/serving/v1" +) + +// ConfigurationInformer provides access to a shared informer and lister for +// Configurations. 
+type ConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ConfigurationLister +} + +type configurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigurationInformer constructs a new informer for Configuration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigurationInformer constructs a new informer for Configuration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Configurations(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Configurations(namespace).Watch(options) + }, + }, + &servingv1.Configuration{}, + resyncPeriod, + indexers, + ) +} + +func (f *configurationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1.Configuration{}, f.defaultInformer) +} + +func (f *configurationInformer) Lister() v1.ConfigurationLister { + return v1.NewConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/interface.go new file mode 100644 index 0000000000..d1a44d3186 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Configurations returns a ConfigurationInformer. + Configurations() ConfigurationInformer + // Revisions returns a RevisionInformer. + Revisions() RevisionInformer + // Routes returns a RouteInformer. + Routes() RouteInformer + // Services returns a ServiceInformer. + Services() ServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Configurations returns a ConfigurationInformer. +func (v *version) Configurations() ConfigurationInformer { + return &configurationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Revisions returns a RevisionInformer. +func (v *version) Revisions() RevisionInformer { + return &revisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Routes returns a RouteInformer. 
+func (v *version) Routes() RouteInformer { + return &routeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/revision.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/revision.go new file mode 100644 index 0000000000..759a009327 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/revision.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/serving/pkg/client/listers/serving/v1" +) + +// RevisionInformer provides access to a shared informer and lister for +// Revisions. 
+type RevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RevisionLister +} + +type revisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Revisions(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Revisions(namespace).Watch(options) + }, + }, + &servingv1.Revision{}, + resyncPeriod, + indexers, + ) +} + +func (f *revisionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *revisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1.Revision{}, f.defaultInformer) +} + +func (f *revisionInformer) Lister() v1.RevisionLister { + return v1.NewRevisionLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/route.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/route.go new file mode 100644 index 0000000000..dd94268445 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/route.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/serving/pkg/client/listers/serving/v1" +) + +// RouteInformer provides access to a shared informer and lister for +// Routes. +type RouteInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RouteLister +} + +type routeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Routes(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Routes(namespace).Watch(options) + }, + }, + &servingv1.Route{}, + resyncPeriod, + indexers, + ) +} + +func (f *routeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *routeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1.Route{}, f.defaultInformer) +} + +func (f *routeInformer) Lister() v1.RouteLister { + return v1.NewRouteLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/service.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/service.go new file mode 100644 index 0000000000..2f17a4fc92 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1/service.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1 "knative.dev/serving/pkg/client/listers/serving/v1" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Services(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1().Services(namespace).Watch(options) + }, + }, + &servingv1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1.ServiceLister { + return v1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/configuration.go new file mode 100644 index 0000000000..a7f4b47e25 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/configuration.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +// ConfigurationInformer provides access to a shared informer and lister for +// Configurations. +type ConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ConfigurationLister +} + +type configurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigurationInformer constructs a new informer for Configuration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigurationInformer constructs a new informer for Configuration type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Configurations(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Configurations(namespace).Watch(options) + }, + }, + &servingv1alpha1.Configuration{}, + resyncPeriod, + indexers, + ) +} + +func (f *configurationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1alpha1.Configuration{}, f.defaultInformer) +} + +func (f *configurationInformer) Lister() v1alpha1.ConfigurationLister { + return v1alpha1.NewConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/interface.go new file mode 100644 index 0000000000..7b49153ba8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 
The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Configurations returns a ConfigurationInformer. + Configurations() ConfigurationInformer + // Revisions returns a RevisionInformer. + Revisions() RevisionInformer + // Routes returns a RouteInformer. + Routes() RouteInformer + // Services returns a ServiceInformer. + Services() ServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Configurations returns a ConfigurationInformer. +func (v *version) Configurations() ConfigurationInformer { + return &configurationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Revisions returns a RevisionInformer. 
+func (v *version) Revisions() RevisionInformer { + return &revisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Routes returns a RouteInformer. +func (v *version) Routes() RouteInformer { + return &routeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/revision.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/revision.go new file mode 100644 index 0000000000..3fccf1d3b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/revision.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +// RevisionInformer provides access to a shared informer and lister for +// Revisions. +type RevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RevisionLister +} + +type revisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Revisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Revisions(namespace).Watch(options) + }, + }, + &servingv1alpha1.Revision{}, + resyncPeriod, + indexers, + ) +} + +func (f *revisionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *revisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1alpha1.Revision{}, f.defaultInformer) +} + +func (f *revisionInformer) Lister() v1alpha1.RevisionLister { + return v1alpha1.NewRevisionLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/route.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/route.go new file mode 100644 index 0000000000..79db675ef0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/route.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +// RouteInformer provides access to a shared informer and lister for +// Routes. +type RouteInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RouteLister +} + +type routeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Routes(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Routes(namespace).Watch(options) + }, + }, + &servingv1alpha1.Route{}, + resyncPeriod, + indexers, + ) +} + +func (f *routeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *routeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1alpha1.Route{}, f.defaultInformer) +} + +func (f *routeInformer) Lister() v1alpha1.RouteLister { + return v1alpha1.NewRouteLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/service.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/service.go new file mode 100644 index 0000000000..1b141a6f3f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1/service.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Services(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1alpha1().Services(namespace).Watch(options) + }, + }, + &servingv1alpha1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1alpha1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1alpha1.ServiceLister { + return v1alpha1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/configuration.go new file mode 100644 index 0000000000..338b8d014c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/configuration.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "knative.dev/serving/pkg/client/listers/serving/v1beta1" +) + +// ConfigurationInformer provides access to a shared informer and lister for +// Configurations. +type ConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ConfigurationLister +} + +type configurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigurationInformer constructs a new informer for Configuration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigurationInformer constructs a new informer for Configuration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Configurations(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Configurations(namespace).Watch(options) + }, + }, + &servingv1beta1.Configuration{}, + resyncPeriod, + indexers, + ) +} + +func (f *configurationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigurationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1beta1.Configuration{}, f.defaultInformer) +} + +func (f *configurationInformer) Lister() v1beta1.ConfigurationLister { + return v1beta1.NewConfigurationLister(f.Informer().GetIndexer()) +} diff --git 
a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/interface.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/interface.go new file mode 100644 index 0000000000..6e9290d1e2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Configurations returns a ConfigurationInformer. + Configurations() ConfigurationInformer + // Revisions returns a RevisionInformer. + Revisions() RevisionInformer + // Routes returns a RouteInformer. + Routes() RouteInformer + // Services returns a ServiceInformer. + Services() ServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Configurations returns a ConfigurationInformer. +func (v *version) Configurations() ConfigurationInformer { + return &configurationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Revisions returns a RevisionInformer. +func (v *version) Revisions() RevisionInformer { + return &revisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Routes returns a RouteInformer. +func (v *version) Routes() RouteInformer { + return &routeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/revision.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/revision.go new file mode 100644 index 0000000000..1d44246d0c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/revision.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "knative.dev/serving/pkg/client/listers/serving/v1beta1" +) + +// RevisionInformer provides access to a shared informer and lister for +// Revisions. +type RevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RevisionLister +} + +type revisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRevisionInformer constructs a new informer for Revision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRevisionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Revisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Revisions(namespace).Watch(options) + }, + }, + &servingv1beta1.Revision{}, + resyncPeriod, + indexers, + ) +} + +func (f *revisionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *revisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1beta1.Revision{}, f.defaultInformer) +} + +func (f *revisionInformer) Lister() v1beta1.RevisionLister { + return v1beta1.NewRevisionLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/route.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/route.go new file mode 100644 index 0000000000..3d22f848f5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/route.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "knative.dev/serving/pkg/client/listers/serving/v1beta1" +) + +// RouteInformer provides access to a shared informer and lister for +// Routes. +type RouteInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RouteLister +} + +type routeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRouteInformer constructs a new informer for Route type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Routes(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Routes(namespace).Watch(options) + }, + }, + &servingv1beta1.Route{}, + resyncPeriod, + indexers, + ) +} + +func (f *routeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *routeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1beta1.Route{}, f.defaultInformer) +} + +func (f *routeInformer) Lister() v1beta1.RouteLister { + return v1beta1.NewRouteLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/service.go b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/service.go new file mode 100644 index 0000000000..e06575d1ef --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1/service.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servingv1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" + versioned "knative.dev/serving/pkg/client/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "knative.dev/serving/pkg/client/listers/serving/v1beta1" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Services(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServingV1beta1().Services(namespace).Watch(options) + }, + }, + &servingv1beta1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servingv1beta1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1beta1.ServiceLister { + return v1beta1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/client/client.go b/test/vendor/knative.dev/serving/pkg/client/injection/client/client.go new file mode 100644 index 0000000000..1c49fa91a6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package client + +import ( + "context" + + rest "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + versioned "knative.dev/serving/pkg/client/clientset/versioned" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. +func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/clientset/versioned.Interface from context.") + } + return untyped.(versioned.Interface) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/client/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/client/fake/fake.go new file mode 100644 index 0000000000..582e58d23c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/client/fake/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + fake "knative.dev/serving/pkg/client/clientset/versioned/fake" + client "knative.dev/serving/pkg/client/injection/client" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) + return context.WithValue(ctx, client.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. 
+func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(client.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/clientset/versioned/fake.Clientset from context.") + } + return untyped.(*fake.Clientset) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake/fake.go new file mode 100644 index 0000000000..dd70a7f114 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake/fake.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + injection "knative.dev/pkg/injection" + podscalable "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" +) + +var Get = podscalable.Get + +func init() { + injection.Fake.RegisterDuck(podscalable.WithDuck) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/podscalable.go b/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/podscalable.go new file mode 100644 index 0000000000..7ef3e9632e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/podscalable.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package podscalable + +import ( + "context" + + duck "knative.dev/pkg/apis/duck" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + dynamicclient "knative.dev/pkg/injection/clients/dynamicclient" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +func init() { + injection.Default.RegisterDuck(WithDuck) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct{} + +func WithDuck(ctx context.Context) context.Context { + dc := dynamicclient.Get(ctx) + dif := &duck.CachedInformerFactory{ + Delegate: &duck.TypedInformerFactory{ + Client: dc, + Type: (&v1alpha1.PodScalable{}).GetFullType(), + ResyncPeriod: controller.GetResyncPeriod(ctx), + StopChannel: ctx.Done(), + }, + } + return context.WithValue(ctx, Key{}, dif) +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) duck.InformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/pkg/apis/duck.InformerFactory from context.") + } + return untyped.(duck.InformerFactory) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake/fake.go new file mode 100644 index 0000000000..0d44396954 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + metric "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" +) + +var Get = metric.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Autoscaling().V1alpha1().Metrics() + return context.WithValue(ctx, metric.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/metric.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/metric.go new file mode 100644 index 0000000000..139e4f125b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/metric.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package metric + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Autoscaling().V1alpha1().Metrics() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.MetricInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1.MetricInformer from context.") + } + return untyped.(v1alpha1.MetricInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake/fake.go new file mode 100644 index 0000000000..02a92d217a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + podautoscaler "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" +) + +var Get = podautoscaler.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Autoscaling().V1alpha1().PodAutoscalers() + return context.WithValue(ctx, podautoscaler.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/podautoscaler.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/podautoscaler.go new file mode 100644 index 0000000000..12ed4e9c06 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/podautoscaler.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package podautoscaler + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Autoscaling().V1alpha1().PodAutoscalers() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.PodAutoscalerInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/autoscaling/v1alpha1.PodAutoscalerInformer from context.") + } + return untyped.(v1alpha1.PodAutoscalerInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/factory.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/factory.go new file mode 100644 index 0000000000..ba5f57b48e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package factory + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + externalversions "knative.dev/serving/pkg/client/informers/externalversions" + client "knative.dev/serving/pkg/client/injection/client" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions.SharedInformerFactory from context.") + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/fake/fake.go new file mode 100644 index 0000000000..e984ede573 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/factory/fake/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + externalversions "knative.dev/serving/pkg/client/informers/externalversions" + fake "knative.dev/serving/pkg/client/injection/client/fake" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/certificate.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/certificate.go new file mode 100644 index 0000000000..3f75ffd2a7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/certificate.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package certificate + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha1().Certificates() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha1.CertificateInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1.CertificateInformer from context.") + } + return untyped.(v1alpha1.CertificateInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake/fake.go new file mode 100644 index 0000000000..e564016484 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + certificate "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate" +) + +var Get = certificate.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha1().Certificates() + return context.WithValue(ctx, certificate.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake/fake.go new file mode 100644 index 0000000000..ad03ab6a3e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + ingress "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress" +) + +var Get = ingress.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha1().Ingresses() + return context.WithValue(ctx, ingress.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/ingress.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/ingress.go new file mode 100644 index 0000000000..c132bf3d0f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/ingress.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package ingress + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha1().Ingresses() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.IngressInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1.IngressInformer from context.") + } + return untyped.(v1alpha1.IngressInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake/fake.go new file mode 100644 index 0000000000..4e29c0c39d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + serverlessservice "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice" +) + +var Get = serverlessservice.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha1().ServerlessServices() + return context.WithValue(ctx, serverlessservice.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/serverlessservice.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/serverlessservice.go new file mode 100644 index 0000000000..883afe4bbd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/serverlessservice.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package serverlessservice + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha1().ServerlessServices() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha1.ServerlessServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/networking/v1alpha1.ServerlessServiceInformer from context.") + } + return untyped.(v1alpha1.ServerlessServiceInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/configuration.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/configuration.go new file mode 100644 index 0000000000..bdadc1a5a3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/configuration.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package configuration + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
+type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1().Configurations() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.ConfigurationInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1.ConfigurationInformer from context.") + } + return untyped.(v1.ConfigurationInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/fake/fake.go new file mode 100644 index 0000000000..509a96f77d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + configuration "knative.dev/serving/pkg/client/injection/informers/serving/v1/configuration" +) + +var Get = configuration.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1().Configurations() + return context.WithValue(ctx, configuration.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/fake/fake.go new file mode 100644 index 0000000000..9d36e59abf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + revision "knative.dev/serving/pkg/client/injection/informers/serving/v1/revision" +) + +var Get = revision.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1().Revisions() + return context.WithValue(ctx, revision.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/revision.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/revision.go new file mode 100644 index 0000000000..399fdcc041 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/revision/revision.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package revision + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1().Revisions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.RevisionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1.RevisionInformer from context.") + } + return untyped.(v1.RevisionInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/fake/fake.go new file mode 100644 index 0000000000..571222105b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + route "knative.dev/serving/pkg/client/injection/informers/serving/v1/route" +) + +var Get = route.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1().Routes() + return context.WithValue(ctx, route.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/route.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/route.go new file mode 100644 index 0000000000..44a9d3fbd8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/route/route.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package route + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1().Routes() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.RouteInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1.RouteInformer from context.") + } + return untyped.(v1.RouteInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/fake/fake.go new file mode 100644 index 0000000000..e41017317e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + service "knative.dev/serving/pkg/client/injection/informers/serving/v1/service" +) + +var Get = service.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1().Services() + return context.WithValue(ctx, service.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/service.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/service.go new file mode 100644 index 0000000000..1b72173367 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1/service/service.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package service + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1().Services() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.ServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1.ServiceInformer from context.") + } + return untyped.(v1.ServiceInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/configuration.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/configuration.go new file mode 100644 index 0000000000..bf0df97344 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/configuration.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package configuration + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1alpha1().Configurations() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.ConfigurationInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1.ConfigurationInformer from context.") + } + return untyped.(v1alpha1.ConfigurationInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake/fake.go new file mode 100644 index 0000000000..6879048d9e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + configuration "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" +) + +var Get = configuration.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1alpha1().Configurations() + return context.WithValue(ctx, configuration.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake/fake.go new file mode 100644 index 0000000000..9790562174 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + revision "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" +) + +var Get = revision.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1alpha1().Revisions() + return context.WithValue(ctx, revision.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/revision.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/revision.go new file mode 100644 index 0000000000..87c9ca9c27 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/revision.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package revision + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1alpha1().Revisions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.RevisionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1.RevisionInformer from context.") + } + return untyped.(v1alpha1.RevisionInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake/fake.go new file mode 100644 index 0000000000..43898d4c3a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + route "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route" +) + +var Get = route.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1alpha1().Routes() + return context.WithValue(ctx, route.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/route.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/route.go new file mode 100644 index 0000000000..0031190027 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/route.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package route + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1alpha1().Routes() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.RouteInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1.RouteInformer from context.") + } + return untyped.(v1alpha1.RouteInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/fake/fake.go new file mode 100644 index 0000000000..3a1f9f9be3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + service "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service" +) + +var Get = service.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1alpha1().Services() + return context.WithValue(ctx, service.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/service.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/service.go new file mode 100644 index 0000000000..fee595a55f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/service.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package service + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1alpha1().Services() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha1.ServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1alpha1.ServiceInformer from context.") + } + return untyped.(v1alpha1.ServiceInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/configuration.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/configuration.go new file mode 100644 index 0000000000..24b7b64dd4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/configuration.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package configuration + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1beta1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1beta1().Configurations() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1beta1.ConfigurationInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1.ConfigurationInformer from context.") + } + return untyped.(v1beta1.ConfigurationInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/fake/fake.go new file mode 100644 index 0000000000..cd3f98dcbf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + configuration "knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/configuration" +) + +var Get = configuration.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1beta1().Configurations() + return context.WithValue(ctx, configuration.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/fake/fake.go new file mode 100644 index 0000000000..0719c07589 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + revision "knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision" +) + +var Get = revision.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1beta1().Revisions() + return context.WithValue(ctx, revision.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/revision.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/revision.go new file mode 100644 index 0000000000..d8fb163983 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/revision/revision.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package revision + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1beta1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1beta1().Revisions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1beta1.RevisionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1.RevisionInformer from context.") + } + return untyped.(v1beta1.RevisionInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/fake/fake.go new file mode 100644 index 0000000000..3ff99e00fb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + route "knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route" +) + +var Get = route.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1beta1().Routes() + return context.WithValue(ctx, route.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/route.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/route.go new file mode 100644 index 0000000000..892307eae3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/route/route.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package route + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1beta1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1beta1().Routes() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1beta1.RouteInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1.RouteInformer from context.") + } + return untyped.(v1beta1.RouteInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/fake/fake.go new file mode 100644 index 0000000000..57e710f2e5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/injection/informers/factory/fake" + service "knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service" +) + +var Get = service.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Serving().V1beta1().Services() + return context.WithValue(ctx, service.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/service.go b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/service.go new file mode 100644 index 0000000000..e71373944e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/injection/informers/serving/v1beta1/service/service.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package service + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1beta1 "knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1" + factory "knative.dev/serving/pkg/client/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Serving().V1beta1().Services() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1beta1.ServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/informers/externalversions/serving/v1beta1.ServiceInformer from context.") + } + return untyped.(v1beta1.ServiceInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/clientset.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/clientset.go new file mode 100644 index 0000000000..aeb8b2a29f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + networkingv1alpha3 "knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client +} + +// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client +func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface { + return c.networkingV1alpha3 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.networkingV1alpha3 = networkingv1alpha3.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/doc.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/doc.go new file mode 100644 index 0000000000..e48c2aa446 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/clientset_generated.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..52b7f33d1d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "knative.dev/serving/pkg/client/istio/clientset/versioned" + networkingv1alpha3 "knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3" + fakenetworkingv1alpha3 "knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client +func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface { + return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..2c4903250c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
+package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/register.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..85b5cbcb2f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + networkingv1alpha3.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/doc.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7acc2dcf25 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/register.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..6f2a312b25 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + networkingv1alpha3.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/destinationrule.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/destinationrule.go new file mode 100644 index 0000000000..5b0802801e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/destinationrule.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// DestinationRulesGetter has a method to return a DestinationRuleInterface. +// A group's client should implement this interface. +type DestinationRulesGetter interface { + DestinationRules(namespace string) DestinationRuleInterface +} + +// DestinationRuleInterface has methods to work with DestinationRule resources. +type DestinationRuleInterface interface { + Create(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Update(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.DestinationRule, error) + List(opts v1.ListOptions) (*v1alpha3.DestinationRuleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) + DestinationRuleExpansion +} + +// destinationRules implements DestinationRuleInterface +type destinationRules struct { + client rest.Interface + ns string +} + +// newDestinationRules returns a DestinationRules +func newDestinationRules(c *NetworkingV1alpha3Client, namespace string) *destinationRules { + return &destinationRules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any. +func (c *destinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Get(). 
+ Namespace(c.ns). + Resource("destinationrules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DestinationRules that match those selectors. +func (c *destinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.DestinationRuleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested destinationRules. +func (c *destinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *destinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("destinationrules"). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any. 
+func (c *destinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("destinationrules"). + Name(destinationRule.Name). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs. +func (c *destinationRules) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *destinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched destinationRule. +func (c *destinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("destinationrules"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/doc.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/doc.go new file mode 100644 index 0000000000..65f0a55be9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha3 diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/envoyfilter.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/envoyfilter.go new file mode 100644 index 0000000000..6ce1ba10dd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/envoyfilter.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// EnvoyFiltersGetter has a method to return a EnvoyFilterInterface. +// A group's client should implement this interface. +type EnvoyFiltersGetter interface { + EnvoyFilters(namespace string) EnvoyFilterInterface +} + +// EnvoyFilterInterface has methods to work with EnvoyFilter resources. 
+type EnvoyFilterInterface interface { + Create(*v1alpha3.EnvoyFilter) (*v1alpha3.EnvoyFilter, error) + Update(*v1alpha3.EnvoyFilter) (*v1alpha3.EnvoyFilter, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.EnvoyFilter, error) + List(opts v1.ListOptions) (*v1alpha3.EnvoyFilterList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.EnvoyFilter, err error) + EnvoyFilterExpansion +} + +// envoyFilters implements EnvoyFilterInterface +type envoyFilters struct { + client rest.Interface + ns string +} + +// newEnvoyFilters returns a EnvoyFilters +func newEnvoyFilters(c *NetworkingV1alpha3Client, namespace string) *envoyFilters { + return &envoyFilters{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the envoyFilter, and returns the corresponding envoyFilter object, and an error if there is any. +func (c *envoyFilters) Get(name string, options v1.GetOptions) (result *v1alpha3.EnvoyFilter, err error) { + result = &v1alpha3.EnvoyFilter{} + err = c.client.Get(). + Namespace(c.ns). + Resource("envoyfilters"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of EnvoyFilters that match those selectors. +func (c *envoyFilters) List(opts v1.ListOptions) (result *v1alpha3.EnvoyFilterList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.EnvoyFilterList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("envoyfilters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested envoyFilters. +func (c *envoyFilters) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("envoyfilters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a envoyFilter and creates it. Returns the server's representation of the envoyFilter, and an error, if there is any. +func (c *envoyFilters) Create(envoyFilter *v1alpha3.EnvoyFilter) (result *v1alpha3.EnvoyFilter, err error) { + result = &v1alpha3.EnvoyFilter{} + err = c.client.Post(). + Namespace(c.ns). + Resource("envoyfilters"). + Body(envoyFilter). + Do(). + Into(result) + return +} + +// Update takes the representation of a envoyFilter and updates it. Returns the server's representation of the envoyFilter, and an error, if there is any. +func (c *envoyFilters) Update(envoyFilter *v1alpha3.EnvoyFilter) (result *v1alpha3.EnvoyFilter, err error) { + result = &v1alpha3.EnvoyFilter{} + err = c.client.Put(). + Namespace(c.ns). + Resource("envoyfilters"). + Name(envoyFilter.Name). + Body(envoyFilter). + Do(). + Into(result) + return +} + +// Delete takes name of the envoyFilter and deletes it. Returns an error if one occurs. +func (c *envoyFilters) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("envoyfilters"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *envoyFilters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("envoyfilters"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched envoyFilter. +func (c *envoyFilters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.EnvoyFilter, err error) { + result = &v1alpha3.EnvoyFilter{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("envoyfilters"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/doc.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/doc.go new file mode 100644 index 0000000000..c7f6e65cab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_destinationrule.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_destinationrule.go new file mode 100644 index 0000000000..36937c27ef --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_destinationrule.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDestinationRules implements DestinationRuleInterface +type FakeDestinationRules struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var destinationrulesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "destinationrules"} + +var destinationrulesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "DestinationRule"} + +// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any. +func (c *FakeDestinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// List takes label and field selectors, and returns the list of DestinationRules that match those selectors. +func (c *FakeDestinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(destinationrulesResource, destinationrulesKind, c.ns, opts), &v1alpha3.DestinationRuleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.DestinationRuleList{ListMeta: obj.(*v1alpha3.DestinationRuleList).ListMeta} + for _, item := range obj.(*v1alpha3.DestinationRuleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested destinationRules. +func (c *FakeDestinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(destinationrulesResource, c.ns, opts)) + +} + +// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *FakeDestinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *FakeDestinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} + +// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs. 
+func (c *FakeDestinationRules) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDestinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(destinationrulesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.DestinationRuleList{}) + return err +} + +// Patch applies the patch and returns the patched destinationRule. +func (c *FakeDestinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(destinationrulesResource, c.ns, name, pt, data, subresources...), &v1alpha3.DestinationRule{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.DestinationRule), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_envoyfilter.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_envoyfilter.go new file mode 100644 index 0000000000..381a8efe94 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_envoyfilter.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEnvoyFilters implements EnvoyFilterInterface +type FakeEnvoyFilters struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var envoyfiltersResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "envoyfilters"} + +var envoyfiltersKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "EnvoyFilter"} + +// Get takes name of the envoyFilter, and returns the corresponding envoyFilter object, and an error if there is any. +func (c *FakeEnvoyFilters) Get(name string, options v1.GetOptions) (result *v1alpha3.EnvoyFilter, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(envoyfiltersResource, c.ns, name), &v1alpha3.EnvoyFilter{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.EnvoyFilter), err +} + +// List takes label and field selectors, and returns the list of EnvoyFilters that match those selectors. +func (c *FakeEnvoyFilters) List(opts v1.ListOptions) (result *v1alpha3.EnvoyFilterList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(envoyfiltersResource, envoyfiltersKind, c.ns, opts), &v1alpha3.EnvoyFilterList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.EnvoyFilterList{ListMeta: obj.(*v1alpha3.EnvoyFilterList).ListMeta} + for _, item := range obj.(*v1alpha3.EnvoyFilterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested envoyFilters. +func (c *FakeEnvoyFilters) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(envoyfiltersResource, c.ns, opts)) + +} + +// Create takes the representation of a envoyFilter and creates it. Returns the server's representation of the envoyFilter, and an error, if there is any. +func (c *FakeEnvoyFilters) Create(envoyFilter *v1alpha3.EnvoyFilter) (result *v1alpha3.EnvoyFilter, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(envoyfiltersResource, c.ns, envoyFilter), &v1alpha3.EnvoyFilter{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.EnvoyFilter), err +} + +// Update takes the representation of a envoyFilter and updates it. Returns the server's representation of the envoyFilter, and an error, if there is any. +func (c *FakeEnvoyFilters) Update(envoyFilter *v1alpha3.EnvoyFilter) (result *v1alpha3.EnvoyFilter, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(envoyfiltersResource, c.ns, envoyFilter), &v1alpha3.EnvoyFilter{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.EnvoyFilter), err +} + +// Delete takes name of the envoyFilter and deletes it. Returns an error if one occurs. +func (c *FakeEnvoyFilters) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(envoyfiltersResource, c.ns, name), &v1alpha3.EnvoyFilter{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEnvoyFilters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(envoyfiltersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.EnvoyFilterList{}) + return err +} + +// Patch applies the patch and returns the patched envoyFilter. +func (c *FakeEnvoyFilters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.EnvoyFilter, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(envoyfiltersResource, c.ns, name, pt, data, subresources...), &v1alpha3.EnvoyFilter{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.EnvoyFilter), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_gateway.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_gateway.go new file mode 100644 index 0000000000..a04c805c3d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_gateway.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeGateways implements GatewayInterface +type FakeGateways struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var gatewaysResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "gateways"} + +var gatewaysKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Gateway"} + +// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any. +func (c *FakeGateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// List takes label and field selectors, and returns the list of Gateways that match those selectors. +func (c *FakeGateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(gatewaysResource, gatewaysKind, c.ns, opts), &v1alpha3.GatewayList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.GatewayList{ListMeta: obj.(*v1alpha3.GatewayList).ListMeta} + for _, item := range obj.(*v1alpha3.GatewayList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested gateways. 
+func (c *FakeGateways) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(gatewaysResource, c.ns, opts)) + +} + +// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *FakeGateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *FakeGateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} + +// Delete takes name of the gateway and deletes it. Returns an error if one occurs. +func (c *FakeGateways) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeGateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(gatewaysResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.GatewayList{}) + return err +} + +// Patch applies the patch and returns the patched gateway. +func (c *FakeGateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(gatewaysResource, c.ns, name, pt, data, subresources...), &v1alpha3.Gateway{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Gateway), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_networking_client.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_networking_client.go new file mode 100644 index 0000000000..dbb1af13cf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_networking_client.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha3 "knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3" +) + +type FakeNetworkingV1alpha3 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1alpha3) DestinationRules(namespace string) v1alpha3.DestinationRuleInterface { + return &FakeDestinationRules{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) EnvoyFilters(namespace string) v1alpha3.EnvoyFilterInterface { + return &FakeEnvoyFilters{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) Gateways(namespace string) v1alpha3.GatewayInterface { + return &FakeGateways{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) ServiceEntries(namespace string) v1alpha3.ServiceEntryInterface { + return &FakeServiceEntries{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) Sidecars(namespace string) v1alpha3.SidecarInterface { + return &FakeSidecars{c, namespace} +} + +func (c *FakeNetworkingV1alpha3) VirtualServices(namespace string) v1alpha3.VirtualServiceInterface { + return &FakeVirtualServices{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeNetworkingV1alpha3) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_serviceentry.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_serviceentry.go new file mode 100644 index 0000000000..470bceaffd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_serviceentry.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServiceEntries implements ServiceEntryInterface +type FakeServiceEntries struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var serviceentriesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "serviceentries"} + +var serviceentriesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "ServiceEntry"} + +// Get takes name of the serviceEntry, and returns the corresponding serviceEntry object, and an error if there is any. +func (c *FakeServiceEntries) Get(name string, options v1.GetOptions) (result *v1alpha3.ServiceEntry, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(serviceentriesResource, c.ns, name), &v1alpha3.ServiceEntry{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.ServiceEntry), err +} + +// List takes label and field selectors, and returns the list of ServiceEntries that match those selectors. +func (c *FakeServiceEntries) List(opts v1.ListOptions) (result *v1alpha3.ServiceEntryList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(serviceentriesResource, serviceentriesKind, c.ns, opts), &v1alpha3.ServiceEntryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.ServiceEntryList{ListMeta: obj.(*v1alpha3.ServiceEntryList).ListMeta} + for _, item := range obj.(*v1alpha3.ServiceEntryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceEntries. +func (c *FakeServiceEntries) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(serviceentriesResource, c.ns, opts)) + +} + +// Create takes the representation of a serviceEntry and creates it. Returns the server's representation of the serviceEntry, and an error, if there is any. +func (c *FakeServiceEntries) Create(serviceEntry *v1alpha3.ServiceEntry) (result *v1alpha3.ServiceEntry, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(serviceentriesResource, c.ns, serviceEntry), &v1alpha3.ServiceEntry{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.ServiceEntry), err +} + +// Update takes the representation of a serviceEntry and updates it. Returns the server's representation of the serviceEntry, and an error, if there is any. +func (c *FakeServiceEntries) Update(serviceEntry *v1alpha3.ServiceEntry) (result *v1alpha3.ServiceEntry, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(serviceentriesResource, c.ns, serviceEntry), &v1alpha3.ServiceEntry{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.ServiceEntry), err +} + +// Delete takes name of the serviceEntry and deletes it. Returns an error if one occurs. +func (c *FakeServiceEntries) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteAction(serviceentriesResource, c.ns, name), &v1alpha3.ServiceEntry{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceEntries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(serviceentriesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.ServiceEntryList{}) + return err +} + +// Patch applies the patch and returns the patched serviceEntry. +func (c *FakeServiceEntries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.ServiceEntry, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(serviceentriesResource, c.ns, name, pt, data, subresources...), &v1alpha3.ServiceEntry{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.ServiceEntry), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_sidecar.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_sidecar.go new file mode 100644 index 0000000000..230dfbdcf2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_sidecar.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeSidecars implements SidecarInterface +type FakeSidecars struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var sidecarsResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "sidecars"} + +var sidecarsKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Sidecar"} + +// Get takes name of the sidecar, and returns the corresponding sidecar object, and an error if there is any. +func (c *FakeSidecars) Get(name string, options v1.GetOptions) (result *v1alpha3.Sidecar, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(sidecarsResource, c.ns, name), &v1alpha3.Sidecar{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Sidecar), err +} + +// List takes label and field selectors, and returns the list of Sidecars that match those selectors. +func (c *FakeSidecars) List(opts v1.ListOptions) (result *v1alpha3.SidecarList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(sidecarsResource, sidecarsKind, c.ns, opts), &v1alpha3.SidecarList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.SidecarList{ListMeta: obj.(*v1alpha3.SidecarList).ListMeta} + for _, item := range obj.(*v1alpha3.SidecarList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested sidecars. 
+func (c *FakeSidecars) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(sidecarsResource, c.ns, opts)) + +} + +// Create takes the representation of a sidecar and creates it. Returns the server's representation of the sidecar, and an error, if there is any. +func (c *FakeSidecars) Create(sidecar *v1alpha3.Sidecar) (result *v1alpha3.Sidecar, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(sidecarsResource, c.ns, sidecar), &v1alpha3.Sidecar{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Sidecar), err +} + +// Update takes the representation of a sidecar and updates it. Returns the server's representation of the sidecar, and an error, if there is any. +func (c *FakeSidecars) Update(sidecar *v1alpha3.Sidecar) (result *v1alpha3.Sidecar, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(sidecarsResource, c.ns, sidecar), &v1alpha3.Sidecar{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Sidecar), err +} + +// Delete takes name of the sidecar and deletes it. Returns an error if one occurs. +func (c *FakeSidecars) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(sidecarsResource, c.ns, name), &v1alpha3.Sidecar{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSidecars) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(sidecarsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.SidecarList{}) + return err +} + +// Patch applies the patch and returns the patched sidecar. +func (c *FakeSidecars) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Sidecar, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(sidecarsResource, c.ns, name, pt, data, subresources...), &v1alpha3.Sidecar{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.Sidecar), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_virtualservice.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_virtualservice.go new file mode 100644 index 0000000000..16dc607e02 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/fake/fake_virtualservice.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVirtualServices implements VirtualServiceInterface +type FakeVirtualServices struct { + Fake *FakeNetworkingV1alpha3 + ns string +} + +var virtualservicesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "virtualservices"} + +var virtualservicesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "VirtualService"} + +// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any. +func (c *FakeVirtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// List takes label and field selectors, and returns the list of VirtualServices that match those selectors. +func (c *FakeVirtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(virtualservicesResource, virtualservicesKind, c.ns, opts), &v1alpha3.VirtualServiceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha3.VirtualServiceList{ListMeta: obj.(*v1alpha3.VirtualServiceList).ListMeta} + for _, item := range obj.(*v1alpha3.VirtualServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested virtualServices. +func (c *FakeVirtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(virtualservicesResource, c.ns, opts)) + +} + +// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *FakeVirtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *FakeVirtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} + +// Delete takes name of the virtualService and deletes it. Returns an error if one occurs. 
+func (c *FakeVirtualServices) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(virtualservicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha3.VirtualServiceList{}) + return err +} + +// Patch applies the patch and returns the patched virtualService. +func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, pt, data, subresources...), &v1alpha3.VirtualService{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha3.VirtualService), err +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/gateway.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/gateway.go new file mode 100644 index 0000000000..da95ee6e79 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/gateway.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// GatewaysGetter has a method to return a GatewayInterface. +// A group's client should implement this interface. +type GatewaysGetter interface { + Gateways(namespace string) GatewayInterface +} + +// GatewayInterface has methods to work with Gateway resources. +type GatewayInterface interface { + Create(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Update(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.Gateway, error) + List(opts v1.ListOptions) (*v1alpha3.GatewayList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) + GatewayExpansion +} + +// gateways implements GatewayInterface +type gateways struct { + client rest.Interface + ns string +} + +// newGateways returns a Gateways +func newGateways(c *NetworkingV1alpha3Client, namespace string) *gateways { + return &gateways{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any. +func (c *gateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Gateways that match those selectors. +func (c *gateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.GatewayList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested gateways. +func (c *gateways) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Post(). + Namespace(c.ns). + Resource("gateways"). + Body(gateway). + Do(). + Into(result) + return +} + +// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Put(). + Namespace(c.ns). + Resource("gateways"). + Name(gateway.Name). + Body(gateway). + Do(). + Into(result) + return +} + +// Delete takes name of the gateway and deletes it. Returns an error if one occurs. 
+func (c *gateways) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *gateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched gateway. +func (c *gateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("gateways"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/generated_expansion.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/generated_expansion.go new file mode 100644 index 0000000000..29c55e0a6b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +type DestinationRuleExpansion interface{} + +type EnvoyFilterExpansion interface{} + +type GatewayExpansion interface{} + +type ServiceEntryExpansion interface{} + +type SidecarExpansion interface{} + +type VirtualServiceExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/networking_client.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/networking_client.go new file mode 100644 index 0000000000..57fab99ef4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/networking_client.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + rest "k8s.io/client-go/rest" + "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +type NetworkingV1alpha3Interface interface { + RESTClient() rest.Interface + DestinationRulesGetter + EnvoyFiltersGetter + GatewaysGetter + ServiceEntriesGetter + SidecarsGetter + VirtualServicesGetter +} + +// NetworkingV1alpha3Client is used to interact with features provided by the networking.istio.io group. +type NetworkingV1alpha3Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha3Client) DestinationRules(namespace string) DestinationRuleInterface { + return newDestinationRules(c, namespace) +} + +func (c *NetworkingV1alpha3Client) EnvoyFilters(namespace string) EnvoyFilterInterface { + return newEnvoyFilters(c, namespace) +} + +func (c *NetworkingV1alpha3Client) Gateways(namespace string) GatewayInterface { + return newGateways(c, namespace) +} + +func (c *NetworkingV1alpha3Client) ServiceEntries(namespace string) ServiceEntryInterface { + return newServiceEntries(c, namespace) +} + +func (c *NetworkingV1alpha3Client) Sidecars(namespace string) SidecarInterface { + return newSidecars(c, namespace) +} + +func (c *NetworkingV1alpha3Client) VirtualServices(namespace string) VirtualServiceInterface { + return newVirtualServices(c, namespace) +} + +// NewForConfig creates a new NetworkingV1alpha3Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1alpha3Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha3Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1alpha3Client { + return &NetworkingV1alpha3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1alpha3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/serviceentry.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/serviceentry.go new file mode 100644 index 0000000000..718ca4ee2f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/serviceentry.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// ServiceEntriesGetter has a method to return a ServiceEntryInterface. +// A group's client should implement this interface. +type ServiceEntriesGetter interface { + ServiceEntries(namespace string) ServiceEntryInterface +} + +// ServiceEntryInterface has methods to work with ServiceEntry resources. +type ServiceEntryInterface interface { + Create(*v1alpha3.ServiceEntry) (*v1alpha3.ServiceEntry, error) + Update(*v1alpha3.ServiceEntry) (*v1alpha3.ServiceEntry, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.ServiceEntry, error) + List(opts v1.ListOptions) (*v1alpha3.ServiceEntryList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.ServiceEntry, err error) + ServiceEntryExpansion +} + +// serviceEntries implements ServiceEntryInterface +type serviceEntries struct { + client rest.Interface + ns string +} + +// newServiceEntries returns a ServiceEntries +func newServiceEntries(c *NetworkingV1alpha3Client, namespace string) *serviceEntries { + return &serviceEntries{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the serviceEntry, and returns the corresponding serviceEntry object, and an error if there is any. +func (c *serviceEntries) Get(name string, options v1.GetOptions) (result *v1alpha3.ServiceEntry, err error) { + result = &v1alpha3.ServiceEntry{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceentries"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceEntries that match those selectors. +func (c *serviceEntries) List(opts v1.ListOptions) (result *v1alpha3.ServiceEntryList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.ServiceEntryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceEntries. +func (c *serviceEntries) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceentries"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a serviceEntry and creates it. Returns the server's representation of the serviceEntry, and an error, if there is any. +func (c *serviceEntries) Create(serviceEntry *v1alpha3.ServiceEntry) (result *v1alpha3.ServiceEntry, err error) { + result = &v1alpha3.ServiceEntry{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceentries"). + Body(serviceEntry). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceEntry and updates it. Returns the server's representation of the serviceEntry, and an error, if there is any. +func (c *serviceEntries) Update(serviceEntry *v1alpha3.ServiceEntry) (result *v1alpha3.ServiceEntry, err error) { + result = &v1alpha3.ServiceEntry{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceentries"). + Name(serviceEntry.Name). + Body(serviceEntry). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the serviceEntry and deletes it. Returns an error if one occurs. +func (c *serviceEntries) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceentries"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceEntries) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceentries"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched serviceEntry. +func (c *serviceEntries) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.ServiceEntry, err error) { + result = &v1alpha3.ServiceEntry{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serviceentries"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/sidecar.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/sidecar.go new file mode 100644 index 0000000000..411e98ef8a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/sidecar.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// SidecarsGetter has a method to return a SidecarInterface. +// A group's client should implement this interface. +type SidecarsGetter interface { + Sidecars(namespace string) SidecarInterface +} + +// SidecarInterface has methods to work with Sidecar resources. 
+type SidecarInterface interface { + Create(*v1alpha3.Sidecar) (*v1alpha3.Sidecar, error) + Update(*v1alpha3.Sidecar) (*v1alpha3.Sidecar, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.Sidecar, error) + List(opts v1.ListOptions) (*v1alpha3.SidecarList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Sidecar, err error) + SidecarExpansion +} + +// sidecars implements SidecarInterface +type sidecars struct { + client rest.Interface + ns string +} + +// newSidecars returns a Sidecars +func newSidecars(c *NetworkingV1alpha3Client, namespace string) *sidecars { + return &sidecars{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the sidecar, and returns the corresponding sidecar object, and an error if there is any. +func (c *sidecars) Get(name string, options v1.GetOptions) (result *v1alpha3.Sidecar, err error) { + result = &v1alpha3.Sidecar{} + err = c.client.Get(). + Namespace(c.ns). + Resource("sidecars"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Sidecars that match those selectors. +func (c *sidecars) List(opts v1.ListOptions) (result *v1alpha3.SidecarList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.SidecarList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("sidecars"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested sidecars. 
+func (c *sidecars) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("sidecars"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a sidecar and creates it. Returns the server's representation of the sidecar, and an error, if there is any. +func (c *sidecars) Create(sidecar *v1alpha3.Sidecar) (result *v1alpha3.Sidecar, err error) { + result = &v1alpha3.Sidecar{} + err = c.client.Post(). + Namespace(c.ns). + Resource("sidecars"). + Body(sidecar). + Do(). + Into(result) + return +} + +// Update takes the representation of a sidecar and updates it. Returns the server's representation of the sidecar, and an error, if there is any. +func (c *sidecars) Update(sidecar *v1alpha3.Sidecar) (result *v1alpha3.Sidecar, err error) { + result = &v1alpha3.Sidecar{} + err = c.client.Put(). + Namespace(c.ns). + Resource("sidecars"). + Name(sidecar.Name). + Body(sidecar). + Do(). + Into(result) + return +} + +// Delete takes name of the sidecar and deletes it. Returns an error if one occurs. +func (c *sidecars) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("sidecars"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *sidecars) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("sidecars"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched sidecar. +func (c *sidecars) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Sidecar, err error) { + result = &v1alpha3.Sidecar{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("sidecars"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/virtualservice.go b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/virtualservice.go new file mode 100644 index 0000000000..5661c55518 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/clientset/versioned/typed/networking/v1alpha3/virtualservice.go @@ -0,0 +1,174 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "time" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + scheme "knative.dev/serving/pkg/client/istio/clientset/versioned/scheme" +) + +// VirtualServicesGetter has a method to return a VirtualServiceInterface. +// A group's client should implement this interface. 
+type VirtualServicesGetter interface { + VirtualServices(namespace string) VirtualServiceInterface +} + +// VirtualServiceInterface has methods to work with VirtualService resources. +type VirtualServiceInterface interface { + Create(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Update(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.VirtualService, error) + List(opts v1.ListOptions) (*v1alpha3.VirtualServiceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) + VirtualServiceExpansion +} + +// virtualServices implements VirtualServiceInterface +type virtualServices struct { + client rest.Interface + ns string +} + +// newVirtualServices returns a VirtualServices +func newVirtualServices(c *NetworkingV1alpha3Client, namespace string) *virtualServices { + return &virtualServices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any. +func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VirtualServices that match those selectors. 
+func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha3.VirtualServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested virtualServices. +func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *virtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Post(). + Namespace(c.ns). + Resource("virtualservices"). + Body(virtualService). + Do(). + Into(result) + return +} + +// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *virtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Put(). + Namespace(c.ns). + Resource("virtualservices"). + Name(virtualService.Name). + Body(virtualService). + Do(). + Into(result) + return +} + +// Delete takes name of the virtualService and deletes it. Returns an error if one occurs. 
+func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched virtualService. +func (c *virtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("virtualservices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/factory.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/factory.go new file mode 100644 index 0000000000..c579b6e966 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + networking "knative.dev/serving/pkg/client/istio/informers/externalversions/networking" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Networking() networking.Interface +} + +func (f *sharedInformerFactory) Networking() networking.Interface { + return networking.New(f, f.namespace, f.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/generic.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/generic.go new file mode 100644 index 0000000000..748d16f75e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/generic.go @@ -0,0 +1,72 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=networking.istio.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithResource("destinationrules"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().DestinationRules().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("envoyfilters"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().EnvoyFilters().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("gateways"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().Gateways().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("serviceentries"): + return &genericInformer{resource: resource.GroupResource(), informer: 
f.Networking().V1alpha3().ServiceEntries().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("sidecars"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().Sidecars().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("virtualservices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().VirtualServices().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces/factory_interfaces.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..6f1dd544c3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/interface.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/interface.go new file mode 100644 index 0000000000..8033bb32b5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package networking + +import ( + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha3 provides access to shared informers for resources in V1alpha3. 
+ V1alpha3() v1alpha3.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha3 returns a new v1alpha3.Interface. +func (g *group) V1alpha3() v1alpha3.Interface { + return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/destinationrule.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/destinationrule.go new file mode 100644 index 0000000000..57434d23a5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/destinationrule.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// DestinationRuleInformer provides access to a shared informer and lister for +// DestinationRules. +type DestinationRuleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.DestinationRuleLister +} + +type destinationRuleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDestinationRuleInformer constructs a new informer for DestinationRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDestinationRuleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDestinationRuleInformer constructs a new informer for DestinationRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().DestinationRules(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().DestinationRules(namespace).Watch(options) + }, + }, + &networkingv1alpha3.DestinationRule{}, + resyncPeriod, + indexers, + ) +} + +func (f *destinationRuleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDestinationRuleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *destinationRuleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.DestinationRule{}, f.defaultInformer) +} + +func (f *destinationRuleInformer) Lister() v1alpha3.DestinationRuleLister { + return v1alpha3.NewDestinationRuleLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/envoyfilter.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/envoyfilter.go new file mode 100644 index 0000000000..5c6cdb5609 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/envoyfilter.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// EnvoyFilterInformer provides access to a shared informer and lister for +// EnvoyFilters. +type EnvoyFilterInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.EnvoyFilterLister +} + +type envoyFilterInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEnvoyFilterInformer constructs a new informer for EnvoyFilter type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewEnvoyFilterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEnvoyFilterInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEnvoyFilterInformer constructs a new informer for EnvoyFilter type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEnvoyFilterInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().EnvoyFilters(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().EnvoyFilters(namespace).Watch(options) + }, + }, + &networkingv1alpha3.EnvoyFilter{}, + resyncPeriod, + indexers, + ) +} + +func (f *envoyFilterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEnvoyFilterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *envoyFilterInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.EnvoyFilter{}, f.defaultInformer) +} + +func (f *envoyFilterInformer) Lister() v1alpha3.EnvoyFilterLister { + return v1alpha3.NewEnvoyFilterLister(f.Informer().GetIndexer()) +} diff --git 
a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/gateway.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/gateway.go new file mode 100644 index 0000000000..c403f2e45e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/gateway.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// GatewayInformer provides access to a shared informer and lister for +// Gateways. 
+type GatewayInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.GatewayLister +} + +type gatewayInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewGatewayInformer constructs a new informer for Gateway type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredGatewayInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredGatewayInformer constructs a new informer for Gateway type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Gateways(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Gateways(namespace).Watch(options) + }, + }, + &networkingv1alpha3.Gateway{}, + resyncPeriod, + indexers, + ) +} + +func (f *gatewayInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredGatewayInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *gatewayInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.Gateway{}, f.defaultInformer) +} + +func (f *gatewayInformer) Lister() v1alpha3.GatewayLister { + return v1alpha3.NewGatewayLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/interface.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/interface.go new file mode 100644 index 0000000000..e8e5c7a37a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/interface.go @@ -0,0 +1,80 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DestinationRules returns a DestinationRuleInformer. + DestinationRules() DestinationRuleInformer + // EnvoyFilters returns a EnvoyFilterInformer. + EnvoyFilters() EnvoyFilterInformer + // Gateways returns a GatewayInformer. + Gateways() GatewayInformer + // ServiceEntries returns a ServiceEntryInformer. + ServiceEntries() ServiceEntryInformer + // Sidecars returns a SidecarInformer. + Sidecars() SidecarInformer + // VirtualServices returns a VirtualServiceInformer. + VirtualServices() VirtualServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DestinationRules returns a DestinationRuleInformer. +func (v *version) DestinationRules() DestinationRuleInformer { + return &destinationRuleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// EnvoyFilters returns a EnvoyFilterInformer. 
+func (v *version) EnvoyFilters() EnvoyFilterInformer { + return &envoyFilterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Gateways returns a GatewayInformer. +func (v *version) Gateways() GatewayInformer { + return &gatewayInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServiceEntries returns a ServiceEntryInformer. +func (v *version) ServiceEntries() ServiceEntryInformer { + return &serviceEntryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Sidecars returns a SidecarInformer. +func (v *version) Sidecars() SidecarInformer { + return &sidecarInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VirtualServices returns a VirtualServiceInformer. +func (v *version) VirtualServices() VirtualServiceInformer { + return &virtualServiceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/serviceentry.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/serviceentry.go new file mode 100644 index 0000000000..56ebf525a6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/serviceentry.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// ServiceEntryInformer provides access to a shared informer and lister for +// ServiceEntries. +type ServiceEntryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.ServiceEntryLister +} + +type serviceEntryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceEntryInformer constructs a new informer for ServiceEntry type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceEntryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceEntryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceEntryInformer constructs a new informer for ServiceEntry type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceEntryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().ServiceEntries(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().ServiceEntries(namespace).Watch(options) + }, + }, + &networkingv1alpha3.ServiceEntry{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceEntryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceEntryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceEntryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.ServiceEntry{}, f.defaultInformer) +} + +func (f *serviceEntryInformer) Lister() v1alpha3.ServiceEntryLister { + return v1alpha3.NewServiceEntryLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/sidecar.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/sidecar.go new file mode 100644 index 0000000000..159702377c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/sidecar.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// SidecarInformer provides access to a shared informer and lister for +// Sidecars. +type SidecarInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.SidecarLister +} + +type sidecarInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSidecarInformer constructs a new informer for Sidecar type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSidecarInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSidecarInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSidecarInformer constructs a new informer for Sidecar type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSidecarInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Sidecars(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Sidecars(namespace).Watch(options) + }, + }, + &networkingv1alpha3.Sidecar{}, + resyncPeriod, + indexers, + ) +} + +func (f *sidecarInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSidecarInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *sidecarInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.Sidecar{}, f.defaultInformer) +} + +func (f *sidecarInformer) Lister() v1alpha3.SidecarLister { + return v1alpha3.NewSidecarLister(f.Informer().GetIndexer()) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/virtualservice.go b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/virtualservice.go new file mode 100644 index 0000000000..c4959ddbd4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3/virtualservice.go @@ -0,0 +1,89 @@ +/* +Copyright 2020 The Knative 
Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" + internalinterfaces "knative.dev/serving/pkg/client/istio/informers/externalversions/internalinterfaces" + v1alpha3 "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +// VirtualServiceInformer provides access to a shared informer and lister for +// VirtualServices. +type VirtualServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.VirtualServiceLister +} + +type virtualServiceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVirtualServiceInformer constructs a new informer for VirtualService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVirtualServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVirtualServiceInformer constructs a new informer for VirtualService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().VirtualServices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().VirtualServices(namespace).Watch(options) + }, + }, + &networkingv1alpha3.VirtualService{}, + resyncPeriod, + indexers, + ) +} + +func (f *virtualServiceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVirtualServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *virtualServiceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha3.VirtualService{}, f.defaultInformer) +} + +func (f *virtualServiceInformer) Lister() v1alpha3.VirtualServiceLister { + return v1alpha3.NewVirtualServiceLister(f.Informer().GetIndexer()) +} diff --git 
a/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/client.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/client.go new file mode 100644 index 0000000000..144b33ffc1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/client.go @@ -0,0 +1,49 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package client + +import ( + "context" + + rest "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + versioned "knative.dev/serving/pkg/client/istio/clientset/versioned" +) + +func init() { + injection.Default.RegisterClient(withClient) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg)) +} + +// Get extracts the versioned.Interface client from the context. 
+func Get(ctx context.Context) versioned.Interface { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/clientset/versioned.Interface from context.") + } + return untyped.(versioned.Interface) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/fake/fake.go new file mode 100644 index 0000000000..acc4634c76 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/client/fake/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + fake "knative.dev/serving/pkg/client/istio/clientset/versioned/fake" + client "knative.dev/serving/pkg/client/istio/injection/client" +) + +func init() { + injection.Fake.RegisterClient(withClient) +} + +func withClient(ctx context.Context, cfg *rest.Config) context.Context { + ctx, _ = With(ctx) + return ctx +} + +func With(ctx context.Context, objects ...runtime.Object) (context.Context, *fake.Clientset) { + cs := fake.NewSimpleClientset(objects...) 
+ return context.WithValue(ctx, client.Key{}, cs), cs +} + +// Get extracts the Kubernetes client from the context. +func Get(ctx context.Context) *fake.Clientset { + untyped := ctx.Value(client.Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/clientset/versioned/fake.Clientset from context.") + } + return untyped.(*fake.Clientset) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/factory.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/factory.go new file mode 100644 index 0000000000..923ce343a9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package factory + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + externalversions "knative.dev/serving/pkg/client/istio/informers/externalversions" + client "knative.dev/serving/pkg/client/istio/injection/client" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. 
+type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} + +// Get extracts the InformerFactory from the context. +func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions.SharedInformerFactory from context.") + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/fake/fake.go new file mode 100644 index 0000000000..a928e18fd2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/factory/fake/fake.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + externalversions "knative.dev/serving/pkg/client/istio/informers/externalversions" + fake "knative.dev/serving/pkg/client/istio/injection/client/fake" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +var Get = factory.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, factory.Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/destinationrule.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/destinationrule.go new file mode 100644 index 0000000000..4ab48d4b09 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/destinationrule.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. 
DO NOT EDIT. + +package destinationrule + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().DestinationRules() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha3.DestinationRuleInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.DestinationRuleInformer from context.") + } + return untyped.(v1alpha3.DestinationRuleInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/fake/fake.go new file mode 100644 index 0000000000..8816fe1a2c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + destinationrule "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/destinationrule" +) + +var Get = destinationrule.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().DestinationRules() + return context.WithValue(ctx, destinationrule.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/envoyfilter.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/envoyfilter.go new file mode 100644 index 0000000000..e7d51e29de --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/envoyfilter.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package envoyfilter + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().EnvoyFilters() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha3.EnvoyFilterInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.EnvoyFilterInformer from context.") + } + return untyped.(v1alpha3.EnvoyFilterInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/fake/fake.go new file mode 100644 index 0000000000..8b4ffae237 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + envoyfilter "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/envoyfilter" +) + +var Get = envoyfilter.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().EnvoyFilters() + return context.WithValue(ctx, envoyfilter.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/fake/fake.go new file mode 100644 index 0000000000..0cb7808c66 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + gateway "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway" +) + +var Get = gateway.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().Gateways() + return context.WithValue(ctx, gateway.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/gateway.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/gateway.go new file mode 100644 index 0000000000..d09f8e5770 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/gateway.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package gateway + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().Gateways() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha3.GatewayInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.GatewayInformer from context.") + } + return untyped.(v1alpha3.GatewayInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/fake/fake.go new file mode 100644 index 0000000000..d2cb7cd0ab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + serviceentry "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry" +) + +var Get = serviceentry.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().ServiceEntries() + return context.WithValue(ctx, serviceentry.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/serviceentry.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/serviceentry.go new file mode 100644 index 0000000000..ec829172a8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/serviceentry/serviceentry.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package serviceentry + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().ServiceEntries() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha3.ServiceEntryInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.ServiceEntryInformer from context.") + } + return untyped.(v1alpha3.ServiceEntryInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/fake/fake.go new file mode 100644 index 0000000000..9a14848ec7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + sidecar "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar" +) + +var Get = sidecar.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().Sidecars() + return context.WithValue(ctx, sidecar.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/sidecar.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/sidecar.go new file mode 100644 index 0000000000..2e65cb1fd5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/sidecar/sidecar.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package sidecar + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().Sidecars() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1alpha3.SidecarInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.SidecarInformer from context.") + } + return untyped.(v1alpha3.SidecarInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/fake/fake.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/fake/fake.go new file mode 100644 index 0000000000..055ea2cab8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + fake "knative.dev/serving/pkg/client/istio/injection/informers/factory/fake" + virtualservice "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice" +) + +var Get = virtualservice.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Networking().V1alpha3().VirtualServices() + return context.WithValue(ctx, virtualservice.Key{}, inf), inf.Informer() +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/virtualservice.go b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/virtualservice.go new file mode 100644 index 0000000000..a60c91b799 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/virtualservice.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package virtualservice + +import ( + "context" + + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" + v1alpha3 "knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3" + factory "knative.dev/serving/pkg/client/istio/injection/informers/factory" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Networking().V1alpha3().VirtualServices() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. 
+func Get(ctx context.Context) v1alpha3.VirtualServiceInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch knative.dev/serving/pkg/client/istio/informers/externalversions/networking/v1alpha3.VirtualServiceInformer from context.") + } + return untyped.(v1alpha3.VirtualServiceInformer) +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/destinationrule.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/destinationrule.go new file mode 100644 index 0000000000..ea2c715958 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/destinationrule.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DestinationRuleLister helps list DestinationRules. +type DestinationRuleLister interface { + // List lists all DestinationRules in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // DestinationRules returns an object that can list and get DestinationRules. 
+ DestinationRules(namespace string) DestinationRuleNamespaceLister + DestinationRuleListerExpansion +} + +// destinationRuleLister implements the DestinationRuleLister interface. +type destinationRuleLister struct { + indexer cache.Indexer +} + +// NewDestinationRuleLister returns a new DestinationRuleLister. +func NewDestinationRuleLister(indexer cache.Indexer) DestinationRuleLister { + return &destinationRuleLister{indexer: indexer} +} + +// List lists all DestinationRules in the indexer. +func (s *destinationRuleLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// DestinationRules returns an object that can list and get DestinationRules. +func (s *destinationRuleLister) DestinationRules(namespace string) DestinationRuleNamespaceLister { + return destinationRuleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DestinationRuleNamespaceLister helps list and get DestinationRules. +type DestinationRuleNamespaceLister interface { + // List lists all DestinationRules in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // Get retrieves the DestinationRule from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.DestinationRule, error) + DestinationRuleNamespaceListerExpansion +} + +// destinationRuleNamespaceLister implements the DestinationRuleNamespaceLister +// interface. +type destinationRuleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DestinationRules in the indexer for a given namespace. 
+func (s destinationRuleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// Get retrieves the DestinationRule from the indexer for a given namespace and name. +func (s destinationRuleNamespaceLister) Get(name string) (*v1alpha3.DestinationRule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("destinationrule"), name) + } + return obj.(*v1alpha3.DestinationRule), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/envoyfilter.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/envoyfilter.go new file mode 100644 index 0000000000..9c6e2dd1b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/envoyfilter.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EnvoyFilterLister helps list EnvoyFilters. 
+type EnvoyFilterLister interface { + // List lists all EnvoyFilters in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.EnvoyFilter, err error) + // EnvoyFilters returns an object that can list and get EnvoyFilters. + EnvoyFilters(namespace string) EnvoyFilterNamespaceLister + EnvoyFilterListerExpansion +} + +// envoyFilterLister implements the EnvoyFilterLister interface. +type envoyFilterLister struct { + indexer cache.Indexer +} + +// NewEnvoyFilterLister returns a new EnvoyFilterLister. +func NewEnvoyFilterLister(indexer cache.Indexer) EnvoyFilterLister { + return &envoyFilterLister{indexer: indexer} +} + +// List lists all EnvoyFilters in the indexer. +func (s *envoyFilterLister) List(selector labels.Selector) (ret []*v1alpha3.EnvoyFilter, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.EnvoyFilter)) + }) + return ret, err +} + +// EnvoyFilters returns an object that can list and get EnvoyFilters. +func (s *envoyFilterLister) EnvoyFilters(namespace string) EnvoyFilterNamespaceLister { + return envoyFilterNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EnvoyFilterNamespaceLister helps list and get EnvoyFilters. +type EnvoyFilterNamespaceLister interface { + // List lists all EnvoyFilters in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.EnvoyFilter, err error) + // Get retrieves the EnvoyFilter from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.EnvoyFilter, error) + EnvoyFilterNamespaceListerExpansion +} + +// envoyFilterNamespaceLister implements the EnvoyFilterNamespaceLister +// interface. +type envoyFilterNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all EnvoyFilters in the indexer for a given namespace. 
+func (s envoyFilterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.EnvoyFilter, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.EnvoyFilter)) + }) + return ret, err +} + +// Get retrieves the EnvoyFilter from the indexer for a given namespace and name. +func (s envoyFilterNamespaceLister) Get(name string) (*v1alpha3.EnvoyFilter, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("envoyfilter"), name) + } + return obj.(*v1alpha3.EnvoyFilter), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/expansion_generated.go new file mode 100644 index 0000000000..3a6e0b4650 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/expansion_generated.go @@ -0,0 +1,67 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +// DestinationRuleListerExpansion allows custom methods to be added to +// DestinationRuleLister. 
+type DestinationRuleListerExpansion interface{} + +// DestinationRuleNamespaceListerExpansion allows custom methods to be added to +// DestinationRuleNamespaceLister. +type DestinationRuleNamespaceListerExpansion interface{} + +// EnvoyFilterListerExpansion allows custom methods to be added to +// EnvoyFilterLister. +type EnvoyFilterListerExpansion interface{} + +// EnvoyFilterNamespaceListerExpansion allows custom methods to be added to +// EnvoyFilterNamespaceLister. +type EnvoyFilterNamespaceListerExpansion interface{} + +// GatewayListerExpansion allows custom methods to be added to +// GatewayLister. +type GatewayListerExpansion interface{} + +// GatewayNamespaceListerExpansion allows custom methods to be added to +// GatewayNamespaceLister. +type GatewayNamespaceListerExpansion interface{} + +// ServiceEntryListerExpansion allows custom methods to be added to +// ServiceEntryLister. +type ServiceEntryListerExpansion interface{} + +// ServiceEntryNamespaceListerExpansion allows custom methods to be added to +// ServiceEntryNamespaceLister. +type ServiceEntryNamespaceListerExpansion interface{} + +// SidecarListerExpansion allows custom methods to be added to +// SidecarLister. +type SidecarListerExpansion interface{} + +// SidecarNamespaceListerExpansion allows custom methods to be added to +// SidecarNamespaceLister. +type SidecarNamespaceListerExpansion interface{} + +// VirtualServiceListerExpansion allows custom methods to be added to +// VirtualServiceLister. +type VirtualServiceListerExpansion interface{} + +// VirtualServiceNamespaceListerExpansion allows custom methods to be added to +// VirtualServiceNamespaceLister. 
+type VirtualServiceNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/gateway.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/gateway.go new file mode 100644 index 0000000000..c04309fd82 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/gateway.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// GatewayLister helps list Gateways. +type GatewayLister interface { + // List lists all Gateways in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Gateways returns an object that can list and get Gateways. + Gateways(namespace string) GatewayNamespaceLister + GatewayListerExpansion +} + +// gatewayLister implements the GatewayLister interface. +type gatewayLister struct { + indexer cache.Indexer +} + +// NewGatewayLister returns a new GatewayLister. +func NewGatewayLister(indexer cache.Indexer) GatewayLister { + return &gatewayLister{indexer: indexer} +} + +// List lists all Gateways in the indexer. 
+func (s *gatewayLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Gateways returns an object that can list and get Gateways. +func (s *gatewayLister) Gateways(namespace string) GatewayNamespaceLister { + return gatewayNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// GatewayNamespaceLister helps list and get Gateways. +type GatewayNamespaceLister interface { + // List lists all Gateways in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Get retrieves the Gateway from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.Gateway, error) + GatewayNamespaceListerExpansion +} + +// gatewayNamespaceLister implements the GatewayNamespaceLister +// interface. +type gatewayNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Gateways in the indexer for a given namespace. +func (s gatewayNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Get retrieves the Gateway from the indexer for a given namespace and name. 
+func (s gatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("gateway"), name) + } + return obj.(*v1alpha3.Gateway), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/serviceentry.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/serviceentry.go new file mode 100644 index 0000000000..7222d1e887 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/serviceentry.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceEntryLister helps list ServiceEntries. +type ServiceEntryLister interface { + // List lists all ServiceEntries in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.ServiceEntry, err error) + // ServiceEntries returns an object that can list and get ServiceEntries. + ServiceEntries(namespace string) ServiceEntryNamespaceLister + ServiceEntryListerExpansion +} + +// serviceEntryLister implements the ServiceEntryLister interface. 
+type serviceEntryLister struct { + indexer cache.Indexer +} + +// NewServiceEntryLister returns a new ServiceEntryLister. +func NewServiceEntryLister(indexer cache.Indexer) ServiceEntryLister { + return &serviceEntryLister{indexer: indexer} +} + +// List lists all ServiceEntries in the indexer. +func (s *serviceEntryLister) List(selector labels.Selector) (ret []*v1alpha3.ServiceEntry, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.ServiceEntry)) + }) + return ret, err +} + +// ServiceEntries returns an object that can list and get ServiceEntries. +func (s *serviceEntryLister) ServiceEntries(namespace string) ServiceEntryNamespaceLister { + return serviceEntryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceEntryNamespaceLister helps list and get ServiceEntries. +type ServiceEntryNamespaceLister interface { + // List lists all ServiceEntries in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.ServiceEntry, err error) + // Get retrieves the ServiceEntry from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.ServiceEntry, error) + ServiceEntryNamespaceListerExpansion +} + +// serviceEntryNamespaceLister implements the ServiceEntryNamespaceLister +// interface. +type serviceEntryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceEntries in the indexer for a given namespace. +func (s serviceEntryNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.ServiceEntry, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.ServiceEntry)) + }) + return ret, err +} + +// Get retrieves the ServiceEntry from the indexer for a given namespace and name. 
+func (s serviceEntryNamespaceLister) Get(name string) (*v1alpha3.ServiceEntry, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("serviceentry"), name) + } + return obj.(*v1alpha3.ServiceEntry), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/sidecar.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/sidecar.go new file mode 100644 index 0000000000..c41d03a9ad --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/sidecar.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// SidecarLister helps list Sidecars. +type SidecarLister interface { + // List lists all Sidecars in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.Sidecar, err error) + // Sidecars returns an object that can list and get Sidecars. + Sidecars(namespace string) SidecarNamespaceLister + SidecarListerExpansion +} + +// sidecarLister implements the SidecarLister interface. 
+type sidecarLister struct { + indexer cache.Indexer +} + +// NewSidecarLister returns a new SidecarLister. +func NewSidecarLister(indexer cache.Indexer) SidecarLister { + return &sidecarLister{indexer: indexer} +} + +// List lists all Sidecars in the indexer. +func (s *sidecarLister) List(selector labels.Selector) (ret []*v1alpha3.Sidecar, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Sidecar)) + }) + return ret, err +} + +// Sidecars returns an object that can list and get Sidecars. +func (s *sidecarLister) Sidecars(namespace string) SidecarNamespaceLister { + return sidecarNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SidecarNamespaceLister helps list and get Sidecars. +type SidecarNamespaceLister interface { + // List lists all Sidecars in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.Sidecar, err error) + // Get retrieves the Sidecar from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.Sidecar, error) + SidecarNamespaceListerExpansion +} + +// sidecarNamespaceLister implements the SidecarNamespaceLister +// interface. +type sidecarNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Sidecars in the indexer for a given namespace. +func (s sidecarNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Sidecar, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Sidecar)) + }) + return ret, err +} + +// Get retrieves the Sidecar from the indexer for a given namespace and name. 
+func (s sidecarNamespaceLister) Get(name string) (*v1alpha3.Sidecar, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("sidecar"), name) + } + return obj.(*v1alpha3.Sidecar), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/virtualservice.go b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/virtualservice.go new file mode 100644 index 0000000000..4fbc26a67c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3/virtualservice.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VirtualServiceLister helps list VirtualServices. +type VirtualServiceLister interface { + // List lists all VirtualServices in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // VirtualServices returns an object that can list and get VirtualServices. 
+ VirtualServices(namespace string) VirtualServiceNamespaceLister + VirtualServiceListerExpansion +} + +// virtualServiceLister implements the VirtualServiceLister interface. +type virtualServiceLister struct { + indexer cache.Indexer +} + +// NewVirtualServiceLister returns a new VirtualServiceLister. +func NewVirtualServiceLister(indexer cache.Indexer) VirtualServiceLister { + return &virtualServiceLister{indexer: indexer} +} + +// List lists all VirtualServices in the indexer. +func (s *virtualServiceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.VirtualService)) + }) + return ret, err +} + +// VirtualServices returns an object that can list and get VirtualServices. +func (s *virtualServiceLister) VirtualServices(namespace string) VirtualServiceNamespaceLister { + return virtualServiceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VirtualServiceNamespaceLister helps list and get VirtualServices. +type VirtualServiceNamespaceLister interface { + // List lists all VirtualServices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // Get retrieves the VirtualService from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.VirtualService, error) + VirtualServiceNamespaceListerExpansion +} + +// virtualServiceNamespaceLister implements the VirtualServiceNamespaceLister +// interface. +type virtualServiceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VirtualServices in the indexer for a given namespace. 
+func (s virtualServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.VirtualService)) + }) + return ret, err +} + +// Get retrieves the VirtualService from the indexer for a given namespace and name. +func (s virtualServiceNamespaceLister) Get(name string) (*v1alpha3.VirtualService, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("virtualservice"), name) + } + return obj.(*v1alpha3.VirtualService), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..58fd37c078 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// MetricListerExpansion allows custom methods to be added to +// MetricLister. +type MetricListerExpansion interface{} + +// MetricNamespaceListerExpansion allows custom methods to be added to +// MetricNamespaceLister. 
+type MetricNamespaceListerExpansion interface{} + +// PodAutoscalerListerExpansion allows custom methods to be added to +// PodAutoscalerLister. +type PodAutoscalerListerExpansion interface{} + +// PodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// PodAutoscalerNamespaceLister. +type PodAutoscalerNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/metric.go b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/metric.go new file mode 100644 index 0000000000..192ec63b93 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/metric.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +// MetricLister helps list Metrics. +type MetricLister interface { + // List lists all Metrics in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Metric, err error) + // Metrics returns an object that can list and get Metrics. + Metrics(namespace string) MetricNamespaceLister + MetricListerExpansion +} + +// metricLister implements the MetricLister interface. 
+type metricLister struct { + indexer cache.Indexer +} + +// NewMetricLister returns a new MetricLister. +func NewMetricLister(indexer cache.Indexer) MetricLister { + return &metricLister{indexer: indexer} +} + +// List lists all Metrics in the indexer. +func (s *metricLister) List(selector labels.Selector) (ret []*v1alpha1.Metric, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Metric)) + }) + return ret, err +} + +// Metrics returns an object that can list and get Metrics. +func (s *metricLister) Metrics(namespace string) MetricNamespaceLister { + return metricNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// MetricNamespaceLister helps list and get Metrics. +type MetricNamespaceLister interface { + // List lists all Metrics in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Metric, err error) + // Get retrieves the Metric from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Metric, error) + MetricNamespaceListerExpansion +} + +// metricNamespaceLister implements the MetricNamespaceLister +// interface. +type metricNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Metrics in the indexer for a given namespace. +func (s metricNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Metric, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Metric)) + }) + return ret, err +} + +// Get retrieves the Metric from the indexer for a given namespace and name. 
+func (s metricNamespaceLister) Get(name string) (*v1alpha1.Metric, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("metric"), name) + } + return obj.(*v1alpha1.Metric), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/podautoscaler.go b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/podautoscaler.go new file mode 100644 index 0000000000..f9cdc39448 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1/podautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +// PodAutoscalerLister helps list PodAutoscalers. +type PodAutoscalerLister interface { + // List lists all PodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PodAutoscaler, err error) + // PodAutoscalers returns an object that can list and get PodAutoscalers. + PodAutoscalers(namespace string) PodAutoscalerNamespaceLister + PodAutoscalerListerExpansion +} + +// podAutoscalerLister implements the PodAutoscalerLister interface. 
+type podAutoscalerLister struct { + indexer cache.Indexer +} + +// NewPodAutoscalerLister returns a new PodAutoscalerLister. +func NewPodAutoscalerLister(indexer cache.Indexer) PodAutoscalerLister { + return &podAutoscalerLister{indexer: indexer} +} + +// List lists all PodAutoscalers in the indexer. +func (s *podAutoscalerLister) List(selector labels.Selector) (ret []*v1alpha1.PodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodAutoscaler)) + }) + return ret, err +} + +// PodAutoscalers returns an object that can list and get PodAutoscalers. +func (s *podAutoscalerLister) PodAutoscalers(namespace string) PodAutoscalerNamespaceLister { + return podAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodAutoscalerNamespaceLister helps list and get PodAutoscalers. +type PodAutoscalerNamespaceLister interface { + // List lists all PodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.PodAutoscaler, err error) + // Get retrieves the PodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.PodAutoscaler, error) + PodAutoscalerNamespaceListerExpansion +} + +// podAutoscalerNamespaceLister implements the PodAutoscalerNamespaceLister +// interface. +type podAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodAutoscalers in the indexer for a given namespace. +func (s podAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the PodAutoscaler from the indexer for a given namespace and name. 
+func (s podAutoscalerNamespaceLister) Get(name string) (*v1alpha1.PodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("podautoscaler"), name) + } + return obj.(*v1alpha1.PodAutoscaler), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/certificate.go b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/certificate.go new file mode 100644 index 0000000000..81c7473c17 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/certificate.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// CertificateLister helps list Certificates. +type CertificateLister interface { + // List lists all Certificates in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Certificate, err error) + // Certificates returns an object that can list and get Certificates. + Certificates(namespace string) CertificateNamespaceLister + CertificateListerExpansion +} + +// certificateLister implements the CertificateLister interface. 
+type certificateLister struct { + indexer cache.Indexer +} + +// NewCertificateLister returns a new CertificateLister. +func NewCertificateLister(indexer cache.Indexer) CertificateLister { + return &certificateLister{indexer: indexer} +} + +// List lists all Certificates in the indexer. +func (s *certificateLister) List(selector labels.Selector) (ret []*v1alpha1.Certificate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Certificate)) + }) + return ret, err +} + +// Certificates returns an object that can list and get Certificates. +func (s *certificateLister) Certificates(namespace string) CertificateNamespaceLister { + return certificateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateNamespaceLister helps list and get Certificates. +type CertificateNamespaceLister interface { + // List lists all Certificates in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Certificate, err error) + // Get retrieves the Certificate from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Certificate, error) + CertificateNamespaceListerExpansion +} + +// certificateNamespaceLister implements the CertificateNamespaceLister +// interface. +type certificateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Certificates in the indexer for a given namespace. +func (s certificateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Certificate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Certificate)) + }) + return ret, err +} + +// Get retrieves the Certificate from the indexer for a given namespace and name. 
+func (s certificateNamespaceLister) Get(name string) (*v1alpha1.Certificate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("certificate"), name) + } + return obj.(*v1alpha1.Certificate), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..a31fdafdcf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// CertificateListerExpansion allows custom methods to be added to +// CertificateLister. +type CertificateListerExpansion interface{} + +// CertificateNamespaceListerExpansion allows custom methods to be added to +// CertificateNamespaceLister. +type CertificateNamespaceListerExpansion interface{} + +// IngressListerExpansion allows custom methods to be added to +// IngressLister. +type IngressListerExpansion interface{} + +// IngressNamespaceListerExpansion allows custom methods to be added to +// IngressNamespaceLister. 
+type IngressNamespaceListerExpansion interface{} + +// ServerlessServiceListerExpansion allows custom methods to be added to +// ServerlessServiceLister. +type ServerlessServiceListerExpansion interface{} + +// ServerlessServiceNamespaceListerExpansion allows custom methods to be added to +// ServerlessServiceNamespaceLister. +type ServerlessServiceNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/ingress.go b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/ingress.go new file mode 100644 index 0000000000..14efa3e959 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/ingress.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// IngressLister helps list Ingresses. +type IngressLister interface { + // List lists all Ingresses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Ingress, err error) + // Ingresses returns an object that can list and get Ingresses. + Ingresses(namespace string) IngressNamespaceLister + IngressListerExpansion +} + +// ingressLister implements the IngressLister interface. 
+type ingressLister struct { + indexer cache.Indexer +} + +// NewIngressLister returns a new IngressLister. +func NewIngressLister(indexer cache.Indexer) IngressLister { + return &ingressLister{indexer: indexer} +} + +// List lists all Ingresses in the indexer. +func (s *ingressLister) List(selector labels.Selector) (ret []*v1alpha1.Ingress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Ingress)) + }) + return ret, err +} + +// Ingresses returns an object that can list and get Ingresses. +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { + return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IngressNamespaceLister helps list and get Ingresses. +type IngressNamespaceLister interface { + // List lists all Ingresses in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Ingress, err error) + // Get retrieves the Ingress from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Ingress, error) + IngressNamespaceListerExpansion +} + +// ingressNamespaceLister implements the IngressNamespaceLister +// interface. +type ingressNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Ingresses in the indexer for a given namespace. +func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Ingress, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Ingress)) + }) + return ret, err +} + +// Get retrieves the Ingress from the indexer for a given namespace and name. 
+func (s ingressNamespaceLister) Get(name string) (*v1alpha1.Ingress, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("ingress"), name) + } + return obj.(*v1alpha1.Ingress), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/serverlessservice.go b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/serverlessservice.go new file mode 100644 index 0000000000..f6d3fcd9f0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/networking/v1alpha1/serverlessservice.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// ServerlessServiceLister helps list ServerlessServices. +type ServerlessServiceLister interface { + // List lists all ServerlessServices in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ServerlessService, err error) + // ServerlessServices returns an object that can list and get ServerlessServices. 
+ ServerlessServices(namespace string) ServerlessServiceNamespaceLister + ServerlessServiceListerExpansion +} + +// serverlessServiceLister implements the ServerlessServiceLister interface. +type serverlessServiceLister struct { + indexer cache.Indexer +} + +// NewServerlessServiceLister returns a new ServerlessServiceLister. +func NewServerlessServiceLister(indexer cache.Indexer) ServerlessServiceLister { + return &serverlessServiceLister{indexer: indexer} +} + +// List lists all ServerlessServices in the indexer. +func (s *serverlessServiceLister) List(selector labels.Selector) (ret []*v1alpha1.ServerlessService, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ServerlessService)) + }) + return ret, err +} + +// ServerlessServices returns an object that can list and get ServerlessServices. +func (s *serverlessServiceLister) ServerlessServices(namespace string) ServerlessServiceNamespaceLister { + return serverlessServiceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServerlessServiceNamespaceLister helps list and get ServerlessServices. +type ServerlessServiceNamespaceLister interface { + // List lists all ServerlessServices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.ServerlessService, err error) + // Get retrieves the ServerlessService from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.ServerlessService, error) + ServerlessServiceNamespaceListerExpansion +} + +// serverlessServiceNamespaceLister implements the ServerlessServiceNamespaceLister +// interface. +type serverlessServiceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServerlessServices in the indexer for a given namespace. 
+func (s serverlessServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ServerlessService, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ServerlessService)) + }) + return ret, err +} + +// Get retrieves the ServerlessService from the indexer for a given namespace and name. +func (s serverlessServiceNamespaceLister) Get(name string) (*v1alpha1.ServerlessService, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("serverlessservice"), name) + } + return obj.(*v1alpha1.ServerlessService), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/configuration.go new file mode 100644 index 0000000000..94c3f33641 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/configuration.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// ConfigurationLister helps list Configurations. 
+type ConfigurationLister interface { + // List lists all Configurations in the indexer. + List(selector labels.Selector) (ret []*v1.Configuration, err error) + // Configurations returns an object that can list and get Configurations. + Configurations(namespace string) ConfigurationNamespaceLister + ConfigurationListerExpansion +} + +// configurationLister implements the ConfigurationLister interface. +type configurationLister struct { + indexer cache.Indexer +} + +// NewConfigurationLister returns a new ConfigurationLister. +func NewConfigurationLister(indexer cache.Indexer) ConfigurationLister { + return &configurationLister{indexer: indexer} +} + +// List lists all Configurations in the indexer. +func (s *configurationLister) List(selector labels.Selector) (ret []*v1.Configuration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Configuration)) + }) + return ret, err +} + +// Configurations returns an object that can list and get Configurations. +func (s *configurationLister) Configurations(namespace string) ConfigurationNamespaceLister { + return configurationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigurationNamespaceLister helps list and get Configurations. +type ConfigurationNamespaceLister interface { + // List lists all Configurations in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Configuration, err error) + // Get retrieves the Configuration from the indexer for a given namespace and name. + Get(name string) (*v1.Configuration, error) + ConfigurationNamespaceListerExpansion +} + +// configurationNamespaceLister implements the ConfigurationNamespaceLister +// interface. +type configurationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Configurations in the indexer for a given namespace. 
+func (s configurationNamespaceLister) List(selector labels.Selector) (ret []*v1.Configuration, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Configuration)) + }) + return ret, err +} + +// Get retrieves the Configuration from the indexer for a given namespace and name. +func (s configurationNamespaceLister) Get(name string) (*v1.Configuration, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("configuration"), name) + } + return obj.(*v1.Configuration), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/expansion_generated.go new file mode 100644 index 0000000000..091768b12d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/expansion_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ConfigurationListerExpansion allows custom methods to be added to +// ConfigurationLister. +type ConfigurationListerExpansion interface{} + +// ConfigurationNamespaceListerExpansion allows custom methods to be added to +// ConfigurationNamespaceLister. 
+type ConfigurationNamespaceListerExpansion interface{} + +// RevisionListerExpansion allows custom methods to be added to +// RevisionLister. +type RevisionListerExpansion interface{} + +// RevisionNamespaceListerExpansion allows custom methods to be added to +// RevisionNamespaceLister. +type RevisionNamespaceListerExpansion interface{} + +// RouteListerExpansion allows custom methods to be added to +// RouteLister. +type RouteListerExpansion interface{} + +// RouteNamespaceListerExpansion allows custom methods to be added to +// RouteNamespaceLister. +type RouteNamespaceListerExpansion interface{} + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface{} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/revision.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/revision.go new file mode 100644 index 0000000000..f53c58db1b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/revision.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// RevisionLister helps list Revisions. +type RevisionLister interface { + // List lists all Revisions in the indexer. + List(selector labels.Selector) (ret []*v1.Revision, err error) + // Revisions returns an object that can list and get Revisions. + Revisions(namespace string) RevisionNamespaceLister + RevisionListerExpansion +} + +// revisionLister implements the RevisionLister interface. +type revisionLister struct { + indexer cache.Indexer +} + +// NewRevisionLister returns a new RevisionLister. +func NewRevisionLister(indexer cache.Indexer) RevisionLister { + return &revisionLister{indexer: indexer} +} + +// List lists all Revisions in the indexer. +func (s *revisionLister) List(selector labels.Selector) (ret []*v1.Revision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Revision)) + }) + return ret, err +} + +// Revisions returns an object that can list and get Revisions. +func (s *revisionLister) Revisions(namespace string) RevisionNamespaceLister { + return revisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RevisionNamespaceLister helps list and get Revisions. +type RevisionNamespaceLister interface { + // List lists all Revisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Revision, err error) + // Get retrieves the Revision from the indexer for a given namespace and name. + Get(name string) (*v1.Revision, error) + RevisionNamespaceListerExpansion +} + +// revisionNamespaceLister implements the RevisionNamespaceLister +// interface. +type revisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Revisions in the indexer for a given namespace. 
+func (s revisionNamespaceLister) List(selector labels.Selector) (ret []*v1.Revision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Revision)) + }) + return ret, err +} + +// Get retrieves the Revision from the indexer for a given namespace and name. +func (s revisionNamespaceLister) Get(name string) (*v1.Revision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("revision"), name) + } + return obj.(*v1.Revision), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/route.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/route.go new file mode 100644 index 0000000000..02e6374fb5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/route.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// RouteLister helps list Routes. +type RouteLister interface { + // List lists all Routes in the indexer. + List(selector labels.Selector) (ret []*v1.Route, err error) + // Routes returns an object that can list and get Routes. 
+ Routes(namespace string) RouteNamespaceLister + RouteListerExpansion +} + +// routeLister implements the RouteLister interface. +type routeLister struct { + indexer cache.Indexer +} + +// NewRouteLister returns a new RouteLister. +func NewRouteLister(indexer cache.Indexer) RouteLister { + return &routeLister{indexer: indexer} +} + +// List lists all Routes in the indexer. +func (s *routeLister) List(selector labels.Selector) (ret []*v1.Route, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Route)) + }) + return ret, err +} + +// Routes returns an object that can list and get Routes. +func (s *routeLister) Routes(namespace string) RouteNamespaceLister { + return routeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RouteNamespaceLister helps list and get Routes. +type RouteNamespaceLister interface { + // List lists all Routes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Route, err error) + // Get retrieves the Route from the indexer for a given namespace and name. + Get(name string) (*v1.Route, error) + RouteNamespaceListerExpansion +} + +// routeNamespaceLister implements the RouteNamespaceLister +// interface. +type routeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Routes in the indexer for a given namespace. +func (s routeNamespaceLister) List(selector labels.Selector) (ret []*v1.Route, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Route)) + }) + return ret, err +} + +// Get retrieves the Route from the indexer for a given namespace and name. 
+func (s routeNamespaceLister) Get(name string) (*v1.Route, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("route"), name) + } + return obj.(*v1.Route), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/service.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/service.go new file mode 100644 index 0000000000..5ba9b86c19 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1/service.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. 
+func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. 
+func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("service"), name) + } + return obj.(*v1.Service), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/configuration.go new file mode 100644 index 0000000000..0b3e4d65e6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/configuration.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// ConfigurationLister helps list Configurations. +type ConfigurationLister interface { + // List lists all Configurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Configuration, err error) + // Configurations returns an object that can list and get Configurations. + Configurations(namespace string) ConfigurationNamespaceLister + ConfigurationListerExpansion +} + +// configurationLister implements the ConfigurationLister interface. 
+type configurationLister struct { + indexer cache.Indexer +} + +// NewConfigurationLister returns a new ConfigurationLister. +func NewConfigurationLister(indexer cache.Indexer) ConfigurationLister { + return &configurationLister{indexer: indexer} +} + +// List lists all Configurations in the indexer. +func (s *configurationLister) List(selector labels.Selector) (ret []*v1alpha1.Configuration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Configuration)) + }) + return ret, err +} + +// Configurations returns an object that can list and get Configurations. +func (s *configurationLister) Configurations(namespace string) ConfigurationNamespaceLister { + return configurationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigurationNamespaceLister helps list and get Configurations. +type ConfigurationNamespaceLister interface { + // List lists all Configurations in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Configuration, err error) + // Get retrieves the Configuration from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Configuration, error) + ConfigurationNamespaceListerExpansion +} + +// configurationNamespaceLister implements the ConfigurationNamespaceLister +// interface. +type configurationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Configurations in the indexer for a given namespace. +func (s configurationNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Configuration, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Configuration)) + }) + return ret, err +} + +// Get retrieves the Configuration from the indexer for a given namespace and name. 
+func (s configurationNamespaceLister) Get(name string) (*v1alpha1.Configuration, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("configuration"), name) + } + return obj.(*v1alpha1.Configuration), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..cc9c0e1aa6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/expansion_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ConfigurationListerExpansion allows custom methods to be added to +// ConfigurationLister. +type ConfigurationListerExpansion interface{} + +// ConfigurationNamespaceListerExpansion allows custom methods to be added to +// ConfigurationNamespaceLister. +type ConfigurationNamespaceListerExpansion interface{} + +// RevisionListerExpansion allows custom methods to be added to +// RevisionLister. +type RevisionListerExpansion interface{} + +// RevisionNamespaceListerExpansion allows custom methods to be added to +// RevisionNamespaceLister. 
+type RevisionNamespaceListerExpansion interface{} + +// RouteListerExpansion allows custom methods to be added to +// RouteLister. +type RouteListerExpansion interface{} + +// RouteNamespaceListerExpansion allows custom methods to be added to +// RouteNamespaceLister. +type RouteNamespaceListerExpansion interface{} + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface{} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/revision.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/revision.go new file mode 100644 index 0000000000..508c59df23 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/revision.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// RevisionLister helps list Revisions. +type RevisionLister interface { + // List lists all Revisions in the indexer. 
+ List(selector labels.Selector) (ret []*v1alpha1.Revision, err error) + // Revisions returns an object that can list and get Revisions. + Revisions(namespace string) RevisionNamespaceLister + RevisionListerExpansion +} + +// revisionLister implements the RevisionLister interface. +type revisionLister struct { + indexer cache.Indexer +} + +// NewRevisionLister returns a new RevisionLister. +func NewRevisionLister(indexer cache.Indexer) RevisionLister { + return &revisionLister{indexer: indexer} +} + +// List lists all Revisions in the indexer. +func (s *revisionLister) List(selector labels.Selector) (ret []*v1alpha1.Revision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Revision)) + }) + return ret, err +} + +// Revisions returns an object that can list and get Revisions. +func (s *revisionLister) Revisions(namespace string) RevisionNamespaceLister { + return revisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RevisionNamespaceLister helps list and get Revisions. +type RevisionNamespaceLister interface { + // List lists all Revisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Revision, err error) + // Get retrieves the Revision from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Revision, error) + RevisionNamespaceListerExpansion +} + +// revisionNamespaceLister implements the RevisionNamespaceLister +// interface. +type revisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Revisions in the indexer for a given namespace. +func (s revisionNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Revision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Revision)) + }) + return ret, err +} + +// Get retrieves the Revision from the indexer for a given namespace and name. 
+func (s revisionNamespaceLister) Get(name string) (*v1alpha1.Revision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("revision"), name) + } + return obj.(*v1alpha1.Revision), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/route.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/route.go new file mode 100644 index 0000000000..aee2b9f4a5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/route.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// RouteLister helps list Routes. +type RouteLister interface { + // List lists all Routes in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Route, err error) + // Routes returns an object that can list and get Routes. + Routes(namespace string) RouteNamespaceLister + RouteListerExpansion +} + +// routeLister implements the RouteLister interface. +type routeLister struct { + indexer cache.Indexer +} + +// NewRouteLister returns a new RouteLister. 
+func NewRouteLister(indexer cache.Indexer) RouteLister { + return &routeLister{indexer: indexer} +} + +// List lists all Routes in the indexer. +func (s *routeLister) List(selector labels.Selector) (ret []*v1alpha1.Route, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Route)) + }) + return ret, err +} + +// Routes returns an object that can list and get Routes. +func (s *routeLister) Routes(namespace string) RouteNamespaceLister { + return routeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RouteNamespaceLister helps list and get Routes. +type RouteNamespaceLister interface { + // List lists all Routes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Route, err error) + // Get retrieves the Route from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Route, error) + RouteNamespaceListerExpansion +} + +// routeNamespaceLister implements the RouteNamespaceLister +// interface. +type routeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Routes in the indexer for a given namespace. +func (s routeNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Route, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Route)) + }) + return ret, err +} + +// Get retrieves the Route from the indexer for a given namespace and name. 
+func (s routeNamespaceLister) Get(name string) (*v1alpha1.Route, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("route"), name) + } + return obj.(*v1alpha1.Route), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/service.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/service.go new file mode 100644 index 0000000000..798d2b2757 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1alpha1/service.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. 
+func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1alpha1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. 
+func (s serviceNamespaceLister) Get(name string) (*v1alpha1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("service"), name) + } + return obj.(*v1alpha1.Service), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/configuration.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/configuration.go new file mode 100644 index 0000000000..5ccceff028 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/configuration.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// ConfigurationLister helps list Configurations. +type ConfigurationLister interface { + // List lists all Configurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Configuration, err error) + // Configurations returns an object that can list and get Configurations. + Configurations(namespace string) ConfigurationNamespaceLister + ConfigurationListerExpansion +} + +// configurationLister implements the ConfigurationLister interface. 
+type configurationLister struct { + indexer cache.Indexer +} + +// NewConfigurationLister returns a new ConfigurationLister. +func NewConfigurationLister(indexer cache.Indexer) ConfigurationLister { + return &configurationLister{indexer: indexer} +} + +// List lists all Configurations in the indexer. +func (s *configurationLister) List(selector labels.Selector) (ret []*v1beta1.Configuration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Configuration)) + }) + return ret, err +} + +// Configurations returns an object that can list and get Configurations. +func (s *configurationLister) Configurations(namespace string) ConfigurationNamespaceLister { + return configurationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigurationNamespaceLister helps list and get Configurations. +type ConfigurationNamespaceLister interface { + // List lists all Configurations in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Configuration, err error) + // Get retrieves the Configuration from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Configuration, error) + ConfigurationNamespaceListerExpansion +} + +// configurationNamespaceLister implements the ConfigurationNamespaceLister +// interface. +type configurationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Configurations in the indexer for a given namespace. +func (s configurationNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Configuration, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Configuration)) + }) + return ret, err +} + +// Get retrieves the Configuration from the indexer for a given namespace and name. 
+func (s configurationNamespaceLister) Get(name string) (*v1beta1.Configuration, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("configuration"), name) + } + return obj.(*v1beta1.Configuration), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/expansion_generated.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/expansion_generated.go new file mode 100644 index 0000000000..ad73dc85de --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/expansion_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// ConfigurationListerExpansion allows custom methods to be added to +// ConfigurationLister. +type ConfigurationListerExpansion interface{} + +// ConfigurationNamespaceListerExpansion allows custom methods to be added to +// ConfigurationNamespaceLister. +type ConfigurationNamespaceListerExpansion interface{} + +// RevisionListerExpansion allows custom methods to be added to +// RevisionLister. +type RevisionListerExpansion interface{} + +// RevisionNamespaceListerExpansion allows custom methods to be added to +// RevisionNamespaceLister. 
+type RevisionNamespaceListerExpansion interface{} + +// RouteListerExpansion allows custom methods to be added to +// RouteLister. +type RouteListerExpansion interface{} + +// RouteNamespaceListerExpansion allows custom methods to be added to +// RouteNamespaceLister. +type RouteNamespaceListerExpansion interface{} + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface{} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/revision.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/revision.go new file mode 100644 index 0000000000..a74c986f56 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/revision.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// RevisionLister helps list Revisions. +type RevisionLister interface { + // List lists all Revisions in the indexer. 
+ List(selector labels.Selector) (ret []*v1beta1.Revision, err error) + // Revisions returns an object that can list and get Revisions. + Revisions(namespace string) RevisionNamespaceLister + RevisionListerExpansion +} + +// revisionLister implements the RevisionLister interface. +type revisionLister struct { + indexer cache.Indexer +} + +// NewRevisionLister returns a new RevisionLister. +func NewRevisionLister(indexer cache.Indexer) RevisionLister { + return &revisionLister{indexer: indexer} +} + +// List lists all Revisions in the indexer. +func (s *revisionLister) List(selector labels.Selector) (ret []*v1beta1.Revision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Revision)) + }) + return ret, err +} + +// Revisions returns an object that can list and get Revisions. +func (s *revisionLister) Revisions(namespace string) RevisionNamespaceLister { + return revisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RevisionNamespaceLister helps list and get Revisions. +type RevisionNamespaceLister interface { + // List lists all Revisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Revision, err error) + // Get retrieves the Revision from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Revision, error) + RevisionNamespaceListerExpansion +} + +// revisionNamespaceLister implements the RevisionNamespaceLister +// interface. +type revisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Revisions in the indexer for a given namespace. +func (s revisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Revision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Revision)) + }) + return ret, err +} + +// Get retrieves the Revision from the indexer for a given namespace and name. 
+func (s revisionNamespaceLister) Get(name string) (*v1beta1.Revision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("revision"), name) + } + return obj.(*v1beta1.Revision), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/route.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/route.go new file mode 100644 index 0000000000..d965d0b2cd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/route.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// RouteLister helps list Routes. +type RouteLister interface { + // List lists all Routes in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Route, err error) + // Routes returns an object that can list and get Routes. + Routes(namespace string) RouteNamespaceLister + RouteListerExpansion +} + +// routeLister implements the RouteLister interface. +type routeLister struct { + indexer cache.Indexer +} + +// NewRouteLister returns a new RouteLister. 
+func NewRouteLister(indexer cache.Indexer) RouteLister { + return &routeLister{indexer: indexer} +} + +// List lists all Routes in the indexer. +func (s *routeLister) List(selector labels.Selector) (ret []*v1beta1.Route, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Route)) + }) + return ret, err +} + +// Routes returns an object that can list and get Routes. +func (s *routeLister) Routes(namespace string) RouteNamespaceLister { + return routeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RouteNamespaceLister helps list and get Routes. +type RouteNamespaceLister interface { + // List lists all Routes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Route, err error) + // Get retrieves the Route from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Route, error) + RouteNamespaceListerExpansion +} + +// routeNamespaceLister implements the RouteNamespaceLister +// interface. +type routeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Routes in the indexer for a given namespace. +func (s routeNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Route, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Route)) + }) + return ret, err +} + +// Get retrieves the Route from the indexer for a given namespace and name. 
+func (s routeNamespaceLister) Get(name string) (*v1beta1.Route, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("route"), name) + } + return obj.(*v1beta1.Route), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/service.go b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/service.go new file mode 100644 index 0000000000..3fd62721c9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/client/listers/serving/v1beta1/service.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1beta1 "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. 
+func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1beta1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. 
+func (s serviceNamespaceLister) Get(name string) (*v1beta1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("service"), name) + } + return obj.(*v1beta1.Service), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/deployment/config.go b/test/vendor/knative.dev/serving/pkg/deployment/config.go new file mode 100644 index 0000000000..dcd3b70860 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/deployment/config.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "errors" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +const ( + // ConfigName is the name of config map for the deployment. 
+ ConfigName = "config-deployment" + + // QueueSidecarImageKey is the config map key for queue sidecar image + QueueSidecarImageKey = "queueSidecarImage" + registriesSkippingTagResolving = "registriesSkippingTagResolving" +) + +// NewConfigFromMap creates a DeploymentConfig from the supplied Map +func NewConfigFromMap(configMap map[string]string) (*Config, error) { + nc := &Config{} + qsideCarImage, ok := configMap[QueueSidecarImageKey] + if !ok { + return nil, errors.New("queue sidecar image is missing") + } + nc.QueueSidecarImage = qsideCarImage + + if registries, ok := configMap[registriesSkippingTagResolving]; !ok { + // It is ok if registries are missing. + nc.RegistriesSkippingTagResolving = sets.NewString("ko.local", "dev.local") + } else { + nc.RegistriesSkippingTagResolving = sets.NewString(strings.Split(registries, ",")...) + } + return nc, nil +} + +// NewConfigFromConfigMap creates a DeploymentConfig from the supplied configMap +func NewConfigFromConfigMap(config *corev1.ConfigMap) (*Config, error) { + return NewConfigFromMap(config.Data) +} + +// Config includes the configurations for the controller. +type Config struct { + // QueueSidecarImage is the name of the image used for the queue sidecar + // injected into the revision pod + QueueSidecarImage string + + // Repositories for which tag to digest resolving should be skipped + RegistriesSkippingTagResolving sets.String +} diff --git a/test/vendor/knative.dev/serving/pkg/deployment/config_test.go b/test/vendor/knative.dev/serving/pkg/deployment/config_test.go new file mode 100644 index 0000000000..3954b998b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/deployment/config_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/system" + + . "knative.dev/pkg/configmap/testing" + _ "knative.dev/pkg/system/testing" +) + +var noSidecarImage = "" + +func TestControllerConfigurationFromFile(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, ConfigName, QueueSidecarImageKey) + + if _, err := NewConfigFromConfigMap(cm); err != nil { + t.Errorf("NewConfigFromConfigMap(actual) = %v", err) + } + + if _, err := NewConfigFromConfigMap(example); err != nil { + t.Errorf("NewConfigFromConfigMap(example) = %v", err) + } +} + +func TestControllerConfiguration(t *testing.T) { + configTests := []struct { + name string + wantErr bool + wantController interface{} + config *corev1.ConfigMap + }{{ + name: "controller configuration with bad registries", + wantErr: false, + wantController: &Config{ + RegistriesSkippingTagResolving: sets.NewString("ko.local", ""), + QueueSidecarImage: noSidecarImage, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + QueueSidecarImageKey: noSidecarImage, + registriesSkippingTagResolving: "ko.local,,", + }, + }}, { + name: "controller configuration with registries", + wantErr: false, + wantController: &Config{ + RegistriesSkippingTagResolving: sets.NewString("ko.local", "ko.dev"), + QueueSidecarImage: noSidecarImage, + }, + config: &corev1.ConfigMap{ + ObjectMeta: 
metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + QueueSidecarImageKey: noSidecarImage, + registriesSkippingTagResolving: "ko.local,ko.dev", + }, + }, + }, { + name: "controller with no side car image", + wantErr: true, + wantController: (*Config)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{}, + }, + }} + + for _, tt := range configTests { + actualController, err := NewConfigFromConfigMap(tt.config) + + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewConfigFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) + } + + if diff := cmp.Diff(actualController, tt.wantController); diff != "" { + t.Fatalf("Test: %q; want %v, but got %v", tt.name, tt.wantController, actualController) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/deployment/doc.go b/test/vendor/knative.dev/serving/pkg/deployment/doc.go new file mode 100644 index 0000000000..4315c4c86a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/deployment/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +package deployment diff --git a/test/vendor/knative.dev/serving/pkg/deployment/testdata/config-deployment.yaml b/test/vendor/knative.dev/serving/pkg/deployment/testdata/config-deployment.yaml new file mode 120000 index 0000000000..10ba19c148 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/deployment/testdata/config-deployment.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/deployment.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/deployment/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/deployment/zz_generated.deepcopy.go new file mode 100644 index 0000000000..eb68858be3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/deployment/zz_generated.deepcopy.go @@ -0,0 +1,48 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package deployment + +import ( + sets "k8s.io/apimachinery/pkg/util/sets" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.RegistriesSkippingTagResolving != nil { + in, out := &in.RegistriesSkippingTagResolving, &out.RegistriesSkippingTagResolving + *out = make(sets.String, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/gc/OWNERS b/test/vendor/knative.dev/serving/pkg/gc/OWNERS new file mode 100644 index 0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/pkg/gc/config.go b/test/vendor/knative.dev/serving/pkg/gc/config.go new file mode 100644 index 0000000000..25c2d63844 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/config.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gc + +import ( + "context" + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" +) + +const ( + ConfigName = "config-gc" +) + +type Config struct { + // Delay duration after a revision create before considering it for GC + StaleRevisionCreateDelay time.Duration + // Timeout since a revision lastPinned before it should be GC'd + // This must be longer than the controller resync period + StaleRevisionTimeout time.Duration + // Minimum number of generations of revisions to keep before considering for GC + StaleRevisionMinimumGenerations int64 + // Minimum staleness duration before updating lastPinned + StaleRevisionLastpinnedDebounce time.Duration +} + +func NewConfigFromConfigMapFunc(ctx context.Context) func(configMap *corev1.ConfigMap) (*Config, error) { + logger := logging.FromContext(ctx) + minRevisionTimeout := controller.GetResyncPeriod(ctx) + return func(configMap *corev1.ConfigMap) (*Config, error) { + c := Config{} + + for _, dur := range []struct { + key string + field *time.Duration + defaultValue time.Duration + }{{ + key: "stale-revision-create-delay", + field: &c.StaleRevisionCreateDelay, + defaultValue: 48 * time.Hour, + }, { + key: "stale-revision-timeout", + field: &c.StaleRevisionTimeout, + defaultValue: 15 * time.Hour, + }, { + key: "stale-revision-lastpinned-debounce", + field: &c.StaleRevisionLastpinnedDebounce, + defaultValue: 5 * time.Hour, + }} { + if raw, ok := configMap.Data[dur.key]; !ok { + *dur.field = dur.defaultValue + } else if val, err := time.ParseDuration(raw); err != nil { + return nil, err + } else { + *dur.field = val + } + } + + if raw, ok := configMap.Data["stale-revision-minimum-generations"]; !ok { + c.StaleRevisionMinimumGenerations = 20 + } else if val, err := strconv.ParseInt(raw, 10 /*base*/, 64 /*bit count*/); err != nil { + return nil, err + } else if val < 0 { + return nil, fmt.Errorf("stale-revision-minimum-generations must be 
non-negative, was: %d", val) + } else { + c.StaleRevisionMinimumGenerations = val + } + + if c.StaleRevisionTimeout-c.StaleRevisionLastpinnedDebounce < minRevisionTimeout { + logger.Warnf("Got revision timeout of %v, minimum supported value is %v", c.StaleRevisionTimeout, minRevisionTimeout+c.StaleRevisionLastpinnedDebounce) + c.StaleRevisionTimeout = minRevisionTimeout + c.StaleRevisionLastpinnedDebounce + return &c, nil + } + return &c, nil + } +} diff --git a/test/vendor/knative.dev/serving/pkg/gc/config_test.go b/test/vendor/knative.dev/serving/pkg/gc/config_test.go new file mode 100644 index 0000000000..2e4f02704d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/config_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gc + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + logtesting "knative.dev/pkg/logging/testing" + + . 
"knative.dev/pkg/configmap/testing" +) + +func TestOurConfig(t *testing.T) { + actual, example := ConfigMapsFromTestFile(t, "config-gc") + for _, tt := range []struct { + name string + fail bool + want *Config + data *corev1.ConfigMap + }{{ + name: "Actual config", + fail: false, + want: &Config{ + StaleRevisionCreateDelay: 48 * time.Hour, + StaleRevisionTimeout: 15 * time.Hour, + StaleRevisionMinimumGenerations: 20, + StaleRevisionLastpinnedDebounce: 5 * time.Hour, + }, + data: actual, + }, { + name: "Example config", + fail: false, + want: &Config{ + StaleRevisionCreateDelay: 48 * time.Hour, + StaleRevisionTimeout: 15 * time.Hour, + StaleRevisionMinimumGenerations: 20, + StaleRevisionLastpinnedDebounce: 5 * time.Hour, + }, + data: example, + }, { + name: "With value overrides", + want: &Config{ + StaleRevisionCreateDelay: 15 * time.Hour, + StaleRevisionTimeout: 15 * time.Hour, + StaleRevisionMinimumGenerations: 10, + StaleRevisionLastpinnedDebounce: 5 * time.Hour, + }, + data: &corev1.ConfigMap{ + Data: map[string]string{ + "stale-revision-create-delay": "15h", + "stale-revision-minimum-generations": "10", + }, + }, + }, { + name: "Invalid duration", + fail: true, + want: nil, + data: &corev1.ConfigMap{ + Data: map[string]string{ + "stale-revision-create-delay": "invalid", + }, + }, + }, { + name: "Invalid negative minimum generation", + fail: true, + want: nil, + data: &corev1.ConfigMap{ + Data: map[string]string{ + "stale-revision-minimum-generations": "-1", + }, + }, + }, { + name: "Invalid minimum generation", + fail: true, + want: nil, + data: &corev1.ConfigMap{ + Data: map[string]string{ + "stale-revision-minimum-generations": "invalid", + }, + }, + }, { + name: "Below minimum timeout", + fail: false, + want: &Config{ + StaleRevisionCreateDelay: 15 * time.Hour, + StaleRevisionTimeout: 15 * time.Hour, + StaleRevisionMinimumGenerations: 10, + StaleRevisionLastpinnedDebounce: 5 * time.Hour, + }, + data: &corev1.ConfigMap{ + Data: map[string]string{ + 
"stale-revision-create-delay": "15h", + "stale-revision-minimum-generations": "10", + "stale-revision-timeout": "1h", + }, + }, + }} { + t.Run(tt.name, func(t *testing.T) { + testConfig, err := NewConfigFromConfigMapFunc(logtesting.TestContextWithLogger(t))(tt.data) + if tt.fail != (err != nil) { + t.Fatalf("Unexpected error value: %v", err) + } + + if diff := cmp.Diff(tt.want, testConfig); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %s", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/gc/doc.go b/test/vendor/knative.dev/serving/pkg/gc/doc.go new file mode 100644 index 0000000000..fb89ba3735 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// Package gc holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Route controller depends. 
+package gc diff --git a/test/vendor/knative.dev/serving/pkg/gc/testdata/config-gc.yaml b/test/vendor/knative.dev/serving/pkg/gc/testdata/config-gc.yaml new file mode 120000 index 0000000000..0fc14e43b5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/testdata/config-gc.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/gc.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy.go new file mode 100644 index 0000000000..9a3151e120 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy.go @@ -0,0 +1,37 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package gc + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. 
+func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy_test.go b/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy_test.go new file mode 100644 index 0000000000..e79050ff36 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/gc/zz_generated.deepcopy_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gc + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestDeepCopy(t *testing.T) { + src := Config{ + StaleRevisionCreateDelay: 5 * time.Minute, + StaleRevisionTimeout: 5 * time.Minute, + StaleRevisionMinimumGenerations: 1, + StaleRevisionLastpinnedDebounce: 1 * time.Minute, + } + + if diff := cmp.Diff(src, *src.DeepCopy()); diff != "" { + t.Errorf("Unexpected DeepCopy (-want, +got): %v", diff) + } +} + +func TestDeepCopyInto(t *testing.T) { + var dest Config + src := Config{ + StaleRevisionCreateDelay: 5 * time.Minute, + StaleRevisionTimeout: 5 * time.Minute, + StaleRevisionMinimumGenerations: 1, + StaleRevisionLastpinnedDebounce: 1 * time.Minute, + } + + src.DeepCopyInto(&dest) + if diff := cmp.Diff(src, dest); diff != "" { + t.Errorf("Unexpected DeepCopyInto (-want, +got): %v", diff) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/http/OWNERS b/test/vendor/knative.dev/serving/pkg/http/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/http/header.go b/test/vendor/knative.dev/serving/pkg/http/header.go new file mode 100644 index 0000000000..779b2491be --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/header.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import "net/http" + +// LastHeaderValue gets the last value associated with the given key. +// It is case insensitive; textproto.CanonicalMIMEHeaderKey is used +// to canonicalize the provided key. +// If there are no values associated with the key, Get returns "". +func LastHeaderValue(header http.Header, key string) string { + if header == nil { + return "" + } + + v := header[http.CanonicalHeaderKey(key)] + + if len(v) == 0 { + return "" + } + + return v[len(v)-1] +} diff --git a/test/vendor/knative.dev/serving/pkg/http/header_test.go b/test/vendor/knative.dev/serving/pkg/http/header_test.go new file mode 100644 index 0000000000..1d3f8b15b8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/header_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http + +import ( + "net/http" + "testing" +) + +func TestLastHeaderValue(t *testing.T) { + headerKey := "Header-Key" + + tests := []struct { + name string + expected string + headers http.Header + }{{ + name: "nil headers", + expected: "", + headers: nil, + }, { + name: "empty header value ", + expected: "", + headers: http.Header{ + headerKey: nil, + }, + }, { + name: "single header value ", + expected: "first", + headers: http.Header{ + headerKey: {"first"}, + }, + }, { + name: "multi header value ", + expected: "second", + headers: http.Header{ + headerKey: {"first", "second"}, + }, + }} + + for _, test := range tests { + got := LastHeaderValue(test.headers, headerKey) + if got != test.expected { + t.Errorf("Unexpected header value got - %q want - %q", got, test.expected) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/http/request_log.go b/test/vendor/knative.dev/serving/pkg/http/request_log.go new file mode 100644 index 0000000000..8eaffd4430 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/request_log.go @@ -0,0 +1,177 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "sync/atomic" + "text/template" + "time" + "unsafe" + + "knative.dev/serving/pkg/network" +) + +// RequestLogHandler implements an http.Handler that writes request logs +// and calls the next handler. 
+type RequestLogHandler struct { + handler http.Handler + inputGetter RequestLogTemplateInputGetter + writer io.Writer + // Uses an unsafe.Pointer combined with atomic operations to get the least + // contention possible. + template unsafe.Pointer + enableProbeRequestLog bool +} + +// RequestLogRevision provides revision related static information +// for the template execution. +type RequestLogRevision struct { + Name string + Namespace string + Service string + Configuration string + PodName string + PodIP string +} + +// RequestLogResponse provided response related information for the template execution. +type RequestLogResponse struct { + Code int + Size int + Latency float64 +} + +// RequestLogTemplateInput is the wrapper struct that provides all +// necessary information for the template execution. +type RequestLogTemplateInput struct { + Request *http.Request + Response *RequestLogResponse + Revision *RequestLogRevision +} + +// RequestLogTemplateInputGetter defines a function returning the input to pass to a request log writer. +type RequestLogTemplateInputGetter func(req *http.Request, resp *RequestLogResponse) *RequestLogTemplateInput + +// RequestLogTemplateInputGetterFromRevision returns a func that forms a template input using a static +// revision information. +func RequestLogTemplateInputGetterFromRevision(rev *RequestLogRevision) RequestLogTemplateInputGetter { + return func(req *http.Request, resp *RequestLogResponse) *RequestLogTemplateInput { + return &RequestLogTemplateInput{ + Request: req, + Response: resp, + Revision: rev, + } + } +} + +// NewRequestLogHandler creates an http.Handler that logs request logs to an io.Writer. 
+func NewRequestLogHandler(h http.Handler, w io.Writer, templateStr string, + inputGetter RequestLogTemplateInputGetter, enableProbeRequestLog bool) (*RequestLogHandler, error) { + reqHandler := &RequestLogHandler{ + handler: h, + writer: w, + inputGetter: inputGetter, + enableProbeRequestLog: enableProbeRequestLog, + } + if err := reqHandler.SetTemplate(templateStr); err != nil { + return nil, err + } + return reqHandler, nil +} + +// SetTemplate sets the template to use for formatting request logs. +// Setting the template to an empty string turns of writing request logs. +func (h *RequestLogHandler) SetTemplate(templateStr string) error { + var t *template.Template + // If templateStr is empty, we will set the template to nil + // and effectively disable request logs. + if templateStr != "" { + // Make sure that the template ends with a newline. Otherwise, + // logging backends will not be able to parse entries separately. + if !strings.HasSuffix(templateStr, "\n") { + templateStr = templateStr + "\n" + } + var err error + t, err = template.New("requestLog").Parse(templateStr) + if err != nil { + return err + } + } + + atomic.StorePointer(&h.template, unsafe.Pointer(t)) + return nil +} + +func (h *RequestLogHandler) getTemplate() *template.Template { + return (*template.Template)(atomic.LoadPointer(&h.template)) +} + +func (h *RequestLogHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + t := h.getTemplate() + if t == nil { + h.handler.ServeHTTP(w, r) + return + } + + rr := NewResponseRecorder(w, http.StatusOK) + startTime := time.Now() + + defer func() { + // Filter probe requests for request logs if disabled. + if network.IsProbe(r) && !h.enableProbeRequestLog { + return + } + + // If ServeHTTP panics, recover, record the failure and panic again. 
+ err := recover() + latency := time.Since(startTime).Seconds() + if err != nil { + h.write(t, h.inputGetter(r, &RequestLogResponse{ + Code: http.StatusInternalServerError, + Latency: latency, + Size: 0, + })) + panic(err) + } else { + h.write(t, h.inputGetter(r, &RequestLogResponse{ + Code: rr.ResponseCode, + Latency: latency, + Size: (int)(rr.ResponseSize), + })) + } + }() + + h.handler.ServeHTTP(rr, r) +} + +func (h *RequestLogHandler) write(t *template.Template, in *RequestLogTemplateInput) { + // Use a local buffer to store the whole template expansion first. If h.writer + // is used directly, parallel template executions may result in interleaved + // output. + w := &bytes.Buffer{} + if err := t.Execute(w, in); err != nil { + // Template execution failed. Write an error message with some basic information about the request. + fmt.Fprintf(h.writer, "Invalid request log template: method: %v, response code: %v, latency: %v, url: %v\n", + in.Request.Method, in.Response.Code, in.Response.Latency, in.Request.URL) + } + h.writer.Write(w.Bytes()) +} diff --git a/test/vendor/knative.dev/serving/pkg/http/request_log_test.go b/test/vendor/knative.dev/serving/pkg/http/request_log_test.go new file mode 100644 index 0000000000..63caf04b14 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/request_log_test.go @@ -0,0 +1,289 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "knative.dev/serving/pkg/network" +) + +var ( + defaultRevInfo = &RequestLogRevision{ + Name: "rev", + Namespace: "ns", + Service: "svc", + Configuration: "cfg", + PodName: "pn", + PodIP: "ip", + } + defaultInputGetter = RequestLogTemplateInputGetterFromRevision(defaultRevInfo) + baseHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) +) + +func TestRequestLogHandler(t *testing.T) { + tests := []struct { + name string + url string + body string + template string + want string + wantErr bool + isProbe bool + enableProbeRequestLog bool + }{{ + name: "empty template", + url: "http://example.com/testpage", + body: "test", + template: "", + want: "", + }, { + name: "template with new line", + url: "http://example.com/testpage", + body: "test", + template: "{{.Request.URL}}\n", + want: "http://example.com/testpage\n", + }, { + name: "template without new line", + url: "http://example.com", + body: "test", + template: "{{.Request.ContentLength}}", + want: "4\n", + }, { + name: "invalid template", + url: "http://example.com", + body: "test", + template: "{{}}", + want: "", + wantErr: true, + }, { + name: "revision info", + url: "http://example.com", + body: "test", + template: "{{.Revision.Name}}, {{.Revision.Namespace}}, {{.Revision.Service}}, {{.Revision.Configuration}}, {{.Revision.PodName}}, {{.Revision.PodIP}}", + want: "rev, ns, svc, cfg, pn, ip\n", + }, { + name: "probe request and logging support disabled", + url: "http://example.com", + body: "test", + template: "{{.Request.ContentLength}}", + want: "", + isProbe: true, + }, { + name: "probe request and logging support enabled", + url: "http://example.com", + body: "test", + template: "{{.Request.ContentLength}}", + want: "4\n", + isProbe: true, + enableProbeRequestLog: true, + }} + + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) { + buf := bytes.NewBufferString("") + handler, err := NewRequestLogHandler( + baseHandler, buf, test.template, defaultInputGetter, test.enableProbeRequestLog) + if test.wantErr != (err != nil) { + t.Errorf("got %v, want error %v", err, test.wantErr) + } + + if !test.wantErr { + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, test.url, bytes.NewBufferString(test.body)) + if test.isProbe { + req.Header.Set(network.ProbeHeaderName, "activator") + } + handler.ServeHTTP(resp, req) + + got := buf.String() + if got != test.want { + t.Errorf("got '%v', want '%v'", got, test.want) + } + } + }) + } +} + +func TestPanickingHandler(t *testing.T) { + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic("no!") + }) + buf := bytes.NewBufferString("") + handler, err := NewRequestLogHandler( + baseHandler, buf, "{{.Request.URL}}", defaultInputGetter, false) + if err != nil { + t.Errorf("got %v, want error: %v", err, false) + } + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("test")) + defer func() { + err := recover() + if err == nil { + t.Error("want ServeHTTP to panic, got nothing.") + } + + got := buf.String() + if want := "http://example.com\n"; got != want { + t.Errorf("got '%v', want '%v'", got, want) + } + }() + handler.ServeHTTP(resp, req) +} + +func TestFailedTemplateExecution(t *testing.T) { + buf := bytes.NewBufferString("") + handler, err := NewRequestLogHandler( + baseHandler, buf, "{{.Request.Something}}", defaultInputGetter, false) + if err != nil { + t.Errorf("got %v, wantErr %v, ", err, false) + } + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "http://example.com", bytes.NewBufferString("test")) + handler.ServeHTTP(resp, req) + + got := buf.String() + if want := "Invalid request log template: "; !strings.HasPrefix(got, want) { + t.Errorf("got: '%v', want: '%v'", got, want) + 
} +} + +func TestSetTemplate(t *testing.T) { + url, body := "http://example.com/testpage", "test" + tests := []struct { + name string + template string + want string + wantErr bool + }{{ + name: "empty template 1", + template: "", + want: "", + wantErr: false, + }, { + name: "template with new line", + template: "{{.Request.URL}}\n", + want: "http://example.com/testpage\n", + wantErr: false, + }, { + name: "empty template 2", + template: "", + want: "", + wantErr: false, + }, { + name: "template without new line", + template: "{{.Request.ContentLength}}", + want: "4\n", + wantErr: false, + }, { + name: "empty template 3", + template: "", + want: "", + wantErr: false, + }, { + name: "invalid template", + template: "{{}}", + want: "", + wantErr: true, + }} + + buf := bytes.NewBufferString("") + handler, err := NewRequestLogHandler(baseHandler, buf, "", defaultInputGetter, false) + if err != nil { + t.Fatalf("want: no error, got: %v", err) + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := handler.SetTemplate(test.template) + if test.wantErr != (err != nil) { + t.Errorf("got %v, want error %v", err, test.wantErr) + } + + if !test.wantErr { + buf.Reset() + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, url, bytes.NewBufferString(body)) + handler.ServeHTTP(resp, req) + got := buf.String() + if got != test.want { + t.Errorf("got '%v', want '%v'", got, test.want) + } + } + }) + } +} + +func BenchmarkRequestLogHandlerNoTemplate(b *testing.B) { + handler, err := NewRequestLogHandler(baseHandler, ioutil.Discard, "", defaultInputGetter, false) + if err != nil { + b.Fatalf("Failed to create handler: %v", err) + } + resp := httptest.NewRecorder() + + b.Run(fmt.Sprint("sequential"), func(b *testing.B) { + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + for j := 0; j < b.N; j++ { + handler.ServeHTTP(resp, req) + } + }) + + b.Run(fmt.Sprint("parallel"), func(b *testing.B) { + 
b.RunParallel(func(pb *testing.PB) { + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + for pb.Next() { + handler.ServeHTTP(resp, req) + } + }) + }) +} + +func BenchmarkRequestLogHandlerDefaultTemplate(b *testing.B) { + // Taken from config-observability.yaml + tpl := `{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}` + handler, err := NewRequestLogHandler(baseHandler, ioutil.Discard, tpl, defaultInputGetter, false) + if err != nil { + b.Fatalf("Failed to create handler: %v", err) + } + resp := httptest.NewRecorder() + + b.Run(fmt.Sprint("sequential"), func(b *testing.B) { + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + for j := 0; j < b.N; j++ { + handler.ServeHTTP(resp, req) + } + }) + + b.Run(fmt.Sprint("parallel"), func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) + for pb.Next() { + handler.ServeHTTP(resp, req) + } + }) + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/http/response_recorder.go b/test/vendor/knative.dev/serving/pkg/http/response_recorder.go new file mode 100644 index 0000000000..02c7688e30 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/response_recorder.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "bufio" + "net" + "net/http" + "sync/atomic" + + "knative.dev/pkg/websocket" +) + +var ( + _ http.Flusher = (*ResponseRecorder)(nil) + _ http.ResponseWriter = (*ResponseRecorder)(nil) +) + +// ResponseRecorder is an implementation of http.ResponseWriter and http.Flusher +// that captures the response code and size. +type ResponseRecorder struct { + ResponseCode int + ResponseSize int32 + + writer http.ResponseWriter + wroteHeader bool + // hijacked is whether this connection has been hijacked + // by a Handler with the Hijacker interface. + // This is guarded by a mutex in the default implementation. + // To emulate the same behavior, we will use an int32 and + // access to this field only through atomic calls. + hijacked int32 +} + +// NewResponseRecorder creates an http.ResponseWriter that captures the response code and size. +func NewResponseRecorder(w http.ResponseWriter, responseCode int) *ResponseRecorder { + return &ResponseRecorder{ + writer: w, + ResponseCode: responseCode, + } +} + +// Flush flushes the buffer to the client. +func (rr *ResponseRecorder) Flush() { + rr.writer.(http.Flusher).Flush() +} + +// Hijack calls Hijack() on the wrapped http.ResponseWriter if it implements +// http.Hijacker interface, which is required for net/http/httputil/reverseproxy +// to handle connection upgrade/switching protocol. Otherwise returns an error. 
+func (rr *ResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { + c, rw, err := websocket.HijackIfPossible(rr.writer) + if err != nil { + atomic.StoreInt32(&rr.hijacked, 1) + } + return c, rw, err +} + +// Header returns the header map that will be sent by WriteHeader. +func (rr *ResponseRecorder) Header() http.Header { + return rr.writer.Header() +} + +// Write writes the data to the connection as part of an HTTP reply. +func (rr *ResponseRecorder) Write(p []byte) (int, error) { + atomic.AddInt32(&rr.ResponseSize, (int32)(len(p))) + return rr.writer.Write(p) +} + +// WriteHeader sends an HTTP response header with the provided status code. +func (rr *ResponseRecorder) WriteHeader(code int) { + if rr.wroteHeader || atomic.LoadInt32(&rr.hijacked) == 1 { + return + } + + rr.writer.WriteHeader(code) + rr.wroteHeader = true + rr.ResponseCode = code +} diff --git a/test/vendor/knative.dev/serving/pkg/http/response_recorder_test.go b/test/vendor/knative.dev/serving/pkg/http/response_recorder_test.go new file mode 100644 index 0000000000..a112f5079f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/http/response_recorder_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package http + +import ( + "net/http" + "testing" + + "github.com/google/go-cmp/cmp" +) + +type fakeResponseWriter struct{} + +func (w *fakeResponseWriter) Header() http.Header { return http.Header{"item1": []string{"value1"}} } +func (w *fakeResponseWriter) Write(p []byte) (int, error) { return len(p), nil } +func (w *fakeResponseWriter) WriteHeader(code int) {} +func (w *fakeResponseWriter) Flush() {} + +var defaultHeader = http.Header{"item1": {"value1"}} + +func TestResponseRecorder(t *testing.T) { + tests := []struct { + name string + initialStatus int + finalStatus int + hijack bool + writeSize int + wantStatus int + wantSize int32 + }{{ + name: "no hijack", + initialStatus: http.StatusAccepted, + finalStatus: http.StatusBadGateway, + hijack: false, + writeSize: 12, + wantStatus: http.StatusBadGateway, + wantSize: 12, + }, { + name: "hijack", + initialStatus: http.StatusAccepted, + finalStatus: http.StatusBadGateway, + hijack: true, + writeSize: 12, + wantStatus: http.StatusAccepted, + wantSize: 12, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rr := NewResponseRecorder(&fakeResponseWriter{}, test.initialStatus) + if test.hijack { + rr.Hijack() + } + + b := make([]byte, test.writeSize) + rr.Write(b) + rr.Flush() + rr.WriteHeader(test.finalStatus) + + if got, want := rr.ResponseCode, test.wantStatus; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := rr.ResponseSize, test.wantSize; got != want { + t.Errorf("got %v, want %v", got, want) + } + if diff := cmp.Diff(rr.Header(), defaultHeader); diff != "" { + t.Errorf("Headers are different (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/logging/OWNERS b/test/vendor/knative.dev/serving/pkg/logging/OWNERS new file mode 100644 index 0000000000..ab8e1f2983 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved 
PRs. + +approvers: +- monitoring-approvers + +reviewers: +- monitoring-reviewers + +labels: +- area/monitoring diff --git a/test/vendor/knative.dev/serving/pkg/logging/config_test.go b/test/vendor/knative.dev/serving/pkg/logging/config_test.go new file mode 100644 index 0000000000..c3092b55fd --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/config_test.go @@ -0,0 +1,245 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + . "knative.dev/pkg/configmap/testing" + "knative.dev/pkg/logging" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" +) + +const testConfigFileName = "test-config-logging" + +func TestNewConfigNoEntry(t *testing.T) { + c, err := logging.NewConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "knative-something", + Name: "config-logging", + }, + }) + if err != nil { + t.Errorf("Expected no errors. 
got: %v", err) + } + if got := c.LoggingConfig; got == "" { + t.Error("LoggingConfig = empty, want not empty") + } + if got, want := len(c.LoggingLevel), 0; got != want { + t.Errorf("len(LoggingLevel) = %d, want %d", got, want) + } + for _, component := range []string{"controller", "queueproxy", "webhook", "activator", "autoscaler"} { + if got, want := c.LoggingLevel[component], zap.InfoLevel; got != want { + t.Errorf("LoggingLevel[%s] = %q, want %q", component, got, want) + } + } +} + +func TestNewConfig(t *testing.T) { + wantCfg := "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}" + wantLevel := zapcore.InfoLevel + c, err := logging.NewConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "config-logging", + }, + Data: map[string]string{ + "zap-logger-config": wantCfg, + "loglevel.queueproxy": wantLevel.String(), + }, + }) + if err != nil { + t.Errorf("Expected no errors. got: %v", err) + } + if got := c.LoggingConfig; got != wantCfg { + t.Errorf("LoggingConfig = %v, want %v", got, wantCfg) + } + if got := c.LoggingLevel["queueproxy"]; got != wantLevel { + t.Errorf("LoggingLevel[queueproxy] = %v, want %v", got, wantLevel) + } +} + +func TestOurConfig(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, logging.ConfigMapName()) + + if cfg, err := logging.NewConfigFromConfigMap(cm); err != nil { + t.Errorf("Expected no errors. got: %v", err) + } else if cfg == nil { + t.Errorf("NewConfigFromConfigMap(actual) = %v, want non-nil", cfg) + } + + if cfg, err := logging.NewConfigFromConfigMap(example); err != nil { + t.Errorf("Expected no errors. 
got: %v", err) + } else if cfg == nil { + t.Errorf("NewConfigFromConfigMap(example) = %v, want non-nil", cfg) + } +} + +func TestLogLevelTestConfig(t *testing.T) { + const wantCfg = `{ + "level": "debug", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } +} +` + const wantLevel = zapcore.DebugLevel + components := []string{ + "autoscaler", + "controller", + "queueproxy", + "webhook", + "activator", + } + cm, _ := ConfigMapsFromTestFile(t, testConfigFileName, "loglevel.autoscaler", "loglevel.controller", "loglevel.queueproxy", "loglevel.webhook", "loglevel.activator", "zap-logger-config") + cfg, err := logging.NewConfigFromConfigMap(cm) + + if err != nil { + t.Errorf("Expected no errors. 
got: %v", err) + } + if cfg == nil { + t.Errorf("NewConfigFromConfigMap(actual) = %v, want non-nil", cfg) + } + + for _, c := range components { + if got := cfg.LoggingLevel[c]; got != wantLevel { + t.Errorf("LoggingLevel[%q] = %v, want %v", c, got, wantLevel) + } + } + if got := cfg.LoggingConfig; got != wantCfg { + t.Errorf("LoggingConfig = %v, want %v, diff(-want +got) %s", got, wantCfg, cmp.Diff(wantCfg, got)) + } +} + +func TestNewLoggerFromConfig(t *testing.T) { + c, _, _ := getTestConfig() + _, atomicLevel := logging.NewLoggerFromConfig(c, "queueproxy") + if atomicLevel.Level() != zapcore.DebugLevel { + t.Errorf("logger level wanted: DebugLevel, got: %v", atomicLevel) + } +} + +func TestEmptyLevel(t *testing.T) { + c, err := logging.NewConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "config-logging", + }, + Data: map[string]string{ + "zap-logger-config": "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}", + "loglevel.queueproxy": "", + }, + }) + if err != nil { + t.Errorf("Expected no errors, got: %v", err) + } + if got, want := c.LoggingLevel["queueproxy"], zapcore.InfoLevel; got != want { + t.Errorf("LoggingLevel[queueproxy] = %v, want: %v", got, want) + } +} + +func TestInvalidLevel(t *testing.T) { + wantCfg := "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}" + _, err := logging.NewConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "config-logging", + }, + Data: map[string]string{ + "zap-logger-config": wantCfg, + "loglevel.queueproxy": "invalid", + }, + }) + if err == nil { + t.Error("Expected errors. 
got nothing") + } +} + +func getTestConfig() (*logging.Config, string, string) { + wantCfg := "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}" + wantLevel := "debug" + c, _ := logging.NewConfigFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "config-logging", + }, + Data: map[string]string{ + "zap-logger-config": wantCfg, + "loglevel.queueproxy": wantLevel, + }, + }) + return c, wantCfg, wantLevel +} + +func TestUpdateLevelFromConfigMap(t *testing.T) { + logger, atomicLevel := logging.NewLogger("", "debug") + want := zapcore.DebugLevel + if atomicLevel.Level() != zapcore.DebugLevel { + t.Fatalf("Expected initial logger level to %v, got: %v", want, atomicLevel.Level()) + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: "config-logging", + }, + Data: map[string]string{ + "zap-logger-config": "", + "loglevel.controller": "panic", + }, + } + + tests := []struct { + setLevel string + wantLevel zapcore.Level + }{ + {"info", zapcore.InfoLevel}, + {"error", zapcore.ErrorLevel}, + {"invalid", zapcore.ErrorLevel}, + {"debug", zapcore.DebugLevel}, + {"debug", zapcore.DebugLevel}, + } + + u := logging.UpdateLevelFromConfigMap(logger, atomicLevel, "controller") + for _, tt := range tests { + cm.Data["loglevel.controller"] = tt.setLevel + u(cm) + if atomicLevel.Level() != tt.wantLevel { + t.Errorf("Invalid logging level. 
want: %v, got: %v", tt.wantLevel, atomicLevel.Level()) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer.go b/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer.go new file mode 100644 index 0000000000..332596fc6d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +import ( + "io" + "os" + "sync" +) + +var _ io.Writer = (*syncFileWriter)(nil) + +type syncFileWriter struct { + file *os.File + mux sync.Mutex +} + +// NewSyncFileWriter returns an io.Writer that is backed by an os.File +// and that synchronizes the writes to the file. +// This is suitable for use with non-threadsafe writers, e.g. os.Stdout. +func NewSyncFileWriter(file *os.File) io.Writer { + return &syncFileWriter{file, sync.Mutex{}} +} + +// Write writes len(b) bytes to the file. 
+func (w *syncFileWriter) Write(b []byte) (n int, err error) { + w.mux.Lock() + defer w.mux.Unlock() + return w.file.Write(b) +} diff --git a/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer_test.go b/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer_test.go new file mode 100644 index 0000000000..cc57426eb6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/sync_file_writer_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logging + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestWrite(t *testing.T) { + file, err := ioutil.TempFile("", "sync_file_writer_test") + if err != nil { + t.Fatal("failed to create a temp file for the test") + } + defer os.Remove(file.Name()) + + w := NewSyncFileWriter(file) + w.Write([]byte("line1\n")) + w.Write([]byte("line2\n")) + file.Close() + + want := "line1\nline2\n" + gotBytes, _ := ioutil.ReadFile(file.Name()) + got := string(gotBytes) + if got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/logging/testdata/config-logging.yaml b/test/vendor/knative.dev/serving/pkg/logging/testdata/config-logging.yaml new file mode 120000 index 0000000000..affd1b5f81 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/testdata/config-logging.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/logging.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/logging/testdata/test-config-logging.yaml b/test/vendor/knative.dev/serving/pkg/logging/testdata/test-config-logging.yaml new file mode 120000 index 0000000000..761fc5070b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/logging/testdata/test-config-logging.yaml @@ -0,0 +1 @@ +../../../test/config/config-logging.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/metrics/OWNERS b/test/vendor/knative.dev/serving/pkg/metrics/OWNERS new file mode 100644 index 0000000000..ab8e1f2983 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- monitoring-approvers + +reviewers: +- monitoring-reviewers + +labels: +- area/monitoring diff --git a/test/vendor/knative.dev/serving/pkg/metrics/config.go b/test/vendor/knative.dev/serving/pkg/metrics/config.go new file mode 100644 index 0000000000..8c847060aa --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/config.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "strings" + "text/template" + + corev1 "k8s.io/api/core/v1" +) + +const ( + defaultLogURLTemplate = "http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))" + defaultRequestMetricsBackend = "prometheus" +) + +// ObservabilityConfig contains the configuration defined in the observability ConfigMap. +type ObservabilityConfig struct { + // EnableVarLogCollection specifies whether the logs under /var/log/ should be available + // for collection on the host node by the fluentd daemon set. + EnableVarLogCollection bool + + // LoggingURLTemplate is a string containing the logging url template where + // the variable REVISION_UID will be replaced with the created revision's UID. + LoggingURLTemplate string + + // RequestLogTemplate is the go template to use to shape the request logs. 
+ RequestLogTemplate string + + // EnableProbeRequestLog enables queue-proxy to write health check probe request logs. + EnableProbeRequestLog bool + + // RequestMetricsBackend specifies the request metrics destination, e.g. Prometheus, + // Stackdriver. + RequestMetricsBackend string + + // EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from + // the pods via an HTTP server in the format expected by the pprof visualization tool. + EnableProfiling bool +} + +// NewObservabilityConfigFromConfigMap creates a ObservabilityConfig from the supplied ConfigMap +func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*ObservabilityConfig, error) { + oc := &ObservabilityConfig{} + if evlc, ok := configMap.Data["logging.enable-var-log-collection"]; ok { + oc.EnableVarLogCollection = strings.EqualFold(evlc, "true") + } + + if rut, ok := configMap.Data["logging.revision-url-template"]; ok { + oc.LoggingURLTemplate = rut + } else { + oc.LoggingURLTemplate = defaultLogURLTemplate + } + + if rlt, ok := configMap.Data["logging.request-log-template"]; ok { + // Verify that we get valid templates. 
+ if _, err := template.New("requestLog").Parse(rlt); err != nil { + return nil, err + } + oc.RequestLogTemplate = rlt + } + + if eprl, ok := configMap.Data["logging.enable-probe-request-log"]; ok { + oc.EnableProbeRequestLog = strings.EqualFold(eprl, "true") + } + + if mb, ok := configMap.Data["metrics.request-metrics-backend-destination"]; ok { + oc.RequestMetricsBackend = mb + } else { + oc.RequestMetricsBackend = defaultRequestMetricsBackend + } + + if prof, ok := configMap.Data["profiling.enable"]; ok { + oc.EnableProfiling = strings.EqualFold(prof, "true") + } + + return oc, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/metrics/config_test.go b/test/vendor/knative.dev/serving/pkg/metrics/config_test.go new file mode 100644 index 0000000000..b5318b40bc --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/config_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/metrics" + "knative.dev/pkg/system" + + . 
"knative.dev/pkg/configmap/testing" + _ "knative.dev/pkg/system/testing" +) + +func TestOurObservability(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, metrics.ConfigMapName()) + + if _, err := NewObservabilityConfigFromConfigMap(cm); err != nil { + t.Errorf("NewObservabilityFromConfigMap(actual) = %v", err) + } + + if _, err := NewObservabilityConfigFromConfigMap(example); err != nil { + t.Errorf("NewObservabilityFromConfigMap(example) = %v", err) + } +} + +func TestObservabilityConfiguration(t *testing.T) { + observabilityConfigTests := []struct { + name string + wantErr bool + wantController interface{} + config *corev1.ConfigMap + }{{ + name: "observability configuration with all inputs", + wantErr: false, + wantController: &ObservabilityConfig{ + LoggingURLTemplate: "https://logging.io", + EnableVarLogCollection: true, + RequestLogTemplate: `{"requestMethod": "{{.Request.Method}}"}`, + EnableProbeRequestLog: true, + RequestMetricsBackend: "stackdriver", + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "true", + "logging.revision-url-template": "https://logging.io", + "logging.enable-probe-request-log": "true", + "logging.write-request-logs": "true", + "logging.request-log-template": `{"requestMethod": "{{.Request.Method}}"}`, + "metrics.request-metrics-backend-destination": "stackdriver", + }, + }, + }, { + name: "observability config with no map", + wantErr: false, + wantController: &ObservabilityConfig{ + EnableVarLogCollection: false, + LoggingURLTemplate: defaultLogURLTemplate, + RequestLogTemplate: "", + RequestMetricsBackend: "prometheus", + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + }, + }, { + name: "invalid request log template", + wantErr: true, + wantController: (*ObservabilityConfig)(nil), + 
config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.request-log-template": `{{ something }}`, + }, + }, + }} + + for _, tt := range observabilityConfigTests { + t.Run(tt.name, func(t *testing.T) { + actualController, err := NewObservabilityConfigFromConfigMap(tt.config) + + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewObservabilityFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) + } + + if diff := cmp.Diff(actualController, tt.wantController); diff != "" { + t.Fatalf("Test: %q; want %v, but got %v", tt.name, tt.wantController, actualController) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/metrics/doc.go b/test/vendor/knative.dev/serving/pkg/metrics/doc.go new file mode 100644 index 0000000000..eb005b410a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +package metrics diff --git a/test/vendor/knative.dev/serving/pkg/metrics/key.go b/test/vendor/knative.dev/serving/pkg/metrics/key.go new file mode 100644 index 0000000000..b3656418d0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/key.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "go.opencensus.io/tag" + "knative.dev/pkg/metrics/metricskey" +) + +// Create the tag keys that will be used to add tags to our measurements. +// Tag keys must conform to the restrictions described in +// go.opencensus.io/tag/validate.go. 
Currently those restrictions are: +// - length between 1 and 255 inclusive +// - characters are printable US-ASCII +var ( + NamespaceTagKey = tag.MustNewKey(metricskey.LabelNamespaceName) + ServiceTagKey = tag.MustNewKey(metricskey.LabelServiceName) + ConfigTagKey = tag.MustNewKey(metricskey.LabelConfigurationName) + RevisionTagKey = tag.MustNewKey(metricskey.LabelRevisionName) + PodTagKey = tag.MustNewKey("pod_name") + ContainerTagKey = tag.MustNewKey("container_name") + ResponseCodeKey = tag.MustNewKey("response_code") + ResponseCodeClassKey = tag.MustNewKey("response_code_class") + NumTriesKey = tag.MustNewKey("num_tries") + + CommonRevisionKeys = []tag.Key{NamespaceTagKey, ServiceTagKey, ConfigTagKey, RevisionTagKey} +) diff --git a/test/vendor/knative.dev/serving/pkg/metrics/testdata/config-observability.yaml b/test/vendor/knative.dev/serving/pkg/metrics/testdata/config-observability.yaml new file mode 120000 index 0000000000..5bf896ec8c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/testdata/config-observability.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/observability.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/metrics/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/metrics/zz_generated.deepcopy.go new file mode 100644 index 0000000000..822bb4db22 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/metrics/zz_generated.deepcopy.go @@ -0,0 +1,37 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package metrics + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityConfig) DeepCopyInto(out *ObservabilityConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityConfig. +func (in *ObservabilityConfig) DeepCopy() *ObservabilityConfig { + if in == nil { + return nil + } + out := new(ObservabilityConfig) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/network/OWNERS b/test/vendor/knative.dev/serving/pkg/network/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/network/bufferpool.go b/test/vendor/knative.dev/serving/pkg/network/bufferpool.go new file mode 100644 index 0000000000..d571fb1181 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/bufferpool.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "net/http/httputil" + "sync" +) + +// bufferPool implements the BufferPool interface to be used in +// httputil.ReverseProxy. It stores pointers to slices to +// further avoid allocations, see https://staticcheck.io/docs/checks#SA6002. +type bufferPool struct { + pool *sync.Pool +} + +// NewBufferPool creates a new BytePool. This is only safe to use in the context +// of a httputil.ReverseProxy, as the buffers returned via Put are not cleaned +// explicitly. +func NewBufferPool() httputil.BufferPool { + return &bufferPool{ + pool: &sync.Pool{}, + } +} + +// Get gets a []byte from the bufferPool, or creates a new one if none are +// available in the pool. +func (b *bufferPool) Get() []byte { + buf := b.pool.Get() + if buf == nil { + // Use the default buffer size as defined in the ReverseProxy itself. + return make([]byte, 32*1024) + } + + return *buf.(*[]byte) +} + +// Put returns the given Buffer to the bufferPool. +func (b *bufferPool) Put(buffer []byte) { + b.pool.Put(&buffer) +} diff --git a/test/vendor/knative.dev/serving/pkg/network/bufferpool_test.go b/test/vendor/knative.dev/serving/pkg/network/bufferpool_test.go new file mode 100644 index 0000000000..5903b4d78b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/bufferpool_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "testing" + + pkgnet "knative.dev/pkg/network" +) + +func TestBufferPool(t *testing.T) { + pool := NewBufferPool() + // Transparently creates a new buffer + buf := pool.Get() + if got, want := len(buf), 32*1024; got != want { + t.Errorf("len(buf) = %d, want %d", got, want) + } +} + +// TestBufferPoolInReverseProxy asserts correctness of the pool in context +// of the httputil.ReverseProxy. It makes sure that the behavior around +// slice "cleanup" is correct and that slices returned to the pool do not +// pollute later requests. +func TestBufferPoolInReverseProxy(t *testing.T) { + want := "The testmessage successfully made its way through the roundtripper." + + url := &url.URL{} + proxy := httputil.NewSingleHostReverseProxy(url) + + pool := NewBufferPool() + proxy.BufferPool = pool + proxy.Transport = pkgnet.RoundTripperFunc(func(*http.Request) (*http.Response, error) { + recorder := httptest.NewRecorder() + recorder.WriteString(want) + return recorder.Result(), nil + }) + + pool.Put([]byte("I'm polluting this pool with a buffer that's not empty.")) + pool.Put([]byte("I'm adding a little less.")) + pool.Put([]byte("And I'm even adding more info than the first message did, for sanity.")) + + recorder := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, url.String(), nil) + proxy.ServeHTTP(recorder, req) + + if got := recorder.Body.String(); got != want { + t.Errorf("res.Body = %s, want %s", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/network/doc.go b/test/vendor/knative.dev/serving/pkg/network/doc.go new file mode 100644 index 0000000000..71c061a60c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// Package network holds the typed objects that define the schemas for +// configuring the knative/serving networking layer. +package network diff --git a/test/vendor/knative.dev/serving/pkg/network/ingress/doc.go b/test/vendor/knative.dev/serving/pkg/network/ingress/doc.go new file mode 100644 index 0000000000..518c77dc8f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/ingress/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ingress holds utilities related to the implementation of ingress +// controllers. +package ingress diff --git a/test/vendor/knative.dev/serving/pkg/network/ingress/ingress.go b/test/vendor/knative.dev/serving/pkg/network/ingress/ingress.go new file mode 100644 index 0000000000..f77814300c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/ingress/ingress.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/network" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + net "knative.dev/serving/pkg/network" +) + +// ComputeHash computes a hash of the Ingress Spec, Namespace and Name +func ComputeHash(ing *v1alpha1.Ingress) ([16]byte, error) { + bytes, err := json.Marshal(ing.Spec) + if err != nil { + return [16]byte{}, fmt.Errorf("failed to serialize Ingress: %w", err) + } + bytes = append(bytes, []byte(ing.GetNamespace())...) + bytes = append(bytes, []byte(ing.GetName())...) + return md5.Sum(bytes), nil +} + +// InsertProbe adds a AppendHeader rule so that any request going through a Gateway is tagged with +// the version of the Ingress currently deployed on the Gateway. 
+func InsertProbe(ing *v1alpha1.Ingress) (string, error) { + bytes, err := ComputeHash(ing) + if err != nil { + return "", fmt.Errorf("failed to compute the hash of the Ingress: %w", err) + } + hash := fmt.Sprintf("%x", bytes) + + for _, rule := range ing.Spec.Rules { + if rule.HTTP == nil { + return "", fmt.Errorf("rule is missing HTTP block: %+v", rule) + } + for i := range rule.HTTP.Paths { + if rule.HTTP.Paths[i].AppendHeaders == nil { + rule.HTTP.Paths[i].AppendHeaders = make(map[string]string, 1) + } + rule.HTTP.Paths[i].AppendHeaders[net.HashHeaderName] = hash + } + } + + return hash, nil +} + +// HostsPerVisibility takes an Ingress and a map from visibility levels to a set of string keys, +// it then returns a map from that key space to the hosts under that visibility. +func HostsPerVisibility(ing *v1alpha1.Ingress, visibilityToKey map[v1alpha1.IngressVisibility]sets.String) map[string]sets.String { + output := make(map[string]sets.String) + for _, rule := range ing.Spec.Rules { + for host := range ExpandedHosts(sets.NewString(rule.Hosts...)) { + for key := range visibilityToKey[rule.Visibility] { + if _, ok := output[key]; !ok { + output[key] = sets.NewString() + } + output[key].Insert(host) + } + } + } + return output +} + +// ExpandedHosts sets up hosts for the short-names for cluster DNS names. +func ExpandedHosts(hosts sets.String) sets.String { + expanded := sets.NewString() + allowedSuffixes := []string{ + "", + "." + network.GetClusterDomainName(), + ".svc." 
+ network.GetClusterDomainName(), + } + for _, h := range hosts.List() { + for _, suffix := range allowedSuffixes { + if strings.HasSuffix(h, suffix) { + expanded.Insert(strings.TrimSuffix(h, suffix)) + } + } + } + return expanded +} diff --git a/test/vendor/knative.dev/serving/pkg/network/ingress/ingress_test.go b/test/vendor/knative.dev/serving/pkg/network/ingress/ingress_test.go new file mode 100644 index 0000000000..afdf34ded8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/ingress/ingress_test.go @@ -0,0 +1,243 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +func TestGetExpandedHosts(t *testing.T) { + for _, test := range []struct { + name string + hosts sets.String + want sets.String + }{{ + name: "cluster local service in non-default namespace", + hosts: sets.NewString( + "service.namespace.svc.cluster.local", + ), + want: sets.NewString( + "service.namespace", + "service.namespace.svc", + "service.namespace.svc.cluster.local", + ), + }, { + name: "example.com service", + hosts: sets.NewString( + "foo.bar.example.com", + ), + want: sets.NewString( + "foo.bar.example.com", + ), + }, { + name: "default.example.com service", + hosts: sets.NewString( + "foo.default.example.com", + ), + want: sets.NewString( + "foo.default.example.com", + ), + }, { + name: "mix", + hosts: sets.NewString( + "foo.default.example.com", + "foo.default.svc.cluster.local", + ), + want: sets.NewString( + "foo.default", + "foo.default.example.com", + "foo.default.svc", + "foo.default.svc.cluster.local", + ), + }} { + t.Run(test.name, func(t *testing.T) { + got := ExpandedHosts(test.hosts) + if diff := cmp.Diff(got, test.want); diff != "" { + t.Errorf("Unexpected (-want +got): %v", diff) + } + }) + } +} + +func TestInsertProbe(t *testing.T) { + tests := []struct { + name string + ingress *v1alpha1.Ingress + want string + }{{ + name: "with rules, no append header", + ingress: &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "example.com", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: "blah", + }, + }}, + }}, + }, + }}, + }, + }, + want: "b90f793b72c245476c6b4060967121ef", + }, { + name: "with rules, with append header", + ingress: &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + 
Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "example.com", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: "blah", + }, + AppendHeaders: map[string]string{ + "Foo": "bar", + }, + }}, + }}, + }, + }}, + }, + }, + want: "061575cdf950105126a81d6da83cda8b", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + before := len(test.ingress.Spec.Rules[0].HTTP.Paths[0].AppendHeaders) + got, err := InsertProbe(test.ingress) + if err != nil { + t.Errorf("InsertProbe() = %v", err) + } + if got != test.want { + t.Errorf("InsertProbe() = %s, wanted %s", got, test.want) + } + after := len(test.ingress.Spec.Rules[0].HTTP.Paths[0].AppendHeaders) + if before+1 != after { + t.Errorf("InsertProbe() left %d headers, wanted %d", after, before+1) + } + }) + } +} + +func TestHostsPerVisibility(t *testing.T) { + tests := []struct { + name string + ingress *v1alpha1.Ingress + in map[v1alpha1.IngressVisibility]sets.String + want map[string]sets.String + }{{ + name: "external rule", + ingress: &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "example.com", + "foo.bar.svc.cluster.local", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: "blah", + }, + AppendHeaders: map[string]string{ + "Foo": "bar", + }, + }}, + }}, + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + in: map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString("foo"), + v1alpha1.IngressVisibilityClusterLocal: sets.NewString("bar", "baz"), + }, + want: map[string]sets.String{ + "foo": sets.NewString( + "example.com", + "foo.bar.svc.cluster.local", + "foo.bar.svc", + "foo.bar", + ), + }, + }, { + name: 
"internal rule", + ingress: &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.svc.cluster.local", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: "blah", + }, + AppendHeaders: map[string]string{ + "Foo": "bar", + }, + }}, + }}, + }, + Visibility: v1alpha1.IngressVisibilityClusterLocal, + }}, + }, + }, + in: map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString("foo"), + v1alpha1.IngressVisibilityClusterLocal: sets.NewString("bar", "baz"), + }, + want: map[string]sets.String{ + "bar": sets.NewString( + "foo.bar.svc.cluster.local", + "foo.bar.svc", + "foo.bar", + ), + "baz": sets.NewString( + "foo.bar.svc.cluster.local", + "foo.bar.svc", + "foo.bar", + ), + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := HostsPerVisibility(test.ingress, test.in) + if !cmp.Equal(got, test.want) { + t.Errorf("HostsPerVisibility (-want, +got) = %s", cmp.Diff(test.want, got)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/network/network.go b/test/vendor/knative.dev/serving/pkg/network/network.go new file mode 100644 index 0000000000..c620b81bb8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/network.go @@ -0,0 +1,406 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "text/template" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // ProbeHeaderName is the name of a header that can be added to + // requests to probe the knative networking layer. Requests + // with this header will not be passed to the user container or + // included in request metrics. + ProbeHeaderName = "K-Network-Probe" + + // ProxyHeaderName is the name of an internal header that activator + // uses to mark requests going through it. + ProxyHeaderName = "K-Proxy-Request" + + // HashHeaderName is the name of an internal header that Ingress controller + // uses to find out which version of the networking config is deployed. + HashHeaderName = "K-Network-Hash" + + // OriginalHostHeader is used to avoid Istio host based routing rules + // in Activator. + // The header contains the original Host value that can be rewritten + // at the Queue proxy level back to be a host header. + OriginalHostHeader = "K-Original-Host" + + // ConfigName is the name of the configmap containing all + // customizations for networking features. + ConfigName = "config-network" + + // IstioOutboundIPRangesKey is the name of the configuration entry + // that specifies Istio outbound ip ranges. + IstioOutboundIPRangesKey = "istio.sidecar.includeOutboundIPRanges" + + // DeprecatedDefaultIngressClassKey Please use DefaultIngressClassKey instead. + DeprecatedDefaultIngressClassKey = "clusteringress.class" + + // DefaultIngressClassKey is the name of the configuration entry + // that specifies the default Ingress. + DefaultIngressClassKey = "ingress.class" + + // DefaultCertificateClassKey is the name of the configuration entry + // that specifies the default Certificate. 
+ DefaultCertificateClassKey = "certificate.class" + + // IstioIngressClassName value for specifying knative's Istio + // Ingress reconciler. + IstioIngressClassName = "istio.ingress.networking.knative.dev" + + // CertManagerCertificateClassName value for specifying Knative's Cert-Manager + // Certificate reconciler. + CertManagerCertificateClassName = "cert-manager.certificate.networking.internal.knative.dev" + + // DomainTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // Knative service's DNS name. + DomainTemplateKey = "domainTemplate" + + // TagTemplateKey is the name of the configuration entry that + // specifies the golang template string to use to construct the + // hostname for a Route's tag. + TagTemplateKey = "tagTemplate" + + // Since K8s 1.8, prober requests have + // User-Agent = "kube-probe/{major-version}.{minor-version}". + KubeProbeUAPrefix = "kube-probe/" + + // Istio with mTLS rewrites probes, but their probes pass a different + // user-agent. So we augment the probes with this header. + KubeletProbeHeaderName = "K-Kubelet-Probe" + + // DefaultDomainTemplate is the default golang template to use when + // constructing the Knative Route's Domain(host) + DefaultDomainTemplate = "{{.Name}}.{{.Namespace}}.{{.Domain}}" + + // DefaultTagTemplate is the default golang template to use when + // constructing the Knative Route's tag names. + DefaultTagTemplate = "{{.Tag}}-{{.Name}}" + + // AutoTLSKey is the name of the configuration entry + // that specifies enabling auto-TLS or not. + AutoTLSKey = "autoTLS" + + // HTTPProtocolKey is the name of the configuration entry that + // specifies the HTTP endpoint behavior of Knative ingress. + HTTPProtocolKey = "httpProtocol" + + // UserAgentKey is the constant for header "User-Agent". + UserAgentKey = "User-Agent" + + // ActivatorUserAgent is the user-agent header value set in probe requests sent + // from activator. 
+ ActivatorUserAgent = "Knative-Activator-Probe" + + // QueueProxyUserAgent is the user-agent header value set in probe requests sent + // from queue-proxy. + QueueProxyUserAgent = "Knative-Queue-Proxy-Probe" +) + +// DomainTemplateValues are the available properties people can choose from +// in their Route's "DomainTemplate" golang template sting. +// We could add more over time - e.g. RevisionName if we thought that +// might be of interest to people. +type DomainTemplateValues struct { + Name string + Namespace string + Domain string + Annotations map[string]string +} + +// TagTemplateValues are the available properties people can choose from +// in their Route's "TagTemplate" golang template sting. +type TagTemplateValues struct { + Name string + Tag string +} + +// Config contains the networking configuration defined in the +// network config map. +type Config struct { + // IstioOutboundIPRange specifies the IP ranges to intercept + // by Istio sidecar. + IstioOutboundIPRanges string + + // DefaultIngressClass specifies the default Ingress class. + DefaultIngressClass string + + // DomainTemplate is the golang text template to use to generate the + // Route's domain (host) for the Service. + DomainTemplate string + + // TagTemplate is the golang text template to use to generate the + // Route's tag hostnames. + TagTemplate string + + // AutoTLS specifies if auto-TLS is enabled or not. + AutoTLS bool + + // HTTPProtocol specifics the behavior of HTTP endpoint of Knative + // ingress. + HTTPProtocol HTTPProtocol + + // DefaultCertificateClass specifies the default Certificate class. + DefaultCertificateClass string +} + +// HTTPProtocol indicates a type of HTTP endpoint behavior +// that Knative ingress could take. +type HTTPProtocol string + +const ( + // HTTPEnabled represents HTTP proocol is enabled in Knative ingress. + HTTPEnabled HTTPProtocol = "enabled" + + // HTTPDisabled represents HTTP protocol is disabled in Knative ingress. 
+ HTTPDisabled HTTPProtocol = "disabled" + + // HTTPRedirected represents HTTP connection is redirected to HTTPS in Knative ingress. + HTTPRedirected HTTPProtocol = "redirected" +) + +func validateAndNormalizeOutboundIPRanges(s string) (string, error) { + s = strings.TrimSpace(s) + + // * is a valid value + if s == "*" { + return s, nil + } + + cidrs := strings.Split(s, ",") + var normalized []string + for _, cidr := range cidrs { + cidr = strings.TrimSpace(cidr) + if len(cidr) == 0 { + continue + } + if _, _, err := net.ParseCIDR(cidr); err != nil { + return "", err + } + + normalized = append(normalized, cidr) + } + + return strings.Join(normalized, ","), nil +} + +// NewConfigFromConfigMap creates a Config from the supplied ConfigMap +func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) { + nc := &Config{} + if ipr, ok := configMap.Data[IstioOutboundIPRangesKey]; !ok { + // It is OK for this to be absent, we will elide the annotation. + nc.IstioOutboundIPRanges = "*" + } else if normalizedIpr, err := validateAndNormalizeOutboundIPRanges(ipr); err != nil { + return nil, err + } else { + nc.IstioOutboundIPRanges = normalizedIpr + } + + nc.DefaultIngressClass = IstioIngressClassName + if ingressClass, ok := configMap.Data[DefaultIngressClassKey]; ok { + nc.DefaultIngressClass = ingressClass + } else if ingressClass, ok := configMap.Data[DeprecatedDefaultIngressClassKey]; ok { + nc.DefaultIngressClass = ingressClass + } + + nc.DefaultCertificateClass = CertManagerCertificateClassName + if certClass, ok := configMap.Data[DefaultCertificateClassKey]; ok { + nc.DefaultCertificateClass = certClass + } + + // Blank DomainTemplate makes no sense so use our default + if dt, ok := configMap.Data[DomainTemplateKey]; !ok { + nc.DomainTemplate = DefaultDomainTemplate + } else { + t, err := template.New("domain-template").Parse(dt) + if err != nil { + return nil, err + } + if err := checkDomainTemplate(t); err != nil { + return nil, err + } + + 
nc.DomainTemplate = dt + } + + // Blank TagTemplate makes no sense so use our default + if tt, ok := configMap.Data[TagTemplateKey]; !ok { + nc.TagTemplate = DefaultTagTemplate + } else { + t, err := template.New("tag-template").Parse(tt) + if err != nil { + return nil, err + } + if err := checkTagTemplate(t); err != nil { + return nil, err + } + + nc.TagTemplate = tt + } + + nc.AutoTLS = strings.EqualFold(configMap.Data[AutoTLSKey], "enabled") + + switch strings.ToLower(configMap.Data[HTTPProtocolKey]) { + case string(HTTPEnabled): + nc.HTTPProtocol = HTTPEnabled + case "": + // If HTTPProtocol is not set in the config-network, we set the default value + // to HTTPEnabled. + nc.HTTPProtocol = HTTPEnabled + case string(HTTPDisabled): + nc.HTTPProtocol = HTTPDisabled + case string(HTTPRedirected): + nc.HTTPProtocol = HTTPRedirected + default: + return nil, fmt.Errorf("httpProtocol %s in config-network ConfigMap is not supported", configMap.Data[HTTPProtocolKey]) + } + return nc, nil +} + +// GetDomainTemplate returns the golang Template from the config map +// or panics (the value is validated during CM validation and at +// this point guaranteed to be parseable). +func (c *Config) GetDomainTemplate() *template.Template { + return template.Must(template.New("domain-template").Parse( + c.DomainTemplate)) +} + +func checkDomainTemplate(t *template.Template) error { + // To a test run of applying the template, and see if the + // result is a valid URL. + data := DomainTemplateValues{ + Name: "foo", + Namespace: "bar", + Domain: "baz.com", + Annotations: nil, + } + buf := bytes.Buffer{} + if err := t.Execute(&buf, data); err != nil { + return err + } + u, err := url.Parse("https://" + buf.String()) + if err != nil { + return err + } + + // TODO(mattmoor): Consider validating things like changing + // Name / Namespace changes the resulting hostname. 
+ if u.Hostname() == "" { + return errors.New("empty hostname") + } + if u.RequestURI() != "/" { + return fmt.Errorf("domain template has url path: %s", u.RequestURI()) + } + + return nil +} + +func (c *Config) GetTagTemplate() *template.Template { + return template.Must(template.New("tag-template").Parse( + c.TagTemplate)) +} + +func checkTagTemplate(t *template.Template) error { + // To a test run of applying the template, and see if we + // produce a result without error. + data := TagTemplateValues{ + Name: "foo", + Tag: "v2", + } + return t.Execute(ioutil.Discard, data) +} + +// IsKubeletProbe returns true if the request is a Kubernetes probe. +func IsKubeletProbe(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("User-Agent"), KubeProbeUAPrefix) || + r.Header.Get(KubeletProbeHeaderName) != "" +} + +// KnativeProbeHeader returns the value for key ProbeHeaderName in request headers. +func KnativeProbeHeader(r *http.Request) string { + return r.Header.Get(ProbeHeaderName) +} + +// KnativeProxyHeader returns the value for key ProxyHeaderName in request headers. +func KnativeProxyHeader(r *http.Request) string { + return r.Header.Get(ProxyHeaderName) +} + +// IsProbe returns true if the request is a Kubernetes probe or a Knative probe, +// i.e. non-empty ProbeHeaderName header. +func IsProbe(r *http.Request) bool { + return IsKubeletProbe(r) || KnativeProbeHeader(r) != "" +} + +// RewriteHostIn removes the `Host` header from the inbound (server) request +// and replaces it with our custom header. +// This is done to avoid Istio Host based routing, see #3870. +// Queue-Proxy will execute the reverse process. +func RewriteHostIn(r *http.Request) { + h := r.Host + r.Host = "" + r.Header.Del("Host") + // Don't overwrite an existing OriginalHostHeader. + if r.Header.Get(OriginalHostHeader) == "" { + r.Header.Set(OriginalHostHeader, h) + } +} + +// RewriteHostOut undoes the `RewriteHostIn` action. 
+// RewriteHostOut checks if network.OriginalHostHeader was set and if it was, +// then uses that as the r.Host (which takes priority over Request.Header["Host"]). +// If the request did not have the OriginalHostHeader header set, the request is untouched. +func RewriteHostOut(r *http.Request) { + if ohh := r.Header.Get(OriginalHostHeader); ohh != "" { + r.Host = ohh + r.Header.Del("Host") + r.Header.Del(OriginalHostHeader) + } +} + +// NameForPortNumber finds the name for a given port as defined by a Service. +func NameForPortNumber(svc *corev1.Service, portNumber int32) (string, error) { + for _, port := range svc.Spec.Ports { + if port.Port == portNumber { + return port.Name, nil + } + } + return "", fmt.Errorf("no port with number %d found", portNumber) +} + +// PortNumberForName resolves a given name to a portNumber as defined by an EndpointSubset. +func PortNumberForName(sub corev1.EndpointSubset, portName string) (int32, error) { + for _, subPort := range sub.Ports { + if subPort.Name == portName { + return subPort.Port, nil + } + } + return 0, fmt.Errorf("no port for name %q found", portName) +} diff --git a/test/vendor/knative.dev/serving/pkg/network/network_test.go b/test/vendor/knative.dev/serving/pkg/network/network_test.go new file mode 100644 index 0000000000..2c75f34518 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/network_test.go @@ -0,0 +1,838 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "bytes" + "errors" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "text/template" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/system" + + . "knative.dev/pkg/configmap/testing" + _ "knative.dev/pkg/system/testing" +) + +func TestOurConfig(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, ConfigName) + + if _, err := NewConfigFromConfigMap(cm); err != nil { + t.Errorf("NewConfigFromConfigMap(actual) = %v", err) + } + if _, err := NewConfigFromConfigMap(example); err != nil { + t.Errorf("NewConfigFromConfigMap(example) = %v", err) + } +} + +func TestConfiguration(t *testing.T) { + const nonDefaultDomainTemplate = "{{.Namespace}}.{{.Name}}.{{.Domain}}" + ignoreDT := cmpopts.IgnoreFields(Config{}, "DomainTemplate") + + networkConfigTests := []struct { + name string + wantErr bool + wantConfig *Config + config *corev1.ConfigMap + }{{ + name: "network configuration with no network input", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + }, + }, { + name: "network configuration with invalid outbound IP range", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "10.10.10.10/33", + }, + }, + }, { + name: "network configuration with empty network", + wantErr: false, + wantConfig: &Config{ + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: 
CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "", + }, + }, + }, { + name: "network configuration with both valid and some invalid range", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "10.10.10.10/12,invalid", + }, + }, + }, { + name: "network configuration with invalid network range", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "10.10.10.10/12,-1.1.1.1/10", + }, + }, + }, { + name: "network configuration with invalid network key", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "this is not an IP range", + }, + }, + }, { + name: "network configuration with invalid network", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*,*", + }, + }, + }, { + name: "network configuration with incomplete network array", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*,", + }, + }, + }, { + name: "network configuration with invalid network string", + wantErr: false, + wantConfig: &Config{ + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: 
CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: ", ,", + }, + }, + }, { + name: "network configuration with invalid network string", + wantErr: false, + wantConfig: &Config{ + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: ",,", + }, + }, + }, { + name: "network configuration with invalid network range", + wantErr: false, + wantConfig: &Config{ + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: ",", + }, + }, + }, { + name: "network configuration with valid CIDR network range", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "10.10.10.0/24", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "10.10.10.0/24", + }, + }, + }, { + 
name: "network configuration with multiple valid network ranges", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "10.10.10.0/24,10.240.10.0/14,192.192.10.0/16", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "10.10.10.0/24,10.240.10.0/14,192.192.10.0/16", + }, + }, + }, { + name: "network configuration with valid network", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + }, + }, + }, { + name: "network configuration with non-Istio ingress type", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "foo-ingress", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + }, + }, + }, { + name: "network configuration with non-Cert-Manager Certificate type", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + 
DefaultCertificateClass: "foo-cert", + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultCertificateClassKey: "foo-cert", + }, + }, + }, { + name: "network configuration with diff domain template", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "foo-ingress", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: nonDefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + DomainTemplateKey: nonDefaultDomainTemplate, + }, + }, + }, { + name: "network configuration with blank domain template", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + DomainTemplateKey: "", + }, + }, + }, { + name: "network configuration with bad domain template", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + // This is missing a closing brace. 
+ DomainTemplateKey: "{{.Namespace}.{{.Name}}.{{.Domain}}", + }, + }, + }, { + name: "network configuration with bad domain template", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + // This is missing a closing brace. + DomainTemplateKey: "{{.Namespace}.{{.Name}}.{{.Domain}}", + }, + }, + }, { + name: "network configuration with bad url", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + // Paths are disallowed + DomainTemplateKey: "{{.Domain}}/{{.Namespace}}/{{.Name}}.", + }, + }, + }, { + name: "network configuration with bad variable", + wantErr: true, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + // Bad variable + DomainTemplateKey: "{{.Name}}.{{.NAmespace}}.{{.Domain}}", + }, + }, + }, { + name: "network configuration with Auto TLS enabled", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + AutoTLS: true, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + AutoTLSKey: "enabled", + }, + }, + }, { + name: "network configuration with Auto TLS disabled", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + 
DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + AutoTLS: false, + HTTPProtocol: HTTPEnabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + AutoTLSKey: "disabled", + }, + }, + }, { + name: "network configuration with HTTPProtocol disabled", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + AutoTLS: true, + HTTPProtocol: HTTPDisabled, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + AutoTLSKey: "enabled", + HTTPProtocolKey: "Disabled", + }, + }, + }, { + name: "network configuration with HTTPProtocol redirected", + wantErr: false, + wantConfig: &Config{ + IstioOutboundIPRanges: "*", + DefaultIngressClass: "istio.ingress.networking.knative.dev", + DefaultCertificateClass: CertManagerCertificateClassName, + DomainTemplate: DefaultDomainTemplate, + TagTemplate: DefaultTagTemplate, + AutoTLS: true, + HTTPProtocol: HTTPRedirected, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + AutoTLSKey: "enabled", + HTTPProtocolKey: "Redirected", + }, + }, + }} + + for _, tt := range networkConfigTests { + t.Run(tt.name, func(t *testing.T) { + actualConfig, err := NewConfigFromConfigMap(tt.config) + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewConfigFromConfigMap() error = %v, WantErr %v", + 
tt.name, err, tt.wantErr) + } + if tt.wantErr { + return + } + + data := DomainTemplateValues{ + Name: "foo", + Namespace: "bar", + Domain: "baz.com", + } + want := mustExecute(t, tt.wantConfig.GetDomainTemplate(), data) + got := mustExecute(t, actualConfig.GetDomainTemplate(), data) + if got != want { + t.Errorf("DomainTemplate(data) = %s, wanted %s", got, want) + } + + if diff := cmp.Diff(actualConfig, tt.wantConfig, ignoreDT); diff != "" { + t.Fatalf("want %v, but got %v", + tt.wantConfig, actualConfig) + } + }) + } +} + +func TestAnnotationsInDomainTemplate(t *testing.T) { + networkConfigTests := []struct { + name string + wantErr bool + wantDomainTemplate string + config *corev1.ConfigMap + data DomainTemplateValues + }{{ + name: "network configuration with annotations in template", + wantErr: false, + wantDomainTemplate: "foo.sub1.baz.com", + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + DomainTemplateKey: `{{.Name}}.{{ index .Annotations "sub"}}.{{.Domain}}`, + }, + }, + data: DomainTemplateValues{ + Name: "foo", + Namespace: "bar", + Annotations: map[string]string{ + "sub": "sub1"}, + Domain: "baz.com"}, + }, { + name: "network configuration without annotations in template", + wantErr: false, + wantDomainTemplate: "foo.bar.baz.com", + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: ConfigName, + }, + Data: map[string]string{ + IstioOutboundIPRangesKey: "*", + DefaultIngressClassKey: "foo-ingress", + DomainTemplateKey: `{{.Name}}.{{.Namespace}}.{{.Domain}}`, + }, + }, + data: DomainTemplateValues{ + Name: "foo", + Namespace: "bar", + Domain: "baz.com"}, + }} + + for _, tt := range networkConfigTests { + t.Run(tt.name, func(t *testing.T) { + actualConfig, err := NewConfigFromConfigMap(tt.config) + if (err != nil) != tt.wantErr { + 
t.Fatalf("Test: %q; NewConfigFromConfigMap() error = %v, WantErr %v", + tt.name, err, tt.wantErr) + } + if tt.wantErr { + return + } + + got := mustExecute(t, actualConfig.GetDomainTemplate(), tt.data) + if got != tt.wantDomainTemplate { + t.Errorf("DomainTemplate(data) = %s, wanted %s", got, tt.wantDomainTemplate) + } + }) + } +} + +func mustExecute(t *testing.T, tmpl *template.Template, data interface{}) string { + t.Helper() + buf := bytes.Buffer{} + if err := tmpl.Execute(&buf, data); err != nil { + t.Errorf("Error executing the DomainTemplate: %v", err) + } + return buf.String() +} + +func TestIsKubeletProbe(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) + if err != nil { + t.Fatalf("Error building request: %v", err) + } + if IsKubeletProbe(req) { + t.Error("Not a kubelet probe but counted as such") + } + req.Header.Set("User-Agent", KubeProbeUAPrefix+"1.14") + if !IsKubeletProbe(req) { + t.Error("kubelet probe but not counted as such") + } + req.Header.Del("User-Agent") + if IsKubeletProbe(req) { + t.Error("Not a kubelet probe but counted as such") + } + req.Header.Set(KubeletProbeHeaderName, "no matter") + if !IsKubeletProbe(req) { + t.Error("kubelet probe but not counted as such") + } +} + +func TestKnativeProbeHeader(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) + if err != nil { + t.Fatalf("Error building request: %v", err) + } + if h := KnativeProbeHeader(req); h != "" { + t.Errorf("KnativeProbeHeader(req)=%v, want empty string", h) + } + want := "activator" + req.Header.Set(ProbeHeaderName, want) + if h := KnativeProbeHeader(req); h != want { + t.Errorf("KnativeProbeHeader(req)=%v, want %v", h, want) + } + req.Header.Set(ProbeHeaderName, "") + if h := KnativeProbeHeader(req); h != "" { + t.Errorf("KnativeProbeHeader(req)=%v, want empty string", h) + } +} + +func TestKnativeProxyHeader(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com/", 
nil) + if err != nil { + t.Fatalf("Error building request: %v", err) + } + if h := KnativeProxyHeader(req); h != "" { + t.Errorf("KnativeProxyHeader(req)=%v, want empty string", h) + } + want := "activator" + req.Header.Set(ProxyHeaderName, want) + if h := KnativeProxyHeader(req); h != want { + t.Errorf("KnativeProxyHeader(req)=%v, want %v", h, want) + } + req.Header.Set(ProxyHeaderName, "") + if h := KnativeProxyHeader(req); h != "" { + t.Errorf("KnativeProxyHeader(req)=%v, want empty string", h) + } +} + +func TestIsProbe(t *testing.T) { + // Not a probe + req, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) + if err != nil { + t.Fatalf("Error building request: %v", err) + } + if IsProbe(req) { + t.Error("Not a probe but counted as such") + } + // Kubelet probe + req.Header.Set("User-Agent", KubeProbeUAPrefix+"1.14") + if !IsProbe(req) { + t.Error("Kubelet probe but not counted as such") + } + // Knative probe + req.Header.Del("User-Agent") + req.Header.Set(ProbeHeaderName, "activator") + if !IsProbe(req) { + t.Error("Knative probe but not counted as such") + } +} + +func TestRewriteHost(t *testing.T) { + r := httptest.NewRequest(http.MethodGet, "http://love.is/not-hate", nil) + r.Header.Set("Host", "love.is") + + RewriteHostIn(r) + + if got, want := r.Host, ""; got != want { + t.Errorf("r.Host = %q, want: %q", got, want) + } + + if got, want := r.Header.Get("Host"), ""; got != want { + t.Errorf("r.Header['Host'] = %q, want: %q", got, want) + } + + if got, want := r.Header.Get(OriginalHostHeader), "love.is"; got != want { + t.Errorf("r.Header[%s] = %q, want: %q", OriginalHostHeader, got, want) + } + + // Do it again, but make sure that the ORIGINAL domain is still preserved. 
+ r.Header.Set("Host", "hate.is") + RewriteHostIn(r) + + if got, want := r.Host, ""; got != want { + t.Errorf("r.Host = %q, want: %q", got, want) + } + + if got, want := r.Header.Get("Host"), ""; got != want { + t.Errorf("r.Header['Host'] = %q, want: %q", got, want) + } + + if got, want := r.Header.Get(OriginalHostHeader), "love.is"; got != want { + t.Errorf("r.Header[%s] = %q, want: %q", OriginalHostHeader, got, want) + } + + RewriteHostOut(r) + if got, want := r.Host, "love.is"; got != want { + t.Errorf("r.Host = %q, want: %q", got, want) + } + + if got, want := r.Header.Get("Host"), ""; got != want { + t.Errorf("r.Header['Host'] = %q, want: %q", got, want) + } + + if got, want := r.Header.Get(OriginalHostHeader), ""; got != want { + t.Errorf("r.Header[%s] = %q, want: %q", OriginalHostHeader, got, want) + } +} + +func TestNameForPortNumber(t *testing.T) { + for _, tc := range []struct { + name string + svc *corev1.Service + portNumber int32 + portName string + err error + }{{ + name: "HTTP to 80", + svc: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 80, + Name: "http", + }, { + Port: 443, + Name: "https", + }}, + }, + }, + portName: "http", + portNumber: 80, + }, { + name: "no port", + svc: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 443, + Name: "https", + }}, + }, + }, + portNumber: 80, + err: errors.New("no port with number 80 found"), + }} { + t.Run(tc.name, func(t *testing.T) { + portName, err := NameForPortNumber(tc.svc, tc.portNumber) + if !reflect.DeepEqual(err, tc.err) { // cmp Doesn't work well here due to private fields. 
+ t.Errorf("Err = %v, want: %v", err, tc.err) + } + if tc.err == nil && portName != tc.portName { + t.Errorf("PortName = %s, want: %s", portName, tc.portName) + } + }) + } +} + +func TestPortNumberForName(t *testing.T) { + for _, tc := range []struct { + name string + subset corev1.EndpointSubset + portNumber int32 + portName string + err error + }{{ + name: "HTTP to 80", + subset: corev1.EndpointSubset{ + Ports: []corev1.EndpointPort{{ + Port: 8080, + Name: "http", + }, { + Port: 8443, + Name: "https", + }}, + }, + portName: "http", + portNumber: 8080, + }, { + name: "no port", + subset: corev1.EndpointSubset{ + Ports: []corev1.EndpointPort{{ + Port: 8443, + Name: "https", + }}, + }, + portName: "http", + err: errors.New(`no port for name "http" found`), + }} { + t.Run(tc.name, func(t *testing.T) { + portNumber, err := PortNumberForName(tc.subset, tc.portName) + if !reflect.DeepEqual(err, tc.err) { // cmp Doesn't work well here due to private fields. + t.Errorf("Err = %v, want: %v", err, tc.err) + } + if tc.err == nil && portNumber != tc.portNumber { + t.Errorf("PortNumber = %d, want: %d", portNumber, tc.portNumber) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/network/probe_handler.go b/test/vendor/knative.dev/serving/pkg/network/probe_handler.go new file mode 100644 index 0000000000..be88b57486 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/probe_handler.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "fmt" + "net/http" +) + +// ProbeHeaderValue is the value used in 'K-Network-Probe' +var ProbeHeaderValue = "probe" + +type handler struct { + next http.Handler +} + +// NewProbeHandler wraps a HTTP handler handling probing requests around the provided HTTP handler +func NewProbeHandler(next http.Handler) http.Handler { + return &handler{next: next} +} + +// ServeHTTP handles probing requests +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if ph := r.Header.Get(ProbeHeaderName); ph != ProbeHeaderValue { + r.Header.Del(HashHeaderName) + h.next.ServeHTTP(w, r) + return + } + + hh := r.Header.Get(HashHeaderName) + if hh == "" { + http.Error(w, fmt.Sprintf("a probe request must contain a non-empty %q header", HashHeaderName), http.StatusBadRequest) + return + } + + w.Header().Set(HashHeaderName, hh) + w.WriteHeader(200) +} diff --git a/test/vendor/knative.dev/serving/pkg/network/probe_handler_test.go b/test/vendor/knative.dev/serving/pkg/network/probe_handler_test.go new file mode 100644 index 0000000000..5118b1b7e4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/probe_handler_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package network + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "knative.dev/pkg/network" + "knative.dev/pkg/network/prober" + _ "knative.dev/pkg/system/testing" +) + +func TestProbeHandlerSuccessfulProbe(t *testing.T) { + body := "Inner Body" + cases := []struct { + name string + options []interface{} + want bool + expErr bool + }{{ + name: "successful probe when both headers are specified", + options: []interface{}{ + prober.WithHeader(ProbeHeaderName, ProbeHeaderValue), + prober.WithHeader(HashHeaderName, "foo-bar-baz"), + prober.ExpectsStatusCodes([]int{http.StatusOK}), + }, + want: true, + }, { + name: "forwards to inner handler when probe header is not specified", + options: []interface{}{ + prober.WithHeader(HashHeaderName, "foo-bar-baz"), + prober.ExpectsBody(body), + // Validates the header is stripped before forwarding to the inner handler + prober.ExpectsHeader(HashHeaderName, "false"), + prober.ExpectsStatusCodes([]int{http.StatusOK}), + }, + want: true, + }, { + name: "forwards to inner handler when probe header is not 'probe'", + options: []interface{}{ + prober.WithHeader(ProbeHeaderName, "queue"), + prober.WithHeader(HashHeaderName, "foo-bar-baz"), + prober.ExpectsBody(body), + prober.ExpectsHeader(ProbeHeaderName, "true"), + // Validates the header is stripped before forwarding to the inner handler + prober.ExpectsHeader(HashHeaderName, "false"), + prober.ExpectsStatusCodes([]int{http.StatusOK}), + }, + want: true, + }, { + name: "failed probe when hash header is not present", + options: []interface{}{ + prober.WithHeader(ProbeHeaderName, ProbeHeaderValue), + prober.ExpectsStatusCodes([]int{http.StatusOK}), + }, + want: false, + expErr: true, + }} + + var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, ok := r.Header[ProbeHeaderName] + w.Header().Set(ProbeHeaderName, fmt.Sprintf("%t", ok)) + _, ok = r.Header[HashHeaderName] + w.Header().Set(HashHeaderName, 
fmt.Sprintf("%t", ok)) + w.Write([]byte(body)) + }) + h = NewProbeHandler(h) + ts := httptest.NewServer(h) + defer ts.Close() + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := prober.Do(context.Background(), network.AutoTransport, ts.URL, c.options...) + if err != nil && !c.expErr { + t.Errorf("prober.Do() = %v, no error expected", err) + } + if err == nil && c.expErr { + t.Errorf("prober.Do() = nil, expected an error") + } + if got != c.want { + t.Errorf("unexpected probe result: want: %t, got: %t", c.want, got) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/network/status/status.go b/test/vendor/knative.dev/serving/pkg/network/status/status.go new file mode 100644 index 0000000000..7b9d0128c6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/status/status.go @@ -0,0 +1,424 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package status + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "reflect" + "sync" + "sync/atomic" + "time" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + + "knative.dev/pkg/network/prober" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/network/ingress" +) + +const ( + // probeConcurrency defines how many probing calls can be issued simultaneously + probeConcurrency = 15 + // stateExpiration defines how long after being last accessed a state expires + stateExpiration = 5 * time.Minute + // cleanupPeriod defines how often states are cleaned up + cleanupPeriod = 1 * time.Minute + //probeTimeout defines the maximum amount of time a request will wait + probeTimeout = 1 * time.Second +) + +var dialContext = (&net.Dialer{Timeout: probeTimeout}).DialContext + +// ingressState represents the probing state of an Ingress +type ingressState struct { + hash string + ing *v1alpha1.Ingress + + // pendingCount is the number of pods that haven't been successfully probed yet + pendingCount int32 + lastAccessed time.Time + + cancel func() +} + +// podState represents the probing state of a Pod (for a specific Ingress) +type podState struct { + // successCount is the number of successful probes + successCount int32 + + cancel func() +} + +// cancelContext is a pair of a Context and its cancel function +type cancelContext struct { + context context.Context + cancel func() +} + +type workItem struct { + ingressState *ingressState + podState *podState + context context.Context + url *url.URL + podIP string + podPort string +} + +// ProbeTarget contains the URLs to probes for a set of Pod IPs serving out of the same port. 
+type ProbeTarget struct { + PodIPs sets.String + PodPort string + Port string + URLs []*url.URL +} + +// ProbeTargetLister lists all the targets that requires probing. +type ProbeTargetLister interface { + // ListProbeTargets returns a list of targets to be probed. + ListProbeTargets(ctx context.Context, ingress *v1alpha1.Ingress) ([]ProbeTarget, error) +} + +// Manager provides a way to check if an Ingress is ready +type Manager interface { + IsReady(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) +} + +// Prober provides a way to check if a VirtualService is ready by probing the Envoy pods +// handling that VirtualService. +type Prober struct { + logger *zap.SugaredLogger + + // mu guards ingressStates and podContexts + mu sync.Mutex + ingressStates map[string]*ingressState + podContexts map[string]cancelContext + + workQueue workqueue.RateLimitingInterface + + targetLister ProbeTargetLister + + readyCallback func(*v1alpha1.Ingress) + + probeConcurrency int + stateExpiration time.Duration + cleanupPeriod time.Duration +} + +// NewProber creates a new instance of Prober +func NewProber( + logger *zap.SugaredLogger, + targetLister ProbeTargetLister, + readyCallback func(*v1alpha1.Ingress)) *Prober { + return &Prober{ + logger: logger, + ingressStates: make(map[string]*ingressState), + podContexts: make(map[string]cancelContext), + workQueue: workqueue.NewNamedRateLimitingQueue( + workqueue.DefaultControllerRateLimiter(), + "ProbingQueue"), + targetLister: targetLister, + readyCallback: readyCallback, + probeConcurrency: probeConcurrency, + stateExpiration: stateExpiration, + cleanupPeriod: cleanupPeriod, + } +} + +func ingressKey(ing *v1alpha1.Ingress) string { + return fmt.Sprintf("%s/%s", ing.GetNamespace(), ing.GetName()) +} + +// IsReady checks if the provided Ingress is ready, i.e. the Envoy pods serving the Ingress +// have all been updated. This function is designed to be used by the Ingress controller, i.e. 
it +// will be called in the order of reconciliation. This means that if IsReady is called on an Ingress, +// this Ingress is the latest known version and therefore anything related to older versions can be ignored. +// Also, it means that IsReady is not called concurrently. +func (m *Prober) IsReady(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) { + ingressKey := ingressKey(ing) + + bytes, err := ingress.ComputeHash(ing) + if err != nil { + return false, fmt.Errorf("failed to compute the hash of the Ingress: %w", err) + } + hash := fmt.Sprintf("%x", bytes) + + if ready, ok := func() (bool, bool) { + m.mu.Lock() + defer m.mu.Unlock() + if state, ok := m.ingressStates[ingressKey]; ok { + if state.hash == hash { + state.lastAccessed = time.Now() + return atomic.LoadInt32(&state.pendingCount) == 0, true + } + + // Cancel the polling for the outdated version + state.cancel() + delete(m.ingressStates, ingressKey) + } + return false, false + }(); ok { + return ready, nil + } + + ingCtx, cancel := context.WithCancel(context.Background()) + ingressState := &ingressState{ + hash: hash, + ing: ing, + lastAccessed: time.Now(), + cancel: cancel, + } + + // Get the probe targets and group them by IP + targets, err := m.targetLister.ListProbeTargets(ctx, ing) + if err != nil { + return false, err + } + workItems := make(map[string][]*workItem) + for _, target := range targets { + for ip := range target.PodIPs { + for _, url := range target.URLs { + workItems[ip] = append(workItems[ip], &workItem{ + ingressState: ingressState, + url: url, + podIP: ip, + podPort: target.PodPort, + }) + } + } + } + + ingressState.pendingCount = int32(len(workItems)) + + for ip, ipWorkItems := range workItems { + // Get or create the context for that IP + ipCtx := func() context.Context { + m.mu.Lock() + defer m.mu.Unlock() + cancelCtx, ok := m.podContexts[ip] + if !ok { + ctx, cancel := context.WithCancel(context.Background()) + cancelCtx = cancelContext{ + context: ctx, + cancel: 
cancel, + } + m.podContexts[ip] = cancelCtx + } + return cancelCtx.context + }() + + podCtx, cancel := context.WithCancel(ingCtx) + podState := &podState{ + successCount: 0, + cancel: cancel, + } + + // Quick and dirty way to join two contexts (i.e. podCtx is cancelled when either ingCtx or ipCtx are cancelled) + go func() { + select { + case <-podCtx.Done(): + // This is the actual context, there is nothing to do except + // break to avoid leaking this goroutine. + break + case <-ipCtx.Done(): + // Cancel podCtx + cancel() + } + }() + + // Update the states when probing is successful or cancelled + go func() { + <-podCtx.Done() + m.updateStates(ingressState, podState) + }() + + for _, wi := range ipWorkItems { + wi.podState = podState + wi.context = podCtx + m.workQueue.AddRateLimited(wi) + m.logger.Infof("Queuing probe for %s, IP: %s:%s (depth: %d)", + wi.url, wi.podIP, wi.podPort, m.workQueue.Len()) + } + } + + func() { + m.mu.Lock() + defer m.mu.Unlock() + m.ingressStates[ingressKey] = ingressState + }() + return len(workItems) == 0, nil +} + +// Start starts the Manager background operations +func (m *Prober) Start(done <-chan struct{}) chan struct{} { + var wg sync.WaitGroup + + // Start the worker goroutines + for i := 0; i < m.probeConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for m.processWorkItem() { + } + }() + } + + // Cleanup the states periodically + wg.Add(1) + go func() { + defer wg.Done() + wait.Until(m.expireOldStates, m.cleanupPeriod, done) + }() + + // Stop processing the queue when cancelled + go func() { + <-done + m.workQueue.ShutDown() + }() + + // Return a channel closed when all work is done + ch := make(chan struct{}) + go func() { + wg.Wait() + close(ch) + }() + return ch +} + +// CancelIngressProbing cancels probing of the provided Ingress +// TODO(#6270): Use cache.DeletedFinalStateUnknown. 
+func (m *Prober) CancelIngressProbing(obj interface{}) { + if ing, ok := obj.(*v1alpha1.Ingress); ok { + key := ingressKey(ing) + + m.mu.Lock() + defer m.mu.Unlock() + if state, ok := m.ingressStates[key]; ok { + state.cancel() + delete(m.ingressStates, key) + } + } +} + +// CancelPodProbing cancels probing of the provided Pod IP. +// +// TODO(#6269): make this cancelation based on Pod x port instead of just Pod. +func (m *Prober) CancelPodProbing(obj interface{}) { + if pod, ok := obj.(*corev1.Pod); ok { + m.mu.Lock() + defer m.mu.Unlock() + + if ctx, ok := m.podContexts[pod.Status.PodIP]; ok { + ctx.cancel() + delete(m.podContexts, pod.Status.PodIP) + } + } +} + +// expireOldStates removes the states that haven't been accessed in a while. +func (m *Prober) expireOldStates() { + m.mu.Lock() + defer m.mu.Unlock() + for key, state := range m.ingressStates { + if time.Since(state.lastAccessed) > m.stateExpiration { + state.cancel() + delete(m.ingressStates, key) + } + } +} + +// processWorkItem processes a single work item from workQueue. +// It returns false when there is no more items to process, true otherwise. +func (m *Prober) processWorkItem() bool { + obj, shutdown := m.workQueue.Get() + if shutdown { + return false + } + + defer m.workQueue.Done(obj) + + // Crash if the item is not of the expected type + item, ok := obj.(*workItem) + if !ok { + m.logger.Fatalf("Unexpected work item type: want: %s, got: %s\n", + reflect.TypeOf(&workItem{}).Name(), reflect.TypeOf(obj).Name()) + } + m.logger.Infof("Processing probe for %s, IP: %s:%s (depth: %d)", + item.url, item.podIP, item.podPort, m.workQueue.Len()) + + transport := &http.Transport{ + TLSClientConfig: &tls.Config{ + // We only want to know that the Gateway is configured, not that the configuration is valid. + // Therefore, we can safely ignore any TLS certificate validation. 
+ InsecureSkipVerify: true, + }, + DialContext: func(ctx context.Context, network, addr string) (conn net.Conn, e error) { + // Requests with the IP as hostname and the Host header set do no pass client-side validation + // because the HTTP client validates that the hostname (not the Host header) matches the server + // TLS certificate Common Name or Alternative Names. Therefore, http.Request.URL is set to the + // hostname and it is substituted it here with the target IP. + return dialContext(ctx, network, net.JoinHostPort(item.podIP, item.podPort)) + }} + + ok, err := prober.Do( + item.context, + transport, + item.url.String(), + prober.WithHeader(network.ProbeHeaderName, network.ProbeHeaderValue), + prober.ExpectsStatusCodes([]int{http.StatusOK}), + prober.ExpectsHeader(network.HashHeaderName, item.ingressState.hash)) + + // In case of cancellation, drop the work item + select { + case <-item.context.Done(): + m.workQueue.Forget(obj) + return true + default: + } + + if err != nil || !ok { + // In case of error, enqueue for retry + m.workQueue.AddRateLimited(obj) + m.logger.Errorf("Probing of %s failed, IP: %s:%s, ready: %t, error: %v (depth: %d)", + item.url, item.podIP, item.podPort, ok, err, m.workQueue.Len()) + } else { + m.updateStates(item.ingressState, item.podState) + } + return true +} + +func (m *Prober) updateStates(ingressState *ingressState, podState *podState) { + if atomic.AddInt32(&podState.successCount, 1) == 1 { + // This is the first successful probe call for the pod, cancel all other work items for this pod + podState.cancel() + + // This is the last pod being successfully probed, the Ingress is ready + if atomic.AddInt32(&ingressState.pendingCount, -1) == 0 { + m.readyCallback(ingressState.ing) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/network/status/status_test.go b/test/vendor/knative.dev/serving/pkg/network/status/status_test.go new file mode 100644 index 0000000000..6c292c5a4c --- /dev/null +++ 
b/test/vendor/knative.dev/serving/pkg/network/status/status_test.go @@ -0,0 +1,564 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network/ingress" + + "go.uber.org/zap/zaptest" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/network" +) + +var ( + ingTemplate = &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}, + }, + } +) + +func TestProbeLifecycle(t *testing.T) { + ing := ingTemplate.DeepCopy() + hash, err := ingress.InsertProbe(ing) + if err != nil { + t.Fatalf("Failed to insert probe: %v", err) + } + + // Simulate that the latest configuration is not applied yet by returning a different + // hash once and then the by returning the expected hash. 
+ hashes := make(chan string, 1) + hashes <- "not-the-hash-you-are-looking-for" + go func() { + for { + hashes <- hash + } + }() + + // Dummy handler returning HTTP 500 (it should never be called during probing) + dummyRequests := make(chan *http.Request) + dummyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + dummyRequests <- r + w.WriteHeader(500) + }) + + // Actual probe handler used in Activator and Queue-Proxy + probeHandler := network.NewProbeHandler(dummyHandler) + + // Dummy handler keeping track of received requests, mimicking AppendHeader of K-Network-Hash + // and simulate a non-existing host by returning 404. + probeRequests := make(chan *http.Request) + finalHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Host, "foo.bar.com") { + w.WriteHeader(http.StatusNotFound) + return + } + + probeRequests <- r + r.Header.Set(network.HashHeaderName, <-hashes) + probeHandler.ServeHTTP(w, r) + }) + + ts := httptest.NewServer(finalHandler) + defer ts.Close() + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %q: %v", ts.URL, err) + } + port, err := strconv.Atoi(tsURL.Port()) + if err != nil { + t.Fatalf("Failed to parse port %q: %v", tsURL.Port(), err) + } + hostname := tsURL.Hostname() + + ready := make(chan *v1alpha1.Ingress) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + fakeProbeTargetLister{{ + PodIPs: sets.NewString(hostname), + PodPort: strconv.Itoa(port), + URLs: []*url.URL{tsURL}, + }}, + func(ing *v1alpha1.Ingress) { + ready <- ing + }) + + prober.stateExpiration = 2 * time.Second + prober.cleanupPeriod = 500 * time.Millisecond + + done := make(chan struct{}) + cancelled := prober.Start(done) + defer func() { + close(done) + <-cancelled + }() + + // The first call to IsReady must succeed and return false + ok, err := prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + 
t.Fatal("IsReady() returned true") + } + + const expHostHeader = "foo.bar.com" + + // Wait for the first (failing) and second (success) requests to be executed and validate Host header + for i := 0; i < 2; i++ { + req := <-probeRequests + if req.Host != expHostHeader { + t.Fatalf("Host header = %q, want %q", req.Host, expHostHeader) + } + } + + // Wait for the probing to eventually succeed + <-ready + + // The subsequent calls to IsReady must succeed and return true + for i := 0; i < 5; i++ { + if ok, err = prober.IsReady(context.Background(), ing); err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if !ok { + t.Fatal("IsReady() returned false") + } + time.Sleep(prober.cleanupPeriod) + } + + select { + // Wait for the cleanup to happen + case <-time.After(prober.stateExpiration + prober.cleanupPeriod): + break + // Validate that no probe requests were issued (cached) + case <-probeRequests: + t.Fatal("An unexpected probe request was received") + // Validate that no requests went through the probe handler + case <-dummyRequests: + t.Fatal("An unexpected request went through the probe handler") + } + + // The state has expired and been removed + ok, err = prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Wait for the first request (success) to be executed + <-probeRequests + + // Wait for the probing to eventually succeed + <-ready + + select { + // Validate that no requests went through the probe handler + case <-dummyRequests: + t.Fatal("An unexpected request went through the probe handler") + default: + break + } +} + +func TestProbeListerFail(t *testing.T) { + ing := ingTemplate.DeepCopy() + ready := make(chan *v1alpha1.Ingress) + defer close(ready) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + notFoundLister{}, + func(ing *v1alpha1.Ingress) { + ready <- ing + }) + + // If we can't list, this must fail and return false + ok, err := 
prober.IsReady(context.Background(), ing) + if err == nil { + t.Fatal("IsReady returned unexpected success") + } + if ok { + t.Fatal("IsReady() returned true") + } +} + +func TestCancelPodProbing(t *testing.T) { + type timedRequest struct { + *http.Request + Time time.Time + } + + // Handler keeping track of received requests and mimicking an Ingress not ready + requests := make(chan *timedRequest, 100) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests <- &timedRequest{ + Time: time.Now(), + Request: r, + } + w.WriteHeader(http.StatusNotFound) + }) + + ts := httptest.NewServer(handler) + defer ts.Close() + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %q: %v", ts.URL, err) + } + port, err := strconv.Atoi(tsURL.Port()) + if err != nil { + t.Fatalf("Failed to parse port %q: %v", tsURL.Port(), err) + } + hostname := tsURL.Hostname() + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Status: v1.PodStatus{ + PodIP: strings.Split(tsURL.Host, ":")[0], + }, + } + + ready := make(chan *v1alpha1.Ingress) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + fakeProbeTargetLister{{ + PodIPs: sets.NewString(hostname), + PodPort: strconv.Itoa(port), + URLs: []*url.URL{tsURL}, + }}, + func(ing *v1alpha1.Ingress) { + ready <- ing + }) + + done := make(chan struct{}) + cancelled := prober.Start(done) + defer func() { + close(done) + <-cancelled + }() + + ing := ingTemplate.DeepCopy() + ok, err := prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Wait for the first probe request + <-requests + + // Create a new version of the Ingress (to replace the original Ingress) + const otherDomain = "blabla.net" + ing = ing.DeepCopy() + ing.Spec.Rules[0].Hosts[0] = otherDomain + + // Create a different Ingress (to be probed in parallel) + const parallelDomain 
= "parallel.net" + func() { + copy := ing.DeepCopy() + copy.Spec.Rules[0].Hosts[0] = parallelDomain + copy.Name = "something" + + ok, err = prober.IsReady(context.Background(), copy) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + }() + + // Check that probing is unsuccessful + select { + case <-ready: + t.Fatal("Probing succeeded while it should not have succeeded") + default: + } + + ok, err = prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Drain requests for the old version + for req := range requests { + t.Logf("req.Host: %s", req.Host) + if strings.HasPrefix(req.Host, otherDomain) { + break + } + } + + // Cancel Pod probing + prober.CancelPodProbing(pod) + cancelTime := time.Now() + + // Check that there are no requests for the old Ingress and the requests predate cancellation + for { + select { + case req := <-requests: + if !strings.HasPrefix(req.Host, otherDomain) && + !strings.HasPrefix(req.Host, parallelDomain) { + t.Fatalf("Host = %s, want: %s or %s", req.Host, otherDomain, parallelDomain) + } else if req.Time.Sub(cancelTime) > 0 { + t.Fatal("Request was made after cancellation") + } + default: + return + } + } +} + +func TestPartialPodCancellation(t *testing.T) { + ing := ingTemplate.DeepCopy() + hash, err := ingress.InsertProbe(ing) + if err != nil { + t.Fatalf("Failed to insert probe: %v", err) + } + + // Simulate a probe target returning HTTP 200 OK and the correct hash + requests := make(chan *http.Request, 100) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests <- r + w.Header().Set(network.HashHeaderName, hash) + w.WriteHeader(http.StatusOK) + }) + ts := httptest.NewServer(handler) + defer ts.Close() + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %q: %v", ts.URL, err) + } + port, err := 
strconv.Atoi(tsURL.Port()) + if err != nil { + t.Fatalf("Failed to parse port %q: %v", tsURL.Port(), err) + } + + // pods[0] will be probed successfully, pods[1] will never be probed successfully + pods := []*v1.Pod{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "pod0", + }, + Status: v1.PodStatus{ + PodIP: strings.Split(tsURL.Host, ":")[0], + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "pod1", + }, + Status: v1.PodStatus{ + PodIP: "198.51.100.1", + }, + }} + + ready := make(chan *v1alpha1.Ingress) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + fakeProbeTargetLister{{ + PodIPs: sets.NewString(pods[0].Status.PodIP, pods[1].Status.PodIP), + PodPort: strconv.Itoa(port), + URLs: []*url.URL{tsURL}, + }}, + func(ing *v1alpha1.Ingress) { + ready <- ing + }) + + done := make(chan struct{}) + cancelled := prober.Start(done) + defer func() { + close(done) + <-cancelled + }() + + ok, err := prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Wait for the first probe request + <-requests + + // Check that probing is unsuccessful + select { + case <-ready: + t.Fatal("Probing succeeded while it should not have succeeded") + default: + } + + // Cancel probing of pods[1] + prober.CancelPodProbing(pods[1]) + + // Check that probing was successful + select { + case <-ready: + break + case <-time.After(5 * time.Second): + t.Fatal("Probing was not successful even after waiting") + } +} + +func TestCancelIngressProbing(t *testing.T) { + ing := ingTemplate.DeepCopy() + // Handler keeping track of received requests and mimicking an Ingress not ready + requests := make(chan *http.Request, 100) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests <- r + w.WriteHeader(http.StatusNotFound) + }) + + ts := httptest.NewServer(handler) + defer ts.Close() + tsURL, err := url.Parse(ts.URL) + if 
err != nil { + t.Fatalf("Failed to parse URL %q: %v", ts.URL, err) + } + port, err := strconv.Atoi(tsURL.Port()) + if err != nil { + t.Fatalf("Failed to parse port %q: %v", tsURL.Port(), err) + } + hostname := tsURL.Hostname() + + ready := make(chan *v1alpha1.Ingress) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + fakeProbeTargetLister{{ + PodIPs: sets.NewString(hostname), + PodPort: strconv.Itoa(port), + URLs: []*url.URL{tsURL}, + }}, + func(ing *v1alpha1.Ingress) { + ready <- ing + }) + + done := make(chan struct{}) + cancelled := prober.Start(done) + defer func() { + close(done) + <-cancelled + }() + + ok, err := prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Wait for the first probe request + <-requests + + const domain = "blabla.net" + + // Create a new version of the Ingress + ing = ing.DeepCopy() + ing.Spec.Rules[0].Hosts[0] = domain + + // Check that probing is unsuccessful + select { + case <-ready: + t.Fatal("Probing succeeded while it should not have succeeded") + default: + } + + ok, err = prober.IsReady(context.Background(), ing) + if err != nil { + t.Fatalf("IsReady failed: %v", err) + } + if ok { + t.Fatal("IsReady() returned true") + } + + // Drain requests for the old version. + for req := range requests { + t.Logf("req.Host: %s", req.Host) + if strings.HasPrefix(req.Host, domain) { + break + } + } + + // Cancel Ingress probing. + prober.CancelIngressProbing(ing) + + // Check that the requests were for the new version. 
+ close(requests) + for req := range requests { + if !strings.HasPrefix(req.Host, domain) { + t.Fatalf("Host = %s, want: %s", req.Host, domain) + } + } +} + +type fakeProbeTargetLister []ProbeTarget + +func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, ing *v1alpha1.Ingress) ([]ProbeTarget, error) { + targets := []ProbeTarget{} + for _, target := range l { + newTarget := ProbeTarget{ + PodIPs: target.PodIPs, + PodPort: target.PodPort, + Port: target.Port, + } + for _, url := range target.URLs { + newURL := *url + newURL.Host = ing.Spec.Rules[0].Hosts[0] + newTarget.URLs = append(newTarget.URLs, &newURL) + } + targets = append(targets, newTarget) + } + return targets, nil +} + +type notFoundLister struct{} + +func (l notFoundLister) ListProbeTargets(ctx context.Context, ing *v1alpha1.Ingress) ([]ProbeTarget, error) { + return nil, errors.New("not found") +} diff --git a/test/vendor/knative.dev/serving/pkg/network/testdata/config-network.yaml b/test/vendor/knative.dev/serving/pkg/network/testdata/config-network.yaml new file mode 120000 index 0000000000..eb11ac0e9f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/testdata/config-network.yaml @@ -0,0 +1 @@ +../../../config/core/configmaps/network.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/network/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/network/zz_generated.deepcopy.go new file mode 100644 index 0000000000..43e67b1931 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/network/zz_generated.deepcopy.go @@ -0,0 +1,76 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package network + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainTemplateValues) DeepCopyInto(out *DomainTemplateValues) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainTemplateValues. +func (in *DomainTemplateValues) DeepCopy() *DomainTemplateValues { + if in == nil { + return nil + } + out := new(DomainTemplateValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagTemplateValues) DeepCopyInto(out *TagTemplateValues) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagTemplateValues. 
+func (in *TagTemplateValues) DeepCopy() *TagTemplateValues { + if in == nil { + return nil + } + out := new(TagTemplateValues) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/pool/OWNERS b/test/vendor/knative.dev/serving/pkg/pool/OWNERS new file mode 100644 index 0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/pool/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/pkg/pool/doc.go b/test/vendor/knative.dev/serving/pkg/pool/doc.go new file mode 100644 index 0000000000..4cf66210e2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/pool/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pool contains a simple threadpool implementation that accepts +// work in the form of `func() error` function. The intent is for it to +// support similar workloads to errgroup, but with a maximum number of +// concurrent worker threads. 
+package pool diff --git a/test/vendor/knative.dev/serving/pkg/pool/interface.go b/test/vendor/knative.dev/serving/pkg/pool/interface.go new file mode 100644 index 0000000000..ec239d0635 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/pool/interface.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "golang.org/x/sync/errgroup" +) + +// Interface defines an errgroup-compatible interface for interacting with +// our threadpool. +type Interface interface { + // Go queues a single unit of work for execution on this pool. All calls + // to Go must be finished before Wait is called. + Go(func() error) + + // Wait blocks until all work is complete, returning the first + // error returned by any of the work. + Wait() error +} + +// errgroup.Group implements Interface +var _ Interface = (*errgroup.Group)(nil) diff --git a/test/vendor/knative.dev/serving/pkg/pool/pool.go b/test/vendor/knative.dev/serving/pkg/pool/pool.go new file mode 100644 index 0000000000..1329ff4ba8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/pool/pool.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pool + +import ( + "context" + "sync" +) + +type impl struct { + wg sync.WaitGroup + workCh chan func() error + + // Ensure that we Wait exactly once and memoize + // the result. + waitOnce sync.Once + + cancel context.CancelFunc + + // We're only interested in the first result so + // only set it once. + resultOnce sync.Once + result error +} + +// impl implements Interface +var _ Interface = (*impl)(nil) + +// defaultCapacity is the number of work items or errors that we +// can queue up before calls to Go will block, or work will +// block until Wait is called. +const defaultCapacity = 50 + +// New creates a fresh worker pool with the specified size. +func New(workers int) Interface { + return NewWithCapacity(workers, defaultCapacity) +} + +// NewWithContext creates a pool that is driven by a cancelable context. +// Just like errgroup.Group on first error the context will be canceled as well. +func NewWithContext(ctx context.Context, workers, capacity int) (Interface, context.Context) { + ctx, cancel := context.WithCancel(ctx) + i := &impl{ + cancel: cancel, + workCh: make(chan func() error, capacity), + } + + // Start a go routine for each worker, which: + // 1. reads off of the work channel, + // 2. (optionally) sends errors on the error channel, + // 3. marks work as done in our sync.WaitGroup. 
+ for idx := 0; idx < workers; idx++ { + go func() { + for work := range i.workCh { + i.exec(work) + } + }() + } + return i, ctx +} + +func (i *impl) exec(w func() error) { + defer i.wg.Done() + if err := w(); err != nil { + i.resultOnce.Do(func() { + if i.cancel != nil { + i.cancel() + } + i.result = err + }) + } +} + +// NewWithCapacity creates a fresh worker pool with the specified size. +func NewWithCapacity(workers, capacity int) Interface { + i, _ := NewWithContext(context.Background(), workers, capacity) + return i +} + +// Go implements Interface. +func (i *impl) Go(w func() error) { + // Increment the amount of outstanding work we're waiting on. + i.wg.Add(1) + // Send the work along the queue. + i.workCh <- w +} + +// Wait implements Interface. +func (i *impl) Wait() error { + i.waitOnce.Do(func() { + // Wait for queued work to complete. + i.wg.Wait() + // Notify the context, that it's done now. + if i.cancel != nil { + i.cancel() + } + + // Now we know there are definitely no new items arriving. + close(i.workCh) + }) + + return i.result +} diff --git a/test/vendor/knative.dev/serving/pkg/pool/pool_test.go b/test/vendor/knative.dev/serving/pkg/pool/pool_test.go new file mode 100644 index 0000000000..47da4dd9b3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/pool/pool_test.go @@ -0,0 +1,239 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pool + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestParallelismNoErrors(t *testing.T) { + tests := []struct { + name string + size int + work int + }{{ + name: "single threaded", + size: 1, + work: 3, + }, { + name: "three workers", + size: 3, + work: 10, + }, { + name: "ten workers", + size: 10, + work: 100, + }} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + // m guards max. + m sync.Mutex + max int32 + active int32 + ) + + // Use our own waitgroup to ensure that the work + // can all complete before we block on the error + // result. + wg := &sync.WaitGroup{} + + worker := func() error { + defer wg.Done() + na := atomic.AddInt32(&active, 1) + defer atomic.AddInt32(&active, -1) + + func() { + m.Lock() + defer m.Unlock() + if max < na { + max = na + } + }() + + // Sleep a small amount to simulate work. This should be + // sufficient to saturate the threadpool before the first + // one wakes up. + time.Sleep(10 * time.Millisecond) + return nil + } + + p := New(tc.size) + for idx := 0; idx < tc.work; idx++ { + wg.Add(1) + p.Go(worker) + } + + // First wait for the waitgroup to finish, so that + // we are sure it isn't the Wait call that flushes + // remaining work. 
+ wg.Wait() + + if err := p.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + + if err := p.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + + if got, want := max, int32(tc.size); got != want { + t.Errorf("max active = %v, wanted %v", got, want) + } + }) + } +} + +func TestParallelismWithErrors(t *testing.T) { + tests := []struct { + name string + size int + work int + }{{ + name: "single threaded", + size: 1, + work: 3, + }, { + name: "three workers", + size: 3, + work: 10, + }, { + name: "ten workers", + size: 10, + // This is the number of errors that we can buffer before + // the test kernel below will deadlock because we need the + // pool's Wait call to drain the buffered errors before more + // than this can be sent. + work: defaultCapacity + 10, /* size */ + }} + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + // m guards max. + m sync.Mutex + max int32 + active int32 + ) + + // Use our own waitgroup to ensure that the work + // can all complete before we block on the error + // result. + wg := &sync.WaitGroup{} + + errExpected := errors.New("this is what I expect") + workerFactory := func(err error) func() error { + return func() error { + defer wg.Done() + na := atomic.AddInt32(&active, 1) + defer atomic.AddInt32(&active, -1) + + func() { + m.Lock() + defer m.Unlock() + if max < na { + max = na + } + }() + + // Make the first piece of work finish quickly. + if err != errExpected { + // Sleep a small amount to simulate work. This should be + // sufficient to saturate the threadpool before the first + // one wakes up. + time.Sleep(10 * time.Millisecond) + } + return err + } + } + + p := New(tc.size) + + // Let the work complete. + wg.Add(1) + p.Go(workerFactory(errExpected)) + time.Sleep(10 * time.Millisecond) + + // Change the error we return and queue the remaining work. 
+ for idx := 1; idx < tc.work; idx++ { + wg.Add(1) + p.Go(workerFactory(errors.New("this is not what I expect"))) + } + + // First wait for the waitgroup to finish, so that + // we are sure it isn't the Wait call that flushes + // remaining work. + wg.Wait() + + if err := p.Wait(); err != errExpected { + t.Errorf("Wait() = %v, wanted %v", err, errExpected) + } + + if got, want := max, int32(tc.size); got != want { + t.Errorf("max active = %v, wanted %v", got, want) + } + }) + } +} + +func TestWithContextWaitCancels(t *testing.T) { + pool, ctx := NewWithContext(context.Background(), 1 /*1 thread*/, 10 /*capacity*/) + for i := 0; i < 10; i++ { + pool.Go(func() error { + time.Sleep(10 * time.Millisecond) + return nil + }) + } + if err := pool.Wait(); err != nil { + t.Fatalf("pool.Wait = %v", err) + } + select { + case <-ctx.Done(): + default: + t.Error("ctx is not canceled") + } +} + +func TestErrorCancelsContext(t *testing.T) { + want := errors.New("i failed, sorry") + pool, ctx := NewWithContext(context.Background(), 1 /*1 thread*/, 10 /*capacity*/) + pool.Go(func() error { + return want + }) + // Those don't matter, but generate load. + for i := 0; i < 10; i++ { + pool.Go(func() error { + time.Sleep(100 * time.Millisecond) + return nil + }) + } + // This should be triggered basically immediately. + select { + case <-ctx.Done(): + case <-time.After(100 * time.Millisecond): + t.Error("ctx is not canceled due to the first error") + } + if err := pool.Wait(); err != want { + t.Fatalf("pool.Wait() = %v, want: %v", err, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/OWNERS b/test/vendor/knative.dev/serving/pkg/queue/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/queue/breaker.go b/test/vendor/knative.dev/serving/pkg/queue/breaker.go new file mode 100644 index 0000000000..0594034c92 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/breaker.go @@ -0,0 +1,267 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "context" + "errors" + "fmt" + "sync" +) + +var ( + // ErrUpdateCapacity indicates that the capacity could not be updated as wished. + ErrUpdateCapacity = errors.New("failed to add all capacity to the breaker") + // ErrRelease indicates that release was called more often than acquire. + ErrRelease = errors.New("semaphore release error: returned tokens must be <= acquired tokens") + // ErrRequestQueueFull indicates the breaker queue depth was exceeded. + ErrRequestQueueFull = errors.New("pending request queue full") +) + +// BreakerParams defines the parameters of the breaker. +type BreakerParams struct { + QueueDepth int + MaxConcurrency int + InitialCapacity int +} + +// Breaker is a component that enforces a concurrency limit on the +// execution of a function. It also maintains a queue of function +// executions in excess of the concurrency limit. Function call attempts +// beyond the limit of the queue are failed immediately. 
+type Breaker struct {
+	pendingRequests chan struct{}
+	sem             *semaphore
+}
+
+// NewBreaker creates a Breaker with the desired queue depth,
+// concurrency limit and initial capacity.
+func NewBreaker(params BreakerParams) *Breaker {
+	if params.QueueDepth <= 0 {
+		panic(fmt.Sprintf("Queue depth must be greater than 0. Got %v.", params.QueueDepth))
+	}
+	if params.MaxConcurrency < 0 {
+		panic(fmt.Sprintf("Max concurrency must be 0 or greater. Got %v.", params.MaxConcurrency))
+	}
+	if params.InitialCapacity < 0 || params.InitialCapacity > params.MaxConcurrency {
+		panic(fmt.Sprintf("Initial capacity must be between 0 and max concurrency. Got %v.", params.InitialCapacity))
+	}
+	sem := newSemaphore(params.MaxConcurrency, params.InitialCapacity)
+	return &Breaker{
+		pendingRequests: make(chan struct{}, params.QueueDepth+params.MaxConcurrency),
+		sem:             sem,
+	}
+}
+
+// Reserve reserves an execution slot in the breaker, to permit
+// richer semantics in the caller.
+// The caller on success must execute the callback when done with work.
+func (b *Breaker) Reserve(ctx context.Context) (func(), bool) {
+	select {
+	default:
+		// Pending request queue is full. Report failure.
+		return nil, false
+	case b.pendingRequests <- struct{}{}:
+		// Pending request has capacity, reserve a slot, if there's one
+		// available.
+		if !b.sem.tryAcquire(ctx) {
+			<-b.pendingRequests
+			return nil, false
+		}
+		return func() {
+			b.sem.release()
+			<-b.pendingRequests
+		}, true
+	}
+}
+
+// Maybe conditionally executes thunk based on the Breaker concurrency
+// and queue parameters. If the concurrency limit and queue capacity are
+// already consumed, Maybe returns immediately without calling thunk. If
+// the thunk was executed, Maybe returns nil, otherwise an error.
+func (b *Breaker) Maybe(ctx context.Context, thunk func()) error {
+	select {
+	default:
+		// Pending request queue is full. Report failure.
+		return ErrRequestQueueFull
+	case b.pendingRequests <- struct{}{}:
+		// Pending request has capacity.
+		// Defer releasing pending request queue.
+		defer func() {
+			<-b.pendingRequests
+		}()
+
+		// Wait for capacity in the active queue.
+		if err := b.sem.acquire(ctx); err != nil {
+			return err
+		}
+		// Defer releasing capacity in the active queue.
+		// It's safe to ignore the error returned by release since we
+		// make sure the semaphore is only manipulated here and acquire
+		// + release calls are equally paired.
+		defer b.sem.release()
+
+		// Do the thing.
+		thunk()
+		// Report success
+		return nil
+	}
+}
+
+// InFlight returns the number of requests currently in flight in this breaker.
+func (b *Breaker) InFlight() int {
+	return len(b.pendingRequests)
+}
+
+// UpdateConcurrency updates the maximum number of in-flight requests.
+func (b *Breaker) UpdateConcurrency(size int) error {
+	return b.sem.updateCapacity(size)
+}
+
+// Capacity returns the number of allowed in-flight requests on this breaker.
+func (b *Breaker) Capacity() int {
+	return b.sem.Capacity()
+}
+
+// newSemaphore creates a semaphore with the desired maximal and initial capacity.
+// Maximal capacity is the size of the buffered channel, it defines maximum number of tokens
+// in the rotation. Attempting to add more capacity than the max will result in error.
+// Initial capacity is the initial number of free tokens.
+func newSemaphore(maxCapacity, initialCapacity int) *semaphore {
+	queue := make(chan struct{}, maxCapacity)
+	sem := &semaphore{queue: queue}
+	if initialCapacity > 0 {
+		sem.updateCapacity(initialCapacity)
+	}
+	return sem
+}
+
+// semaphore is an implementation of a semaphore based on Go channels.
+// The presence of elements in the `queue` buffered channel correspond to available tokens.
+// Hence the max number of tokens to hand out equals to the size of the channel.
+// `capacity` defines the current number of tokens in the rotation.
+type semaphore struct { + queue chan struct{} + reducers int + capacity int + mux sync.RWMutex +} + +// tryAcquire receives the token from the semaphore if there's one +// otherwise an error is returned. +func (s *semaphore) tryAcquire(ctx context.Context) bool { + select { + case <-s.queue: + return true + default: + return false + } +} + +// acquire receives the token from the semaphore, potentially blocking. +func (s *semaphore) acquire(ctx context.Context) error { + select { + case <-s.queue: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// release potentially puts the token back to the queue. +// If the semaphore capacity was reduced in between and is not yet reflected, +// we remove the tokens from the rotation instead of returning them back. +func (s *semaphore) release() error { + s.mux.Lock() + defer s.mux.Unlock() + + if s.reducers > 0 { + s.capacity-- + s.reducers-- + return nil + } + + // We want to make sure releasing a token is always non-blocking. + select { + case s.queue <- struct{}{}: + return nil + default: + // This only happens if release is called more often than acquire. + return ErrRelease + } +} + +// updateCapacity updates the capacity of the semaphore to the desired +// size. +func (s *semaphore) updateCapacity(size int) error { + if size < 0 || size > cap(s.queue) { + return ErrUpdateCapacity + } + + s.mux.Lock() + defer s.mux.Unlock() + + if s.effectiveCapacity() == size { + return nil + } + + // Add capacity until we reach size, potentially consuming + // outstanding reducers first. + for s.effectiveCapacity() < size { + if s.reducers > 0 { + s.reducers-- + } else { + select { + case s.queue <- struct{}{}: + s.capacity++ + default: + // This indicates that we're operating close to + // MaxCapacity and returned more tokens than we + // acquired. 
+ return ErrUpdateCapacity + } + } + } + + // Reduce capacity until we reach size, potentially adding + // new reducers if the queue channel is empty because of + // requests in-flight. + for s.effectiveCapacity() > size { + select { + case <-s.queue: + s.capacity-- + default: + s.reducers++ + } + } + + return nil +} + +// effectiveCapacity is the capacity with reducers taken into account. +// `mux` must be held to call it. +func (s *semaphore) effectiveCapacity() int { + return s.capacity - s.reducers +} + +// Capacity is the effective capacity after taking reducers into account. +func (s *semaphore) Capacity() int { + s.mux.RLock() + defer s.mux.RUnlock() + + return s.effectiveCapacity() +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/breaker_test.go b/test/vendor/knative.dev/serving/pkg/queue/breaker_test.go new file mode 100644 index 0000000000..b7cc23a252 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/breaker_test.go @@ -0,0 +1,452 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "context" + "testing" + "time" +) + +const ( + // semAcquireTimeout is a timeout for tests that try to acquire + // a token of a semaphore. + semAcquireTimeout = 10 * time.Second + + // semNoChangeTimeout is some additional wait time after a number + // of acquires is reached to assert that no more acquires get through. 
+ semNoChangeTimeout = 50 * time.Millisecond +) + +func TestBreakerInvalidConstructor(t *testing.T) { + tests := []struct { + name string + options BreakerParams + }{{ + "QueueDepth = 0", + BreakerParams{QueueDepth: 0, MaxConcurrency: 1, InitialCapacity: 1}, + }, { + "MaxConcurrency negative", + BreakerParams{QueueDepth: 1, MaxConcurrency: -1, InitialCapacity: 1}, + }, { + "InitialCapacity negative", + BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: -1}, + }, { + "InitialCapacity out-of-bounds", + BreakerParams{QueueDepth: 1, MaxConcurrency: 5, InitialCapacity: 6}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected a panic but the code didn't panic.") + } + }() + + NewBreaker(test.options) + }) + } +} + +func TestBreakerReserveOverload(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 1} + b := NewBreaker(params) // Breaker capacity = 2 + cb1, rr := b.Reserve(context.Background()) + if !rr { + t.Fatal("Reserve1 failed") + } + _, rr = b.Reserve(context.Background()) + if rr { + t.Fatal("Reserve2 was an unexpected success.") + } + // Release a slot. + cb1() + // And reserve it again. + cb2, rr := b.Reserve(context.Background()) + if !rr { + t.Fatal("Reserve2 failed") + } + cb2() +} + +func TestBreakerOverloadMixed(t *testing.T) { + // This tests when reservation and maybe are intermised. + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 1} + b := NewBreaker(params) // Breaker capacity = 2 + reqs := newRequestor(b) + + // Bring breaker to capacity. + reqs.request() + // This happens in go-routine, so spin. + for len(b.sem.queue) > 0 { + time.Sleep(time.Millisecond * 2) + } + _, rr := b.Reserve(context.Background()) + if rr { + t.Fatal("Reserve was an unexpected success.") + } + // Open a slot. + reqs.processSuccessfully(t) + // Now reservation should work. 
+ cb, rr := b.Reserve(context.Background()) + if !rr { + t.Fatal("Reserve unexpectedly failed") + } + // Process the reservation. + cb() +} + +func TestBreakerOverload(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 1} + b := NewBreaker(params) // Breaker capacity = 2 + reqs := newRequestor(b) + + // Bring breaker to capacity. + reqs.request() + reqs.request() + + // Overshoot by one. + reqs.request() + reqs.expectFailure(t) + + // The remainer should succeed. + reqs.processSuccessfully(t) + reqs.processSuccessfully(t) +} + +func TestBreakerQueueing(t *testing.T) { + params := BreakerParams{QueueDepth: 2, MaxConcurrency: 1, InitialCapacity: 0} + b := NewBreaker(params) // Breaker capacity = 2 + reqs := newRequestor(b) + + // Bring breaker to capacity. Doesn't error because queue subsumes these requests. + reqs.request() + reqs.request() + + // Update concurrency to allow the requests to be processed. + b.UpdateConcurrency(1) + + // They should pass just fine. + reqs.processSuccessfully(t) + reqs.processSuccessfully(t) +} + +func TestBreakerNoOverload(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 1} + b := NewBreaker(params) // Breaker capacity = 2 + reqs := newRequestor(b) + + // Bring request to capacity. + reqs.request() + reqs.request() + + // Process one, send a new one in, at capacity again. + reqs.processSuccessfully(t) + reqs.request() + + // Process one, send a new one in, at capacity again. + reqs.processSuccessfully(t) + reqs.request() + + // Process the remainder successfully. + reqs.processSuccessfully(t) + reqs.processSuccessfully(t) +} + +func TestBreakerCancel(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 0} + b := NewBreaker(params) + reqs := newRequestor(b) + + // Cancel a request which cannot get capacity. 
+ ctx1, cancel1 := context.WithCancel(context.Background()) + reqs.requestWithContext(ctx1) + cancel1() + reqs.expectFailure(t) + + // This request cannot get capacity either. This reproduced a bug we had when + // freeing slots on the pendingRequests channel. + ctx2, cancel2 := context.WithCancel(context.Background()) + reqs.requestWithContext(ctx2) + cancel2() + reqs.expectFailure(t) + + // Let through a request with capacity then timeout following request + b.UpdateConcurrency(1) + reqs.request() + + // Exceed capacity and assert one failure. This makes sure the Breaker is consistently + // at capacity. + reqs.request() + reqs.request() + reqs.expectFailure(t) + + // This request cannot get capacity. + ctx3, cancel3 := context.WithCancel(context.Background()) + reqs.requestWithContext(ctx3) + cancel3() + reqs.expectFailure(t) + + // The requests that were put in earlier should succeed. + reqs.processSuccessfully(t) + reqs.processSuccessfully(t) +} + +func TestBreakerUpdateConcurrency(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 0} + b := NewBreaker(params) + b.UpdateConcurrency(1) + if got, want := b.Capacity(), 1; got != want { + t.Errorf("Capacity() = %d, want: %d", got, want) + } + + b.UpdateConcurrency(0) + if got, want := b.Capacity(), 0; got != want { + t.Errorf("Capacity() = %d, want: %d", got, want) + } + + if err := b.UpdateConcurrency(-2); err != ErrUpdateCapacity { + t.Errorf("UpdateConcurrency = %v, want: %v", err, ErrUpdateCapacity) + } +} + +func TestBreakerUpdateConcurrencyOverlow(t *testing.T) { + params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 0} + b := NewBreaker(params) + if err := b.UpdateConcurrency(2); err != ErrUpdateCapacity { + t.Errorf("UpdateConcurrency = %v, want: %v", err, ErrUpdateCapacity) + } +} + +// Test empty semaphore, token cannot be acquired +func TestSemaphoreAcquireHasNoCapacity(t *testing.T) { + gotChan := make(chan struct{}, 1) + + sem := 
newSemaphore(1, 0) + tryAcquire(sem, gotChan) + + select { + case <-gotChan: + t.Error("Token was acquired but shouldn't have been") + case <-time.After(semNoChangeTimeout): + // Test succeeds, semaphore didn't change in configured time. + } +} + +func TestSemaphoreAcquireNonBlockingHasNoCapacity(t *testing.T) { + sem := newSemaphore(1, 0) + if sem.tryAcquire(context.Background()) { + t.Error("Should have failed immediately") + } +} + +// Test empty semaphore, add capacity, token can be acquired +func TestSemaphoreAcquireHasCapacity(t *testing.T) { + gotChan := make(chan struct{}, 1) + want := 1 + + sem := newSemaphore(1, 0) + tryAcquire(sem, gotChan) + sem.release() // Allows 1 acquire + + for i := 0; i < want; i++ { + select { + case <-gotChan: + // Successfully acquired a token. + case <-time.After(semAcquireTimeout): + t.Error("Was not able to acquire token before timeout") + } + } + + select { + case <-gotChan: + t.Errorf("Got more acquires than wanted, want = %d, got at least %d", want, want+1) + case <-time.After(semNoChangeTimeout): + // No change happened, success. 
+ } +} + +func TestSemaphoreRelease(t *testing.T) { + sem := newSemaphore(1, 1) + sem.acquire(context.Background()) + if err := sem.release(); err != nil { + t.Errorf("release = %v; want: %v", err, nil) + } + if err := sem.release(); err != ErrRelease { + t.Errorf("release = %v; want: %v", err, ErrRelease) + } +} + +func TestSemaphoreReleasesSeveralReducers(t *testing.T) { + const wantAfterFirstrelease = 1 + const wantAfterSecondrelease = 0 + sem := newSemaphore(2, 2) + sem.acquire(context.Background()) + sem.acquire(context.Background()) + sem.updateCapacity(0) + sem.release() + if got := sem.Capacity(); got != wantAfterSecondrelease { + t.Errorf("Capacity = %d, want: %d", got, wantAfterSecondrelease) + } + if sem.reducers != wantAfterFirstrelease { + t.Errorf("sem.reducers = %d, want: %d", sem.reducers, wantAfterFirstrelease) + } + + sem.release() + if got := sem.Capacity(); got != wantAfterSecondrelease { + t.Errorf("Capacity = %d, want: %d", got, wantAfterSecondrelease) + } + if sem.reducers != wantAfterSecondrelease { + t.Errorf("sem.reducers = %d, want: %d", sem.reducers, wantAfterSecondrelease) + } +} + +func TestSemaphoreUpdateCapacity(t *testing.T) { + const initialCapacity = 1 + sem := newSemaphore(3, initialCapacity) + if got, want := sem.Capacity(), 1; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } + sem.acquire(context.Background()) + sem.updateCapacity(initialCapacity + 2) + if got, want := sem.Capacity(), 3; got != want { + t.Errorf("Capacity = %d, want: %d", got, want) + } +} + +// Test the case when we add more capacity then the number of waiting reducers +func TestSemaphoreUpdateCapacityLessThenReducers(t *testing.T) { + const initialCapacity = 2 + sem := newSemaphore(2, initialCapacity) + sem.acquire(context.Background()) + sem.acquire(context.Background()) + sem.updateCapacity(initialCapacity - 2) + if got, want := sem.reducers, 2; got != want { + t.Errorf("sem.reducers = %d, want: %d", got, want) + } + sem.release() + 
sem.release() + sem.release() + if got, want := sem.reducers, 0; got != want { + t.Errorf("sem.reducers = %d, want: %d", got, want) + } +} + +func TestSemaphoreUpdateCapacityConsumingReducers(t *testing.T) { + const initialCapacity = 2 + sem := newSemaphore(2, initialCapacity) + sem.acquire(context.Background()) + sem.acquire(context.Background()) + sem.updateCapacity(initialCapacity - 2) + if got, want := sem.reducers, 2; got != want { + t.Errorf("sem.reducers = %d, want: %d", got, want) + } + + sem.updateCapacity(initialCapacity) + if got, want := sem.reducers, 0; got != want { + t.Errorf("sem.reducers = %d, want: %d", got, want) + } +} + +func TestSemaphoreUpdateCapacityOverflow(t *testing.T) { + sem := newSemaphore(2, 0) + if err := sem.updateCapacity(3); err != ErrUpdateCapacity { + t.Errorf("updateCapacity = %v, want: %v", err, ErrUpdateCapacity) + } +} + +func TestSemaphoreUpdateCapacityOutOfBound(t *testing.T) { + sem := newSemaphore(1, 1) + sem.acquire(context.Background()) + if err := sem.updateCapacity(-1); err != ErrUpdateCapacity { + t.Errorf("updateCapacity = %v, want: %v", err, ErrUpdateCapacity) + } +} + +func TestSemaphoreUpdateCapacityBrokenState(t *testing.T) { + sem := newSemaphore(1, 0) + sem.release() // This Release is not paired with an acquire + if err := sem.updateCapacity(1); err != ErrUpdateCapacity { + t.Errorf("updateCapacity = %v, want: %v", err, ErrUpdateCapacity) + } +} + +func TestSemaphoreUpdateCapacityDoNothing(t *testing.T) { + sem := newSemaphore(1, 1) + if err := sem.updateCapacity(1); err != nil { + t.Errorf("updateCapacity = %v, want: %v", err, nil) + } +} + +func tryAcquire(sem *semaphore, gotChan chan struct{}) { + go func() { + // blocking until someone puts the token into the semaphore + sem.acquire(context.Background()) + gotChan <- struct{}{} + }() +} + +// requestor is a set of test helpers around breaker testing. 
+type requestor struct { + breaker *Breaker + acceptedCh chan bool + barrierCh chan struct{} +} + +func newRequestor(breaker *Breaker) *requestor { + return &requestor{ + breaker: breaker, + acceptedCh: make(chan bool), + barrierCh: make(chan struct{}), + } +} + +// request is the same as requestWithContext but with a default context. +func (r *requestor) request() { + r.requestWithContext(context.Background()) +} + +// requestWithContext simulates a request in a separate goroutine. The +// request will either fail immediately (as observable via expectFailure) +// or block until processSuccessfully is called. +func (r *requestor) requestWithContext(ctx context.Context) { + go func() { + err := r.breaker.Maybe(ctx, func() { + <-r.barrierCh + }) + r.acceptedCh <- err == nil + }() +} + +// expectFailure waits for a request to finish and asserts it to be failed. +func (r *requestor) expectFailure(t *testing.T) { + t.Helper() + if <-r.acceptedCh { + t.Error("expected request to fail but it succeeded") + } +} + +// processSuccessfully allows a request to pass the barrier, waits for it to +// be finished and asserts it to succeed. +func (r *requestor) processSuccessfully(t *testing.T) { + t.Helper() + r.barrierCh <- struct{}{} + if !<-r.acceptedCh { + t.Error("expected request to succeed but it failed") + } +} diff --git a/test/vendor/github.com/knative/pkg/apis/duck/interface.go b/test/vendor/knative.dev/serving/pkg/queue/constants.go similarity index 56% rename from test/vendor/github.com/knative/pkg/apis/duck/interface.go rename to test/vendor/knative.dev/serving/pkg/queue/constants.go index f99a636339..4d6b6f0573 100644 --- a/test/vendor/github.com/knative/pkg/apis/duck/interface.go +++ b/test/vendor/knative.dev/serving/pkg/queue/constants.go @@ -14,15 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package duck +package queue -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) +const ( + // Name is the name of the component. + Name = "queue" -// InformerFactory is used to create Informer/Lister pairs for a schema.GroupVersionResource -type InformerFactory interface { - // Get returns a synced Informer/Lister pair for the provided schema.GroupVersionResource. - Get(schema.GroupVersionResource) (cache.SharedIndexInformer, cache.GenericLister, error) -} + // RequestQueueDrainPath specifies the path to wait until the proxy + // server is shut down. Any subsequent calls to this endpoint after + // the server has finished shutting down it will return immediately. + // Main usage is to delay the termination of user-container until all + // accepted requests have been processed. + RequestQueueDrainPath = "/wait-for-drain" +) diff --git a/test/vendor/knative.dev/serving/pkg/queue/doc.go b/test/vendor/knative.dev/serving/pkg/queue/doc.go new file mode 100644 index 0000000000..d2c3f07bc9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package queue provides components for the queue-proxy binary. 
+package queue diff --git a/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim.go b/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim.go new file mode 100644 index 0000000000..5bad25b93e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim.go @@ -0,0 +1,101 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "fmt" + "net/http" + "strings" +) + +// ForwardedShimHandler attempts to shim a `forwarded` HTTP header from the information +// available in the `x-forwarded-*` headers. When available, each node in the `x-forwarded-for` +// header is combined with the `x-forwarded-proto` and `x-forwarded-host` fields to construct +// a `forwarded` header. The `x-forwarded-by` header is ignored entirely, since it cannot be +// reliably combined with `x-forwarded-for`. No-op if a `forwarded` header is already present. 
+func ForwardedShimHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer h.ServeHTTP(w, r)
+
+		// Forwarded: by=;for=;host=;proto=
+		fwd := r.Header.Get("Forwarded")
+
+		// Don't add a shim if the header is already present
+		if fwd != "" {
+			return
+		}
+
+		// X-Forwarded-For: , ,
+		xff := r.Header.Get("X-Forwarded-For")
+		// X-Forwarded-Proto:
+		xfp := r.Header.Get("X-Forwarded-Proto")
+		// X-Forwarded-Host:
+		xfh := r.Header.Get("X-Forwarded-Host")
+
+		// Nothing to do if we don't have any x-forwarded-* headers
+		if xff == "" && xfp == "" && xfh == "" {
+			return
+		}
+
+		// The forwarded header is a list of forwarded elements
+		elements := []string{}
+
+		// The x-forwarded-header consists of multiple nodes
+		nodes := strings.Split(xff, ",")
+
+		// Sanitize nodes
+		for i, node := range nodes {
+			// Remove extra whitespace
+			node = strings.TrimSpace(node)
+
+			// For simplicity, an address is IPv6 if it contains a colon (:)
+			if strings.Contains(node, ":") {
+				// Convert IPv6 address to "[ipv6 addr]" format
+				node = fmt.Sprintf("\"[%s]\"", node)
+			}
+
+			nodes[i] = node
+		}
+
+		// The first element has a 'for', 'proto' and 'host' pair, as available
+		pairs := []string{}
+
+		if xff != "" {
+			pairs = append(pairs, "for="+nodes[0])
+		}
+		if xfh != "" {
+			pairs = append(pairs, "host="+xfh)
+		}
+		if xfp != "" {
+			pairs = append(pairs, "proto="+xfp)
+		}
+
+		// The pairs are joined with a semi-colon (;) into a single element
+		elements = append(elements, strings.Join(pairs, ";"))
+
+		// Each subsequent x-forwarded-for node gets its own pair element
+		for _, node := range nodes[1:] {
+			elements = append(elements, "for="+node)
+		}
+
+		// The elements are joined with a comma (,) to form the header
+		fwd = strings.Join(elements, ", ")
+
+		// Add forwarded header
+		r.Header.Set("Forwarded", fwd)
+	})
+}
diff --git a/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim_test.go
b/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim_test.go new file mode 100644 index 0000000000..965bfd1367 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/forwarded_shim_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package queue + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestForwardedShimHandler(t *testing.T) { + tests := []struct { + name string + xff string + xfh string + xfp string + fwd string + want string + }{{ + name: "multiple xff", + xff: "127.0.0.1, ::1", + xfh: "h", + xfp: "p", + want: "for=127.0.0.1;host=h;proto=p, for=\"[::1]\"", + }, { + name: "single xff", + xff: "127.0.0.1", + xfh: "h", + xfp: "p", + want: "for=127.0.0.1;host=h;proto=p", + }, { + name: "multiple xff, no xfh, no xfp", + xff: "127.0.0.1, ::1", + want: "for=127.0.0.1, for=\"[::1]\"", + }, { + name: "multiple xff, no xfh", + xff: "127.0.0.1, ::1", + xfp: "p", + want: "for=127.0.0.1;proto=p, for=\"[::1]\"", + }, { + name: "multiple xff, no xfp", + xff: "127.0.0.1, ::1", + xfh: "h", + want: "for=127.0.0.1;host=h, for=\"[::1]\"", + }, { + name: "only xfh", + xfh: "h", + want: "host=h", + }, { + name: "only xfp", + xfp: "p", + want: "proto=p", + }, { + name: "only xfp and xfh", + xfh: "h", + xfp: "p", + want: "host=h;proto=p", + }, { + name: "existing fwd", + xff: "127.0.0.1, ::1", + xfh: "h", + xfp: "p", + fwd: "for=a, for=b", + want: "for=a, for=b", + }, { + name: "no xf* headers", + }} + 
+ for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := "" + + req, err := http.NewRequest(http.MethodGet, "/", nil) + if err != nil { + t.Fatal(err) + } + + if test.xff != "" { + req.Header.Set("X-Forwarded-For", test.xff) + } + if test.xfh != "" { + req.Header.Set("X-Forwarded-Host", test.xfh) + } + if test.xfp != "" { + req.Header.Set("X-Forwarded-Proto", test.xfp) + } + if test.fwd != "" { + req.Header.Set("Forwarded", test.fwd) + } + + resp := httptest.NewRecorder() + + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + got = req.Header.Get("Forwarded") + }) + + ForwardedShimHandler(h).ServeHTTP(resp, req) + + if test.want != got { + t.Errorf("Wrong header value. Want %q, got %q", test.want, got) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/health/health_state.go b/test/vendor/knative.dev/serving/pkg/queue/health/health_state.go new file mode 100644 index 0000000000..981e952c3a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/health/health_state.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package health + +import ( + "io" + "net/http" + "sync" +) + +const ( + // Return `queue` as body for 200 responses to indicate the response is from queue-proxy. + aliveBody = "queue" + notAliveBody = "queue not ready" +) + +// State holds state about the current healthiness of the component. 
+type State struct { + alive bool + shuttingDown bool + mutex sync.RWMutex + + drainCh chan struct{} + drainCompleted bool +} + +// IsAlive returns whether or not the health server is in a known +// working state currently. +func (h *State) IsAlive() bool { + h.mutex.RLock() + defer h.mutex.RUnlock() + + return h.alive +} + +// IsShuttingDown returns whether or not the health server is currently +// shutting down. +func (h *State) IsShuttingDown() bool { + h.mutex.RLock() + defer h.mutex.RUnlock() + + return h.shuttingDown +} + +// setAlive updates the state to declare the service alive. +func (h *State) setAlive() { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.alive = true + h.shuttingDown = false +} + +// shutdown updates the state to declare the service shutting down. +func (h *State) shutdown() { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.alive = false + h.shuttingDown = true +} + +// drainFinish updates that we finished draining. +func (h *State) drainFinished() { + h.mutex.Lock() + defer h.mutex.Unlock() + + if !h.drainCompleted && h.drainCh != nil { + close(h.drainCh) + } + + h.drainCompleted = true + +} + +// HandleHealthProbe handles the probe according to the current state of the +// health server. If isAggressive is false and prober has succeeded previously, +// the function return success without probing user-container again (until +// shutdown). +func (h *State) HandleHealthProbe(prober func() bool, isAggressive bool, w http.ResponseWriter) { + sendAlive := func() { + io.WriteString(w, aliveBody) + } + + sendNotAlive := func() { + w.WriteHeader(http.StatusServiceUnavailable) + io.WriteString(w, notAliveBody) + } + + switch { + case !isAggressive && h.IsAlive(): + sendAlive() + case h.IsShuttingDown(): + sendNotAlive() + case prober != nil && !prober(): + sendNotAlive() + default: + h.setAlive() + sendAlive() + } +} + +// DrainHandlerFunc constructs an HTTP handler that waits until the proxy server is shut down. 
+func (h *State) DrainHandlerFunc() func(_ http.ResponseWriter, _ *http.Request) {
+	h.mutex.Lock()
+	defer h.mutex.Unlock()
+	if h.drainCh == nil {
+		h.drainCh = make(chan struct{})
+	}
+
+	return func(_ http.ResponseWriter, _ *http.Request) {
+		<-h.drainCh
+	}
+}
+
+// Shutdown marks the proxy server as not ready and begins its shutdown process. This
+// results in unblocking any connections waiting for drain.
+func (h *State) Shutdown(drain func()) {
+	h.shutdown()
+
+	if drain != nil {
+		drain()
+	}
+
+	h.drainFinished()
+}
diff --git a/test/vendor/knative.dev/serving/pkg/queue/health/health_state_test.go b/test/vendor/knative.dev/serving/pkg/queue/health/health_state_test.go
new file mode 100644
index 0000000000..3cbd9d256f
--- /dev/null
+++ b/test/vendor/knative.dev/serving/pkg/queue/health/health_state_test.go
@@ -0,0 +1,217 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package health + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestHealthStateSetsState(t *testing.T) { + s := &State{} + + wantAlive := func() { + if !s.IsAlive() { + t.Error("State was not alive but it should have been alive") + } + } + wantNotAlive := func() { + if s.IsAlive() { + t.Error("State was alive but it shouldn't have been") + } + } + wantShuttingDown := func() { + if !s.IsShuttingDown() { + t.Error("State was not shutting down but it should have been") + } + } + wantNotShuttingDown := func() { + if s.IsShuttingDown() { + t.Error("State was shutting down but it shouldn't have been") + } + } + + wantNotAlive() + wantNotShuttingDown() + + s.setAlive() + wantAlive() + wantNotShuttingDown() + + s.shutdown() + wantNotAlive() + wantShuttingDown() +} + +func TestHealthStateHealthHandler(t *testing.T) { + tests := []struct { + name string + state *State + prober func() bool + isAggressive bool + wantStatus int + wantBody string + }{{ + name: "alive: true, K-Probe", + state: &State{alive: true}, + isAggressive: false, + wantStatus: http.StatusOK, + wantBody: aliveBody, + }, { + name: "alive: false, prober: true, K-Probe", + state: &State{alive: false}, + prober: func() bool { return true }, + isAggressive: false, + wantStatus: http.StatusOK, + wantBody: aliveBody, + }, { + name: "alive: false, prober: false, K-Probe", + state: &State{alive: false}, + prober: func() bool { return false }, + isAggressive: false, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }, { + name: "alive: false, no prober, K-Probe", + state: &State{alive: false}, + isAggressive: false, + wantStatus: http.StatusOK, + wantBody: aliveBody, + }, { + name: "shuttingDown: true, K-Probe", + state: &State{shuttingDown: true}, + isAggressive: false, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }, { + name: "no prober, shuttingDown: false", + state: &State{}, + isAggressive: true, + wantStatus: http.StatusOK, 
+ wantBody: aliveBody, + }, { + name: "prober: true, shuttingDown: true", + state: &State{shuttingDown: true}, + prober: func() bool { return true }, + isAggressive: true, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }, { + name: "prober: true, shuttingDown: false", + state: &State{}, + prober: func() bool { return true }, + isAggressive: true, + wantStatus: http.StatusOK, + wantBody: aliveBody, + }, { + name: "prober: false, shuttingDown: false", + state: &State{}, + prober: func() bool { return false }, + isAggressive: true, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }, { + name: "prober: false, shuttingDown: true", + state: &State{}, + prober: func() bool { return false }, + isAggressive: true, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }, { + name: "alive: true, prober: false, shuttingDown: false", + state: &State{alive: true}, + prober: func() bool { return false }, + isAggressive: true, + wantStatus: http.StatusServiceUnavailable, + wantBody: notAliveBody, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rr := httptest.NewRecorder() + test.state.HandleHealthProbe(test.prober, test.isAggressive, rr) + + if rr.Code != test.wantStatus { + t.Errorf("handler returned wrong status code: got %v want %v", + rr.Code, test.wantStatus) + } + + if rr.Body.String() != test.wantBody { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), test.wantBody) + } + }) + } +} + +func TestHealthStateDrainHandler(t *testing.T) { + state := &State{} + state.setAlive() + + req, err := http.NewRequest(http.MethodGet, "/", nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + completedCh := make(chan struct{}, 1) + handler := http.HandlerFunc(state.DrainHandlerFunc()) + go func(handler http.Handler, recorder *httptest.ResponseRecorder) { + handler.ServeHTTP(recorder, req) + close(completedCh) + }(handler, rr) + + 
state.drainFinished() + <-completedCh + + if rr.Code != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + rr.Code, http.StatusOK) + } +} + +func TestHealthStateShutdown(t *testing.T) { + state := &State{} + state.setAlive() + state.drainCh = make(chan struct{}) + + calledCh := make(chan struct{}, 1) + state.Shutdown(func() { + close(calledCh) + }) + + // The channel should be closed as the cleaner is called. + select { + case <-calledCh: + case <-time.After(2 * time.Second): + t.Errorf("drain function not called when shutting down") + } + + if !state.drainCompleted { + t.Error("shutdown did not complete draining") + } + + if !state.shuttingDown { + t.Errorf("wrong shutdown state: got %v want %v", state.shuttingDown, true) + } + + if state.alive { + t.Errorf("wrong alive state: got %v want %v", state.alive, false) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/health/probe.go b/test/vendor/knative.dev/serving/pkg/queue/health/probe.go new file mode 100644 index 0000000000..7f22d85ecc --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/health/probe.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package health + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "time" + + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/network" +) + +// HTTPProbeConfigOptions holds the HTTP probe config options +type HTTPProbeConfigOptions struct { + Timeout time.Duration + *corev1.HTTPGetAction + KubeMajor string + KubeMinor string +} + +// TCPProbeConfigOptions holds the TCP probe config options +type TCPProbeConfigOptions struct { + SocketTimeout time.Duration + Address string +} + +// TCPProbe checks that a TCP socket to the address can be opened. +// Did not reuse k8s.io/kubernetes/pkg/probe/tcp to not create a dependency +// on klog. +func TCPProbe(config TCPProbeConfigOptions) error { + conn, err := net.DialTimeout("tcp", config.Address, config.SocketTimeout) + if err != nil { + return err + } + conn.Close() + return nil +} + +// HTTPProbe checks that HTTP connection can be established to the address. +func HTTPProbe(config HTTPProbeConfigOptions) error { + httpClient := &http.Client{ + Transport: &http.Transport{ + DisableKeepAlives: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + Timeout: config.Timeout, + } + url := url.URL{ + Scheme: string(config.Scheme), + Host: net.JoinHostPort(config.Host, config.Port.String()), + Path: config.Path, + } + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return fmt.Errorf("error constructing probe request %w", err) + } + + req.Header.Add(network.UserAgentKey, network.KubeProbeUAPrefix+config.KubeMajor+"/"+config.KubeMinor) + + for _, header := range config.HTTPHeaders { + req.Header.Add(header.Name, header.Value) + } + + res, err := httpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + if !IsHTTPProbeReady(res) { + return fmt.Errorf("HTTP probe did not respond Ready, got status code: %d", res.StatusCode) + } + + return nil +} + +// IsHTTPProbeReady checks whether we received a successful Response +func 
IsHTTPProbeReady(res *http.Response) bool { + if res == nil { + return false + } + + // response status code between 200-399 indicates success + return res.StatusCode >= 200 && res.StatusCode < 400 +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/health/probe_test.go b/test/vendor/knative.dev/serving/pkg/queue/health/probe_test.go new file mode 100644 index 0000000000..1df000b760 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/health/probe_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package health + +import ( + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/network" +) + +func TestTCPProbe(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + config := TCPProbeConfigOptions{ + Address: server.Listener.Addr().String(), + SocketTimeout: time.Second, + } + // Connecting to the server should work + if err := TCPProbe(config); err != nil { + t.Errorf("Probe failed with: %v", err) + } + + // Close the server so probing fails afterwards + server.Close() + if err := TCPProbe(config); err == nil { + t.Error("Expected probe to fail but it didn't") + } +} + +func TestHTTPProbeSuccess(t *testing.T) { + var gotHeader corev1.HTTPHeader + var gotKubeletHeader bool + expectedHeader := corev1.HTTPHeader{ + Name: "Testkey", + Value: "Testval", + } + var gotPath string + expectedPath := "/health" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for headerKey, headerValue := range r.Header { + // Filtering for expectedHeader.TestKey to avoid other HTTP probe headers + if expectedHeader.Name == headerKey { + gotHeader = corev1.HTTPHeader{Name: headerKey, Value: headerValue[0]} + } + + if headerKey == "User-Agent" && strings.HasPrefix(headerValue[0], network.KubeProbeUAPrefix) { + gotKubeletHeader = true + } + } + + gotPath = r.URL.Path + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + httpGetAction := newHTTPGetAction(t, server.URL) + httpGetAction.Path = expectedPath + httpGetAction.HTTPHeaders = []corev1.HTTPHeader{expectedHeader} + + config := HTTPProbeConfigOptions{ + Timeout: time.Second, + HTTPGetAction: httpGetAction, + } + // Connecting to the server should work + if err := HTTPProbe(config); err != nil { + 
t.Errorf("Expected probe to succeed but it failed with %v", err) + } + if d := cmp.Diff(gotHeader, expectedHeader); d != "" { + t.Errorf("Expected probe headers to match but got %s", d) + } + if !gotKubeletHeader { + t.Errorf("Expected kubelet probe header to be added to request") + } + if !cmp.Equal(gotPath, expectedPath) { + t.Errorf("Expected %s path to match but got %s", expectedPath, gotPath) + } + // Close the server so probing fails afterwards + server.Close() + if err := HTTPProbe(config); err == nil { + t.Error("Expected probe to fail but it didn't") + } +} + +func TestHTTPsSchemeProbeSuccess(t *testing.T) { + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + config := HTTPProbeConfigOptions{ + Timeout: time.Second, + HTTPGetAction: newHTTPGetAction(t, server.URL), + } + // Connecting to the server should work + if err := HTTPProbe(config); err != nil { + t.Errorf("Expected probe to succeed but failed with error %v", err) + } + + // Close the server so probing fails afterwards + server.Close() + if err := HTTPProbe(config); err == nil { + t.Error("Expected probe to fail but it didn't") + } +} + +func TestHTTPProbeTimeoutFailure(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(2 * time.Second) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + config := HTTPProbeConfigOptions{ + Timeout: time.Second, + HTTPGetAction: newHTTPGetAction(t, server.URL), + } + if err := HTTPProbe(config); err == nil { + t.Error("Expected probe to fail but it succeeded") + } +} + +func TestHTTPProbeResponseStatusCodeFailure(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + })) + defer server.Close() + + config := HTTPProbeConfigOptions{ + Timeout: time.Second, + HTTPGetAction: 
newHTTPGetAction(t, server.URL), + } + if err := HTTPProbe(config); err == nil { + t.Error("Expected probe to fail but it succeeded") + } +} + +func TestHTTPProbeResponseErrorFailure(t *testing.T) { + config := HTTPProbeConfigOptions{ + HTTPGetAction: newHTTPGetAction(t, "http://localhost:0"), + } + if err := HTTPProbe(config); err == nil { + t.Error("Expected probe to fail but it succeeded") + } +} + +func newHTTPGetAction(t *testing.T, serverURL string) *corev1.HTTPGetAction { + urlParsed, err := url.Parse(serverURL) + if err != nil { + t.Fatalf("Error parsing URL") + } + port := intstr.FromString(urlParsed.Port()) + + var uriScheme corev1.URIScheme + switch urlParsed.Scheme { + case "http": + uriScheme = corev1.URISchemeHTTP + case "https": + uriScheme = corev1.URISchemeHTTPS + default: + t.Fatalf("Unsupported scheme %s", urlParsed.Scheme) + } + + return &corev1.HTTPGetAction{ + Host: urlParsed.Hostname(), + Port: port, + Scheme: uriScheme, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter.go b/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter.go new file mode 100644 index 0000000000..eb301245b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter.go @@ -0,0 +1,142 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queue + +import ( + "errors" + "fmt" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +const ( + destinationNsLabel = "destination_namespace" + destinationConfigLabel = "destination_configuration" + destinationRevLabel = "destination_revision" + destinationPodLabel = "destination_pod" +) + +var ( + metricLabelNames = []string{ + destinationNsLabel, + destinationConfigLabel, + destinationRevLabel, + destinationPodLabel, + } + + // For backwards compatibility, the name is kept as `operations_per_second`. + requestsPerSecondGV = newGV( + "queue_requests_per_second", + "Number of requests per second") + proxiedRequestsPerSecondGV = newGV( + "queue_proxied_operations_per_second", + "Number of proxied requests per second") + averageConcurrentRequestsGV = newGV( + "queue_average_concurrent_requests", + "Number of requests currently being handled by this pod") + averageProxiedConcurrentRequestsGV = newGV( + "queue_average_proxied_concurrent_requests", + "Number of proxied requests currently being handled by this pod") + processUptimeGV = newGV( + "process_uptime", + "The number of seconds that the process has been up") +) + +func newGV(n, h string) *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{Name: n, Help: h}, + metricLabelNames, + ) +} + +// PrometheusStatsReporter structure represents a prometheus stats reporter. +type PrometheusStatsReporter struct { + handler http.Handler + reportingPeriod time.Duration + startTime time.Time + + requestsPerSecond prometheus.Gauge + proxiedRequestsPerSecond prometheus.Gauge + averageConcurrentRequests prometheus.Gauge + averageProxiedConcurrentRequests prometheus.Gauge + processUptime prometheus.Gauge +} + +// NewPrometheusStatsReporter creates a reporter that collects and reports queue metrics. 
+func NewPrometheusStatsReporter(namespace, config, revision, pod string, reportingPeriod time.Duration) (*PrometheusStatsReporter, error) { + if namespace == "" { + return nil, errors.New("namespace must not be empty") + } + if config == "" { + return nil, errors.New("config must not be empty") + } + if revision == "" { + return nil, errors.New("revision must not be empty") + } + if pod == "" { + return nil, errors.New("pod must not be empty") + } + + registry := prometheus.NewRegistry() + for _, gv := range []*prometheus.GaugeVec{ + requestsPerSecondGV, proxiedRequestsPerSecondGV, + averageConcurrentRequestsGV, averageProxiedConcurrentRequestsGV, + processUptimeGV} { + if err := registry.Register(gv); err != nil { + return nil, fmt.Errorf("register metric failed: %w", err) + } + } + + labels := prometheus.Labels{ + destinationNsLabel: namespace, + destinationConfigLabel: config, + destinationRevLabel: revision, + destinationPodLabel: pod, + } + + return &PrometheusStatsReporter{ + handler: promhttp.HandlerFor(registry, promhttp.HandlerOpts{}), + reportingPeriod: reportingPeriod, + startTime: time.Now(), + + requestsPerSecond: requestsPerSecondGV.With(labels), + proxiedRequestsPerSecond: proxiedRequestsPerSecondGV.With(labels), + averageConcurrentRequests: averageConcurrentRequestsGV.With(labels), + averageProxiedConcurrentRequests: averageProxiedConcurrentRequestsGV.With(labels), + processUptime: processUptimeGV.With(labels), + }, nil +} + +// Report captures request metrics. +func (r *PrometheusStatsReporter) Report(acr float64, apcr float64, rc float64, prc float64) { + // Requests per second is a rate over time while concurrency is not. 
+ rp := r.reportingPeriod.Seconds() + r.requestsPerSecond.Set(rc / rp) + r.proxiedRequestsPerSecond.Set(prc / rp) + r.averageConcurrentRequests.Set(acr) + r.averageProxiedConcurrentRequests.Set(apcr) + r.processUptime.Set(time.Since(r.startTime).Seconds()) +} + +// Handler returns an uninstrumented http.Handler used to serve stats registered by this +// PrometheusStatsReporter. +func (r *PrometheusStatsReporter) Handler() http.Handler { + return r.handler +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter_test.go b/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter_test.go new file mode 100644 index 0000000000..1781ee9c4f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/prometheus_stats_reporter_test.go @@ -0,0 +1,202 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "errors" + "math" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + dto "github.com/prometheus/client_model/go" +) + +const ( + namespace = "default" + config = "helloworld-go" + revision = "helloworld-go-00001" + pod = "helloworld-go-00001-deployment-8ff587cc9-7g9gc" + + // Except for uptime everything else is integers, so this precision is + // good enough for the tests. 
+ precision = 0.1 +) + +func TestNewPrometheusStatsReporter_negative(t *testing.T) { + tests := []struct { + name string + errorMsg string + result error + namespace string + config string + revision string + pod string + }{ + { + "Empty_Namespace_Value", + "Expected namespace empty error", + errors.New("namespace must not be empty"), + "", + config, + revision, + pod, + }, + { + "Empty_Config_Value", + "Expected config empty error", + errors.New("config must not be empty"), + namespace, + "", + revision, + pod, + }, + { + "Empty_Revision_Value", + "Expected revision empty error", + errors.New("revision must not be empty"), + namespace, + config, + "", + pod, + }, + { + "Empty_Pod_Value", + "Expected pod empty error", + errors.New("pod must not be empty"), + namespace, + config, + revision, + "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if _, err := NewPrometheusStatsReporter(test.namespace, test.config, test.revision, test.pod, 1*time.Second); err.Error() != test.result.Error() { + t.Errorf("Got error msg from NewPrometheusStatsReporter(): '%+v', wanted '%+v'", err, test.errorMsg) + } + }) + } +} + +func TestReporterReport(t *testing.T) { + tests := []struct { + name string + reportingPeriod time.Duration + + concurrency float64 + proxiedConcurrency float64 + reqCount float64 + proxiedReqCount float64 + + expectedReqCount float64 + expectedProxiedRequestCount float64 + expectedConcurrency float64 + expectedProxiedConcurrency float64 + }{{ + name: "no proxy requests", + reportingPeriod: 1 * time.Second, + + reqCount: 39, + concurrency: 3, + + expectedReqCount: 39, + expectedConcurrency: 3, + expectedProxiedRequestCount: 0, + expectedProxiedConcurrency: 0, + }, { + name: "reportingPeriod=10s", + reportingPeriod: 10 * time.Second, + + reqCount: 39, + concurrency: 3, + proxiedReqCount: 15, + proxiedConcurrency: 2, + + expectedReqCount: 3.9, + expectedConcurrency: 3, + expectedProxiedRequestCount: 1.5, + 
expectedProxiedConcurrency: 2, + }, { + name: "reportingPeriod=2s", + reportingPeriod: 2 * time.Second, + + reqCount: 39, + concurrency: 3, + proxiedReqCount: 15, + proxiedConcurrency: 2, + + expectedReqCount: 19.5, + expectedConcurrency: 3, + expectedProxiedRequestCount: 7.5, + expectedProxiedConcurrency: 2, + }, { + name: "reportingPeriod=1s", + reportingPeriod: 1 * time.Second, + + reqCount: 39, + concurrency: 3, + proxiedReqCount: 15, + proxiedConcurrency: 2, + + expectedReqCount: 39, + expectedConcurrency: 3, + expectedProxiedRequestCount: 15, + expectedProxiedConcurrency: 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + reporter, err := NewPrometheusStatsReporter(namespace, config, revision, pod, test.reportingPeriod) + if err != nil { + t.Errorf("Something went wrong with creating a reporter, '%v'.", err) + } + // Make the value slightly more interesting, rather than microseconds. + reporter.startTime = reporter.startTime.Add(-5 * time.Second) + reporter.Report(test.concurrency, test.proxiedConcurrency, test.reqCount, test.proxiedReqCount) + checkData(t, requestsPerSecondGV, test.expectedReqCount) + checkData(t, averageConcurrentRequestsGV, test.expectedConcurrency) + checkData(t, proxiedRequestsPerSecondGV, test.expectedProxiedRequestCount) + checkData(t, averageProxiedConcurrentRequestsGV, test.expectedProxiedConcurrency) + checkData(t, processUptimeGV, 5.0) + }) + } +} + +func checkData(t *testing.T, gv *prometheus.GaugeVec, want float64) { + t.Helper() + g, err := gv.GetMetricWith(prometheus.Labels{ + destinationNsLabel: namespace, + destinationConfigLabel: config, + destinationRevLabel: revision, + destinationPodLabel: pod, + }) + if err != nil { + t.Fatalf("GaugeVec.GetMetricWith() error = %v", err) + } + + m := dto.Metric{} + if err := g.Write(&m); err != nil { + t.Fatalf("Gauge.Write() error = %v", err) + } + if got := *m.Gauge.Value; math.Abs(got-want) > precision { + t.Errorf("Got %v for Gauge value, wanted 
%v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/readiness/probe.go b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe.go new file mode 100644 index 0000000000..deb24c67c7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package readiness + +import ( + "fmt" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/serving/pkg/queue/health" +) + +const ( + aggressiveProbeTimeout = 100 * time.Millisecond + // PollTimeout is set equal to the queue-proxy's ExecProbe timeout to take + // advantage of the full window + PollTimeout = 10 * time.Second + retryInterval = 50 * time.Millisecond +) + +// Probe wraps a corev1.Probe along with a count of consecutive, successful probes +type Probe struct { + *corev1.Probe + count int32 +} + +// NewProbe returns a pointer a new Probe +func NewProbe(v1p *corev1.Probe) *Probe { + return &Probe{ + Probe: v1p, + } +} + +// IsAggressive indicates whether the Knative probe with aggressive retries should be used. 
+func (p *Probe) IsAggressive() bool { + return p.PeriodSeconds == 0 +} + +// ProbeContainer executes the defined Probe against the user-container +func (p *Probe) ProbeContainer() bool { + var err error + + switch { + case p.HTTPGet != nil: + err = p.httpProbe() + case p.TCPSocket != nil: + err = p.tcpProbe() + case p.Exec != nil: + // Should never be reachable. Exec probes to be translated to + // TCP probes when container is built. + // Using Fprintf for a concise error message in the event log. + fmt.Fprintln(os.Stderr, "exec probe not supported") + return false + default: + // Using Fprintf for a concise error message in the event log. + fmt.Fprintln(os.Stderr, "no probe found") + return false + } + + if err != nil { + // Using Fprintf for a concise error message in the event log. + fmt.Fprint(os.Stderr, err.Error()) + return false + } + return true +} + +func (p *Probe) doProbe(probe func(time.Duration) error) error { + if p.IsAggressive() { + return wait.PollImmediate(retryInterval, PollTimeout, func() (bool, error) { + if tcpErr := probe(aggressiveProbeTimeout); tcpErr != nil { + // reset count of consecutive successes to zero + p.count = 0 + return false, nil + } + + p.count++ + + // return success if count of consecutive successes is equal to or greater + // than the probe's SuccessThreshold. 
+ return p.Count() >= p.SuccessThreshold, nil + }) + } + + return probe(time.Duration(p.TimeoutSeconds) * time.Second) +} + +// tcpProbe function executes TCP probe once if its standard probe +// otherwise TCP probe polls condition function which returns true +// if the probe count is greater than success threshold and false if TCP probe fails +func (p *Probe) tcpProbe() error { + config := health.TCPProbeConfigOptions{ + Address: p.TCPSocket.Host + ":" + p.TCPSocket.Port.String(), + } + + return p.doProbe(func(to time.Duration) error { + config.SocketTimeout = to + return health.TCPProbe(config) + }) +} + +// httpProbe function executes HTTP probe once if its standard probe +// otherwise HTTP probe polls condition function which returns true +// if the probe count is greater than success threshold and false if HTTP probe fails +func (p *Probe) httpProbe() error { + config := health.HTTPProbeConfigOptions{ + HTTPGetAction: p.HTTPGet, + } + + return p.doProbe(func(to time.Duration) error { + config.Timeout = to + return health.HTTPProbe(config) + }) +} + +// Count function fetches current probe count +func (p *Probe) Count() int32 { + return p.count +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding.go b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding.go new file mode 100644 index 0000000000..4f79b2ffb2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package readiness + +import ( + "encoding/json" + "errors" + + corev1 "k8s.io/api/core/v1" +) + +// DecodeProbe takes a json serialised *corev1.Probe and returns a Probe or an error. +func DecodeProbe(jsonProbe string) (*corev1.Probe, error) { + p := &corev1.Probe{} + if err := json.Unmarshal([]byte(jsonProbe), p); err != nil { + return nil, err + } + return p, nil +} + +// EncodeProbe takes *corev1.Probe object and returns marshalled Probe JSON string and an error. +func EncodeProbe(rp *corev1.Probe) (string, error) { + if rp == nil { + return "", errors.New("cannot encode nil probe") + } + + probeJSON, err := json.Marshal(rp) + if err != nil { + return "", err + } + return string(probeJSON), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding_test.go b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding_test.go new file mode 100644 index 0000000000..7ae97b4ada --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_encoding_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package readiness + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestParseProbeSuccess(t *testing.T) { + expectedProbe := &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromString("8080"), + }, + }, + } + probeBytes, err := json.Marshal(expectedProbe) + if err != nil { + t.Fatalf("Failed to decode probe %#v", err) + } + gotProbe, err := DecodeProbe(string(probeBytes)) + if err != nil { + t.Fatalf("Failed DecodeProbe() %#v", err) + } + if d := cmp.Diff(gotProbe, expectedProbe); d != "" { + t.Errorf("Probe diff %s; got %v, want %v", d, gotProbe, expectedProbe) + } +} + +func TestParseProbeFailure(t *testing.T) { + probeBytes, err := json.Marshal("wrongProbeObject") + if err != nil { + t.Fatalf("Failed to decode probe %#v", err) + } + _, err = DecodeProbe(string(probeBytes)) + if err == nil { + t.Fatal("Expected DecodeProbe() to fail") + } +} + +func TestEncodeProbe(t *testing.T) { + probe := &corev1.Probe{ + SuccessThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromString("8080"), + }, + }, + } + + jsonProbe, err := EncodeProbe(probe) + + if err != nil { + t.Fatalf("Expected no errer, got: %#v", err) + } + + wantProbe := `{"tcpSocket":{"port":"8080","host":"127.0.0.1"},"successThreshold":1}` + + if diff := cmp.Diff(jsonProbe, wantProbe); diff != "" { + t.Errorf("Probe diff: %s; got %v, want %v", diff, jsonProbe, wantProbe) + } +} + +func TestEncodeNilProbe(t *testing.T) { + jsonProbe, err := EncodeProbe(nil) + + if err == nil { + t.Errorf("Expected error") + } + + if jsonProbe != "" { + t.Errorf("Expected empty probe string; got %s", jsonProbe) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_test.go 
b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_test.go new file mode 100644 index 0000000000..22362a6443 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/readiness/probe_test.go @@ -0,0 +1,583 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package readiness + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestNewProbe(t *testing.T) { + v1p := &corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }, + } + + p := NewProbe(v1p) + + if diff := cmp.Diff(p.Probe, v1p); diff != "" { + t.Errorf("NewProbe (-want, +got) = %v", diff) + } + + if c := p.Count(); c != 0 { + t.Errorf("Expected Probe.Count == 0, got: %d", c) + } +} + +func TestTCPFailure(t *testing.T) { + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Reported success when no server was available for connection") + } +} + +func TestEmptyHandler(t *testing.T) { + pb := 
NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{}, + }) + + if pb.ProbeContainer() { + t.Error("Reported success when no handler was configured.") + } +} + +func TestExecHandler(t *testing.T) { + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"echo", "hello"}, + }}, + }) + + if pb.ProbeContainer() { + t.Error("Expected ExecProbe to always fail") + } +} + +func TestTCPSuccess(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + t.Log("Port", tsURL.Port()) + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Probe report failure. 
Expected success.") + } +} + +func TestHTTPFailureToConnect(t *testing.T) { + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Reported success when no server was available for connection") + } +} + +func TestHTTPBadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Reported success when server replied with Bad Request") + } +} + +func TestHTTPSuccess(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Probe failed. 
Expected success.") + } +} + +func TestHTTPTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(3 * time.Second) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Probe succeeded. Expected failure due to timeout.") + } +} + +func TestHTTPSuccessWithDelay(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(200 * time.Millisecond) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 1, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 1, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Probe failed. Wanted success.") + } +} + +func TestKnHTTPSuccessWithRetry(t *testing.T) { + var count int32 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Fail the very first request. 
+ if atomic.AddInt32(&count, 1) == 1 { + w.WriteHeader(http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 1, + FailureThreshold: 0, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Probe failed. Expected success after retry.") + } +} + +func TestKnHTTPSuccessWithThreshold(t *testing.T) { + var count int32 + var threshold int32 = 3 + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddInt32(&count, 1) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: threshold, + FailureThreshold: 0, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Expected success after second attempt.") + } + + if atomic.LoadInt32(&count) < threshold { + t.Errorf("Expected %d requests before reporting success", threshold) + } +} + +func TestKnHTTPSuccessWithThresholdAndFailure(t *testing.T) { + var count int32 + var threshold int32 = 3 + var requestFailure int32 = 2 + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if atomic.AddInt32(&count, 1) == requestFailure { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil 
{ + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: threshold, + FailureThreshold: 0, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + HTTPHeaders: []corev1.HTTPHeader{{ + Name: "Test-key", + Value: "Test-value", + }}, + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Expected success.") + } + + if atomic.LoadInt32(&count) < threshold+requestFailure { + t.Errorf("Wanted %d requests before reporting success, got=%d", threshold+requestFailure, count) + } +} + +func TestKnHTTPTimeoutFailure(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + tsURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("Failed to parse URL %s: %v", ts.URL, err) + } + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 1, + FailureThreshold: 0, + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: tsURL.Hostname(), + Port: intstr.FromString(tsURL.Port()), + Scheme: corev1.URISchemeHTTP, + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Probe succeeded. Expected failure due to timeout.") + } +} + +func TestKnTCPProbeSuccess(t *testing.T) { + listener, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Error setting up tcp listener: %v", err) + } + defer listener.Close() + addr := listener.Addr().(*net.TCPAddr) + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 1, + FailureThreshold: 0, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(addr.Port), + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Got probe error. 
Wanted success.") + } +} + +func TestKnUnimplementedProbe(t *testing.T) { + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 1, + FailureThreshold: 0, + Handler: corev1.Handler{}, + }) + + if pb.ProbeContainer() { + t.Error("Got probe success. Wanted failure.") + } +} +func TestKnTCPProbeFailure(t *testing.T) { + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 1, + FailureThreshold: 0, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }, + }) + + if pb.ProbeContainer() { + t.Error("Got probe success. Wanted failure.") + } +} + +func TestKnTCPProbeSuccessWithThreshold(t *testing.T) { + listener, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Error setting up tcp listener: %v", err) + } + defer listener.Close() + addr := listener.Addr().(*net.TCPAddr) + + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: 3, + FailureThreshold: 0, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(addr.Port), + }, + }, + }) + + if !pb.ProbeContainer() { + t.Error("Got probe error. 
Wanted success.") + } + + if pb.Count() < 3 { + t.Errorf("Expected count to be 3, go %d", pb.Count()) + } +} + +func TestKnTCPProbeSuccessThresholdIncludesFailure(t *testing.T) { + listener, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("Error setting up tcp listener: %v", err) + } + addr := listener.Addr().(*net.TCPAddr) + + var successThreshold int32 = 3 + pb := NewProbe(&corev1.Probe{ + PeriodSeconds: 0, + TimeoutSeconds: 0, + SuccessThreshold: successThreshold, + FailureThreshold: 0, + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(addr.Port), + }, + }, + }) + + connCount := 0 + desiredConnCount := 4 // 1 conn from 1st server, 3 from 2nd server + + errChan := make(chan bool, 1) + go func() { + errChan <- pb.ProbeContainer() + }() + + if _, err = listener.Accept(); err != nil { + t.Fatalf("Failed to accept TCP conn: %v", err) + } + connCount++ + + // Close server and sleep to give probe time to fail a few times + // and reset count + listener.Close() + time.Sleep(500 * time.Millisecond) + + listener2, err := net.Listen("tcp", fmt.Sprintf(":%d", addr.Port)) + if err != nil { + t.Fatalf("Error setting up tcp listener: %v", err) + } + + for { + if connCount < desiredConnCount { + if _, err = listener2.Accept(); err != nil { + t.Fatalf("Failed to accept TCP conn: %v", err) + } + connCount++ + } else { + listener2.Close() + break + } + } + + if probeErr := <-errChan; !probeErr { + t.Error("Wanted ProbeContainer() successed but got error") + } + if pb.Count() < successThreshold { + t.Errorf("Expected count to be %d but got %d", successThreshold, pb.Count()) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/request_metric.go b/test/vendor/knative.dev/serving/pkg/queue/request_metric.go new file mode 100644 index 0000000000..53b1bdf4ea --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/request_metric.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Knative Authors + +Licensed 
under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "errors" + "net/http" + "time" + + pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue/stats" +) + +type requestMetricHandler struct { + handler http.Handler + statsReporter stats.StatsReporter + breaker *Breaker +} + +// NewRequestMetricHandler creates an http.Handler that emits request metrics. +func NewRequestMetricHandler(h http.Handler, r stats.StatsReporter, b *Breaker) (http.Handler, error) { + if r == nil { + return nil, errors.New("StatsReporter must not be nil") + } + + return &requestMetricHandler{ + handler: h, + statsReporter: r, + breaker: b, + }, nil +} + +func (h *requestMetricHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rr := pkghttp.NewResponseRecorder(w, http.StatusOK) + startTime := time.Now() + if h.breaker != nil { + h.statsReporter.ReportQueueDepth(h.breaker.InFlight()) + } + + defer func() { + // Filter probe requests for revision metrics. + if network.IsProbe(r) { + return + } + + // If ServeHTTP panics, recover, record the failure and panic again. 
+ err := recover() + latency := time.Since(startTime) + if err != nil { + h.sendRequestMetrics(http.StatusInternalServerError, latency) + panic(err) + } + h.sendRequestMetrics(rr.ResponseCode, latency) + }() + + h.handler.ServeHTTP(rr, r) +} + +func (h *requestMetricHandler) sendRequestMetrics(respCode int, latency time.Duration) { + h.statsReporter.ReportRequestCount(respCode) + h.statsReporter.ReportResponseTime(respCode, latency) +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/request_metric_test.go b/test/vendor/knative.dev/serving/pkg/queue/request_metric_test.go new file mode 100644 index 0000000000..866c8c13a9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/request_metric_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queue + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + "time" + + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue/stats" +) + +func TestNewRequestMetricHandlerFailure(t *testing.T) { + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + var r stats.StatsReporter + _, err := NewRequestMetricHandler(baseHandler, r, nil) + if err == nil { + t.Error("should get error when StatsReporter is empty") + } +} + +func TestRequestMetricHandler(t *testing.T) { + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + for _, b := range []*Breaker{nil, NewBreaker(BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 1})} { + r := &fakeStatsReporter{} + // No breaker is fine. + handler, err := NewRequestMetricHandler(baseHandler, r, b) + if err != nil { + t.Fatalf("failed to create handler: %v", err) + } + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("test")) + handler.ServeHTTP(resp, req) + + // Serve one request, should get 1 request count and none zero latency + if got, want := r.reqCountReportTimes, 1; got != want { + t.Errorf("ReportRequestCount was triggered %v times, want %v", got, want) + } + if got, want := r.respTimeReportTimes, 1; got != want { + t.Errorf("ReportResponseTime was triggered %v times, want %v", got, want) + } + if got, want := r.lastRespCode, http.StatusOK; got != want { + t.Errorf("response code got %v, want %v", got, want) + } + if got, want := r.lastReqCount, 1; got != int64(want) { + t.Errorf("request count got %v, want %v", got, want) + } + if r.lastReqLatency == 0 { + t.Errorf("request latency got %v, want larger than 0", r.lastReqLatency) + } + wantQD := 0 + if b != nil { + wantQD++ + } + if got, want := r.queueDepthTimes, wantQD; got != want { + t.Errorf("QueueDepth report count = 
%d, want: %d", got, want) + } + + // A probe request should not be recorded. + req.Header.Set(network.ProbeHeaderName, "activator") + handler.ServeHTTP(resp, req) + if got, want := r.reqCountReportTimes, 1; got != want { + t.Errorf("ReportRequestCount was triggered %v times, want %v", got, want) + } + if got, want := r.respTimeReportTimes, 1; got != want { + t.Errorf("ReportResponseTime was triggered %v times, want %v", got, want) + } + } +} + +func TestRequestMetricHandlerPanickingHandler(t *testing.T) { + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic("no!") + }) + r := &fakeStatsReporter{} + handler, err := NewRequestMetricHandler(baseHandler, r, nil) + if err != nil { + t.Fatalf("Failed to create handler: %v", err) + } + + resp := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "http://example.com", bytes.NewBufferString("test")) + defer func() { + err := recover() + if err == nil { + t.Error("Want ServeHTTP to panic, got nothing.") + } + + // Serve one request, should get 1 request count and none zero latency + if got, want := r.lastRespCode, http.StatusInternalServerError; got != want { + t.Errorf("Response code got %v, want %v", got, want) + } + if got, want := r.lastReqCount, int64(1); got != want { + t.Errorf("Request count got %d, want %d", got, want) + } + if r.lastReqLatency == 0 { + t.Errorf("Request latency got %v, want larger than 0", r.lastReqLatency) + } + }() + handler.ServeHTTP(resp, req) +} + +// fakeStatsReporter just record the last stat it received and the times it +// calls ReportRequestCount and ReportResponseTime +type fakeStatsReporter struct { + reqCountReportTimes int + respTimeReportTimes int + queueDepthTimes int + lastRespCode int + lastReqCount int64 + lastReqLatency time.Duration +} + +func (r *fakeStatsReporter) ReportQueueDepth(qd int) error { + r.queueDepthTimes++ + return nil +} + +func (r *fakeStatsReporter) ReportRequestCount(responseCode int) error { + 
r.reqCountReportTimes++ + r.lastRespCode = responseCode + r.lastReqCount = 1 + return nil +} + +func (r *fakeStatsReporter) ReportResponseTime(responseCode int, d time.Duration) error { + r.respTimeReportTimes++ + r.lastRespCode = responseCode + r.lastReqLatency = d + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/stats.go b/test/vendor/knative.dev/serving/pkg/queue/stats.go new file mode 100644 index 0000000000..2878136a27 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/stats.go @@ -0,0 +1,119 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "time" +) + +// ReqEvent represents either an incoming or closed request. +type ReqEvent struct { + Time time.Time + EventType ReqEventType +} + +// ReqEventType denotes the type (incoming/closed) of a ReqEvent. +type ReqEventType int + +const ( + // ReqIn represents an incoming request + ReqIn ReqEventType = iota + // ReqOut represents a finished request + ReqOut + // ProxiedIn represents an incoming request through a proxy. + ProxiedIn + // ProxiedOut represents a finished proxied request. + ProxiedOut +) + +// NewStats instantiates a new instance of Stats. 
+func NewStats(startedAt time.Time, reqCh chan ReqEvent, reportCh <-chan time.Time, report func(float64, float64, float64, float64)) { + go func() { + var ( + requestCount float64 + proxiedCount float64 + concurrency int + proxiedConcurrency int + ) + + lastChange := startedAt + timeOnConcurrency := make(map[int]time.Duration) + timeOnProxiedConcurrency := make(map[int]time.Duration) + + // Updates the lastChanged/timeOnConcurrency state + // Note: due to nature of the channels used below, the ReportChan + // can race the ReqChan, thus an event can arrive that has a lower + // timestamp than `lastChange`. This is ignored, since it only makes + // for very slight differences. + updateState := func(concurrency int, time time.Time) { + if durationSinceChange := time.Sub(lastChange); durationSinceChange > 0 { + timeOnConcurrency[concurrency] += durationSinceChange + timeOnProxiedConcurrency[proxiedConcurrency] += durationSinceChange + lastChange = time + } + } + + for { + select { + case event := <-reqCh: + updateState(concurrency, event.Time) + + switch event.EventType { + case ProxiedIn: + proxiedConcurrency++ + proxiedCount++ + fallthrough + case ReqIn: + requestCount++ + concurrency++ + case ProxiedOut: + proxiedConcurrency-- + fallthrough + case ReqOut: + concurrency-- + } + case now := <-reportCh: + updateState(concurrency, now) + + report(weightedAverage(timeOnConcurrency), weightedAverage(timeOnProxiedConcurrency), requestCount, proxiedCount) + + // Reset the stat counts which have been reported. + timeOnConcurrency = map[int]time.Duration{} + timeOnProxiedConcurrency = map[int]time.Duration{} + requestCount = 0 + proxiedCount = 0 + } + } + }() +} + +func weightedAverage(times map[int]time.Duration) float64 { + // The sum of times cannot be 0, since `updateState` above only + // pemits positive durations. 
+ if len(times) == 0 { + return 0 + } + var totalTimeUsed time.Duration + for _, val := range times { + totalTimeUsed += val + } + sum := 0.0 + for c, val := range times { + sum += float64(c) * val.Seconds() + } + return sum / totalTimeUsed.Seconds() +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter.go b/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter.go new file mode 100644 index 0000000000..52c994afe5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +import ( + "context" + "errors" + "strconv" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + pkgmetrics "knative.dev/pkg/metrics" + "knative.dev/pkg/metrics/metricskey" + "knative.dev/serving/pkg/metrics" +) + +// NOTE: 0 should not be used as boundary. See +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/98 +var defaultLatencyDistribution = view.Distribution(5, 10, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + +// StatsReporter defines the interface for sending queue proxy metrics. 
+type StatsReporter interface { + ReportRequestCount(responseCode int) error + ReportResponseTime(responseCode int, d time.Duration) error + ReportQueueDepth(depth int) error +} + +// Reporter holds cached metric objects to report queue proxy metrics. +type Reporter struct { + initialized bool + ctx context.Context + countMetric *stats.Int64Measure + latencyMetric *stats.Float64Measure + queueSizeMetric *stats.Int64Measure // NB: this can be nil, depending on the reporter. +} + +// NewStatsReporter creates a reporter that collects and reports queue proxy metrics. +func NewStatsReporter(ns, service, config, rev, pod string, countMetric *stats.Int64Measure, + latencyMetric *stats.Float64Measure, queueSizeMetric *stats.Int64Measure) (*Reporter, error) { + if ns == "" { + return nil, errors.New("namespace must not be empty") + } + if config == "" { + return nil, errors.New("config must not be empty") + } + if rev == "" { + return nil, errors.New("revision must not be empty") + } + + keys := append(metrics.CommonRevisionKeys, metrics.PodTagKey, metrics.ContainerTagKey, metrics.ResponseCodeKey, metrics.ResponseCodeClassKey) + // Create view to see our measurements. + if err := view.Register( + &view.View{ + Description: "The number of requests that are routed to queue-proxy", + Measure: countMetric, + Aggregation: view.Count(), + TagKeys: keys, + }, + &view.View{ + Description: "The response time in millisecond", + Measure: latencyMetric, + Aggregation: defaultLatencyDistribution, + TagKeys: keys, + }, + ); err != nil { + return nil, err + } + // If queue size reporter is provided register the view for it too. + if queueSizeMetric != nil { + if err := view.Register( + &view.View{ + Description: "The number of items queued at this queue proxy.", + Measure: queueSizeMetric, + Aggregation: view.LastValue(), + TagKeys: keys, + }); err != nil { + return nil, err + } + } + + // Note that service name can be an empty string, so it needs a special treatment. 
+ ctx, err := tag.New( + context.Background(), + tag.Upsert(metrics.NamespaceTagKey, ns), + tag.Upsert(metrics.ServiceTagKey, valueOrUnknown(service)), + tag.Upsert(metrics.ConfigTagKey, config), + tag.Upsert(metrics.RevisionTagKey, rev), + tag.Upsert(metrics.PodTagKey, pod), + tag.Upsert(metrics.ContainerTagKey, "queue-proxy"), + ) + if err != nil { + return nil, err + } + + return &Reporter{ + initialized: true, + ctx: ctx, + countMetric: countMetric, + latencyMetric: latencyMetric, + queueSizeMetric: queueSizeMetric, + }, nil +} + +func valueOrUnknown(v string) string { + if v != "" { + return v + } + return metricskey.ValueUnknown +} + +// ReportRequestCount captures request count metric. +func (r *Reporter) ReportRequestCount(responseCode int) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + // Note that service names can be an empty string, so it needs a special treatment. + ctx, err := tag.New( + r.ctx, + tag.Upsert(metrics.ResponseCodeKey, strconv.Itoa(responseCode)), + tag.Upsert(metrics.ResponseCodeClassKey, responseCodeClass(responseCode))) + if err != nil { + return err + } + + pkgmetrics.Record(ctx, r.countMetric.M(1)) + return nil +} + +// ReportQueueDepth captures queue depth metric. +func (r *Reporter) ReportQueueDepth(d int) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + pkgmetrics.Record(r.ctx, r.queueSizeMetric.M(int64(d))) + return nil +} + +// ReportResponseTime captures response time requests +func (r *Reporter) ReportResponseTime(responseCode int, d time.Duration) error { + if !r.initialized { + return errors.New("StatsReporter is not initialized yet") + } + + // Note that service names can be an empty string, so it needs a special treatment. 
+ ctx, err := tag.New( + r.ctx, + tag.Upsert(metrics.ResponseCodeKey, strconv.Itoa(responseCode)), + tag.Upsert(metrics.ResponseCodeClassKey, responseCodeClass(responseCode))) + if err != nil { + return err + } + + pkgmetrics.Record(ctx, r.latencyMetric.M(float64(d.Milliseconds()))) + return nil +} + +// responseCodeClass converts response code to a string of response code class. +// e.g. The response code class is "5xx" for response code 503. +func responseCodeClass(responseCode int) string { + // Get the hundred digit of the response code and concatenate "xx". + return strconv.Itoa(responseCode/100) + "xx" +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter_test.go b/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter_test.go new file mode 100644 index 0000000000..7e8dca7023 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/stats/stats_reporter_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import ( + "errors" + "testing" + "time" + + "go.opencensus.io/stats" + + "knative.dev/pkg/metrics/metricskey" + "knative.dev/pkg/metrics/metricstest" +) + +const ( + testNs = "test-default" + testSvc = "helloworld-go-service" + testConf = "helloworld-go" + testRev = "helloworld-go-00001" + testPod = "helloworld-go-00001-abcd" + countName = "request_count" + qdepthName = "queue_depth" + latencyName = "request_latencies" +) + +var ( + queueSizeMetric = stats.Int64( + qdepthName, + "Queue size", + stats.UnitDimensionless) + countMetric = stats.Int64( + countName, + "The number of requests that are routed to queue-proxy", + stats.UnitDimensionless) + latencyMetric = stats.Float64( + latencyName, + "The response time in millisecond", + stats.UnitMilliseconds) +) + +func TestNewStatsReporterNegative(t *testing.T) { + tests := []struct { + name string + errorMsg string + result error + namespace string + config string + revision string + }{{ + "Empty_Namespace_Value", + "Expected namespace empty error", + errors.New("namespace must not be empty"), + "", + testConf, + testRev, + }, { + "Empty_Config_Value", + "Expected config empty error", + errors.New("config must not be empty"), + testNs, + "", + testRev, + }, { + "Empty_Revision_Value", + "Expected revision empty error", + errors.New("revision must not be empty"), + testRev, + testConf, + "", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if _, err := NewStatsReporter(test.namespace, testSvc, test.config, test.revision, testPod, + countMetric, latencyMetric, queueSizeMetric); err.Error() != test.result.Error() { + t.Errorf("%+v, got: '%+v'", test.errorMsg, err) + } + }) + } +} + +func TestReporterReport(t *testing.T) { + r := &Reporter{} + if err := r.ReportRequestCount(200); err == nil { + t.Error("Reporter.ReportRequestCount() expected an error for Report call before init. 
Got success.") + } + if err := r.ReportQueueDepth(200); err == nil { + t.Error("Reporter.ReportQueueDepth() expected an error for Report call before init. Got success.") + } + if err := r.ReportResponseTime(200, time.Second); err == nil { + t.Error("Reporter.ReportRequestCount() expected an error for Report call before init. Got success.") + } + + r, err := NewStatsReporter(testNs, testSvc, testConf, testRev, testPod, countMetric, latencyMetric, queueSizeMetric) + if err != nil { + t.Fatalf("Unexpected error from NewStatsReporter() = %v", err) + } + wantTags := map[string]string{ + metricskey.LabelNamespaceName: testNs, + metricskey.LabelServiceName: testSvc, + metricskey.LabelConfigurationName: testConf, + metricskey.LabelRevisionName: testRev, + "pod_name": testPod, + "container_name": "queue-proxy", + "response_code": "200", + "response_code_class": "2xx", + } + + // Send statistics only once and observe the results + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportRequestCount(200) }) + metricstest.CheckCountData(t, "request_count", wantTags, 1) + + // The stats are cumulative - record multiple entries, should get sum + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportRequestCount(200) }) + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportRequestCount(200) }) + metricstest.CheckCountData(t, "request_count", wantTags, 3) + + // Send statistics only once and observe the results + expectSuccess(t, "ReportResponseTime", func() error { return r.ReportResponseTime(200, 100*time.Millisecond) }) + metricstest.CheckDistributionData(t, "request_latencies", wantTags, 1, 100, 100) + + // The stats are cumulative - record multiple entries, should get count sum + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportResponseTime(200, 200*time.Millisecond) }) + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportResponseTime(200, 300*time.Millisecond) }) + 
metricstest.CheckDistributionData(t, "request_latencies", wantTags, 3, 100, 300) + + wantTags = map[string]string{ + metricskey.LabelNamespaceName: testNs, + metricskey.LabelServiceName: testSvc, + metricskey.LabelConfigurationName: testConf, + metricskey.LabelRevisionName: testRev, + "pod_name": testPod, + "container_name": "queue-proxy", + } + expectSuccess(t, "QueueDepth", func() error { return r.ReportQueueDepth(1) }) + expectSuccess(t, "QueueDepth", func() error { return r.ReportQueueDepth(2) }) + metricstest.CheckLastValueData(t, "queue_depth", wantTags, 2) + + unregisterViews(r) + + // Test reporter with empty service name + r, err = NewStatsReporter(testNs, "" /*service name*/, testConf, testRev, testPod, countMetric, latencyMetric, queueSizeMetric) + if err != nil { + t.Fatalf("Unexpected error from NewStatsReporter() = %v", err) + } + wantTags = map[string]string{ + metricskey.LabelNamespaceName: testNs, + metricskey.LabelServiceName: "unknown", + metricskey.LabelConfigurationName: testConf, + metricskey.LabelRevisionName: testRev, + "pod_name": testPod, + "container_name": "queue-proxy", + "response_code": "200", + "response_code_class": "2xx", + } + + // Send statistics only once and observe the results + expectSuccess(t, "ReportRequestCount", func() error { return r.ReportRequestCount(200) }) + metricstest.CheckCountData(t, "request_count", wantTags, 1) + + unregisterViews(r) +} + +func expectSuccess(t *testing.T, funcName string, f func() error) { + if err := f(); err != nil { + t.Errorf("Reporter.%v() expected success but got error %v", funcName, err) + } +} + +// unregisterViews unregisters the views registered in NewStatsReporter. 
+func unregisterViews(r *Reporter) error { + if !r.initialized { + return errors.New("reporter is not initialized") + } + metricstest.Unregister(countName, latencyName, qdepthName) + r.initialized = false + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/stats_test.go b/test/vendor/knative.dev/serving/pkg/queue/stats_test.go new file mode 100644 index 0000000000..f24a7afb98 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/stats_test.go @@ -0,0 +1,349 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queue + +import ( + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +type reportedStat struct { + Concurrency float64 + ProxiedConcurrency float64 + RequestCount float64 + ProxiedRequestCount float64 +} + +func TestNoData(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + got := s.report(now) + want := reportedStat{ + Concurrency: 0.0, + RequestCount: 0, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestSingleRequestWholeTime(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(1 * time.Second) + s.requestEnd(now) + + got := s.report(now) + + want := reportedStat{ + Concurrency: 1.0, + RequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestSingleRequestHalfTime(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(1 * time.Second) + s.requestEnd(now) + now = now.Add(1 * time.Second) + got := s.report(now) + + want := reportedStat{ + Concurrency: 0.5, + RequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestVeryShortLivedRequest(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(10 * time.Millisecond) + s.requestEnd(now) + + now = now.Add(990 * time.Millisecond) // make the second full + got := s.report(now) + + want := reportedStat{ + Concurrency: float64(10) / float64(1000), + RequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestMultipleRequestsWholeTime(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(300 * time.Millisecond) + s.requestEnd(now) + + s.requestStart(now) + now = now.Add(300 * 
time.Millisecond) + s.requestEnd(now) + + s.requestStart(now) + now = now.Add(400 * time.Millisecond) + s.requestEnd(now) + + got := s.report(now) + + want := reportedStat{ + Concurrency: 1.0, + RequestCount: 3, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestMultipleRequestsInterleaved(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(100 * time.Millisecond) + s.requestStart(now) + now = now.Add(500 * time.Millisecond) + s.requestEnd(now) + now = now.Add(400 * time.Millisecond) + s.requestEnd(now) + + got := s.report(now) + + want := reportedStat{ + Concurrency: 1.5, + RequestCount: 2, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestOneRequestAcrossReportings(t *testing.T) { + now := time.Now() + s := newTestStats(now) + + s.requestStart(now) + now = now.Add(1 * time.Second) + got1 := s.report(now) + want1 := reportedStat{ + Concurrency: 1.0, + RequestCount: 1, + } + + now = now.Add(500 * time.Millisecond) + s.requestEnd(now) + now = now.Add(500 * time.Millisecond) + got2 := s.report(now) + want2 := reportedStat{ + Concurrency: 0.5, + RequestCount: 0, + } + + if diff := cmp.Diff(want1, got1); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } + if diff := cmp.Diff(want2, got2); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestOneProxiedRequest(t *testing.T) { + now := time.Now() + s := newTestStats(now) + s.proxiedStart(now) + now = now.Add(1 * time.Second) + got := s.report(now) + want := reportedStat{ + Concurrency: 1.0, + ProxiedConcurrency: 1.0, + RequestCount: 1, + ProxiedRequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func TestOneEndedProxiedRequest(t *testing.T) { + now := time.Now() + s := newTestStats(now) + 
s.proxiedStart(now) + now = now.Add(500 * time.Millisecond) + s.proxiedEnd(now) + now = now.Add(500 * time.Millisecond) + got := s.report(now) + want := reportedStat{ + Concurrency: 0.5, + ProxiedConcurrency: 0.5, + RequestCount: 1, + ProxiedRequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +func approxNeq(a, b float64) bool { + return math.Abs(a-b) > 0.0001 +} + +func TestWeightedAverage(t *testing.T) { + // Tests that weightedAverage works correctly, also helps the + // function reader to understand what inputs will result in what + // outputs. + + // Impulse function yields: 1. + in := map[int]time.Duration{ + 1: time.Microsecond, + } + if got, want := weightedAverage(in), 1.; approxNeq(got, want) { + t.Errorf("weightedAverage = %v, want: %v", got, want) + } + + // Step function + // Since the times are the same, we'll return: + // 200*(1+2+3+4+5)/(1000) = 15/5 = 3. + in = map[int]time.Duration{ + 1: 200 * time.Millisecond, + 2: 200 * time.Millisecond, + 3: 200 * time.Millisecond, + 4: 200 * time.Millisecond, + 5: 200 * time.Millisecond, + } + if got, want := weightedAverage(in), 3.; approxNeq(got, want) { + t.Errorf("weightedAverage = %v, want: %v", got, want) + } + + // Weights matter. + in = map[int]time.Duration{ + 1: 800 * time.Millisecond, + 5: 200 * time.Millisecond, + } + // (1*800+5*200)/1000 = 1800/1000 = 1.8 + if got, want := weightedAverage(in), 1.8; approxNeq(got, want) { + t.Errorf("weightedAverage = %v, want: %v", got, want) + } + + // Caret. + in = map[int]time.Duration{ + 1: 100 * time.Millisecond, + 2: 200 * time.Millisecond, + 3: 300 * time.Millisecond, + 4: 200 * time.Millisecond, + 5: 100 * time.Millisecond, + } + // (100+400+900+800+500)/900 = 3 + if got, want := weightedAverage(in), 3.; approxNeq(got, want) { + t.Errorf("weightedAverage = %v, want: %v", got, want) + } + + // Empty. 
+ in = map[int]time.Duration{} + if got, want := weightedAverage(in), 0.; approxNeq(got, want) { + t.Errorf("weightedAverage = %v, want: %v", got, want) + } +} + +func TestTwoRequestsOneProxied(t *testing.T) { + now := time.Now() + s := newTestStats(now) + s.proxiedStart(now) + now = now.Add(500 * time.Millisecond) + s.proxiedEnd(now) + s.requestStart(now) + now = now.Add(500 * time.Millisecond) + got := s.report(now) + want := reportedStat{ + Concurrency: 1.0, + ProxiedConcurrency: 0.5, + RequestCount: 2, + ProxiedRequestCount: 1, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected stat (-want +got): %v", diff) + } +} + +// Test type to hold the bi-directional time channels +type testStats struct { + reqChan chan ReqEvent + reportBiChan chan time.Time + statChan chan reportedStat +} + +func newTestStats(now time.Time) *testStats { + reportBiChan := make(chan time.Time) + reqChan := make(chan ReqEvent) + statChan := make(chan reportedStat) + report := func(acr float64, apcr float64, rc float64, prc float64) { + statChan <- reportedStat{ + Concurrency: acr, + ProxiedConcurrency: apcr, + RequestCount: rc, + ProxiedRequestCount: prc, + } + } + NewStats(now, reqChan, (<-chan time.Time)(reportBiChan), report) + t := &testStats{ + reqChan: reqChan, + reportBiChan: reportBiChan, + statChan: statChan, + } + return t +} + +func (s *testStats) requestStart(now time.Time) { + s.reqChan <- ReqEvent{Time: now, EventType: ReqIn} +} + +func (s *testStats) requestEnd(now time.Time) { + s.reqChan <- ReqEvent{Time: now, EventType: ReqOut} +} + +func (s *testStats) proxiedStart(now time.Time) { + s.reqChan <- ReqEvent{Time: now, EventType: ProxiedIn} +} + +func (s *testStats) proxiedEnd(now time.Time) { + s.reqChan <- ReqEvent{Time: now, EventType: ProxiedOut} +} + +func (s *testStats) report(now time.Time) reportedStat { + s.reportBiChan <- now + return <-s.statChan +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/timeout.go 
b/test/vendor/knative.dev/serving/pkg/queue/timeout.go new file mode 100644 index 0000000000..364e8f34da --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/timeout.go @@ -0,0 +1,172 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "bufio" + "context" + "io" + "net" + "net/http" + "sync" + "time" + + "knative.dev/pkg/websocket" +) + +// TimeToFirstByteTimeoutHandler returns a Handler that runs `h` with the +// given time limit in which the first byte of the response must be written. +// +// The new Handler calls h.ServeHTTP to handle each request, but if a +// call runs for longer than its time limit, the handler responds with +// a 503 Service Unavailable error and the given message in its body. +// (If msg is empty, a suitable default message will be sent.) +// After such a timeout, writes by h to its ResponseWriter will return +// ErrHandlerTimeout. +// +// A panic from the underlying handler is propagated as-is to be able to +// make use of custom panic behavior by HTTP handlers. See +// https://golang.org/pkg/net/http/#Handler. +// +// The implementation is largely inspired by http.TimeoutHandler. 
+func TimeToFirstByteTimeoutHandler(h http.Handler, dt time.Duration, msg string) http.Handler { + return &timeoutHandler{ + handler: h, + body: msg, + dt: dt, + } +} + +type timeoutHandler struct { + handler http.Handler + body string + dt time.Duration +} + +func (h *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx, cancelCtx := context.WithCancel(r.Context()) + defer cancelCtx() + + done := make(chan struct{}) + // The recovery value of a panic is written to this channel to be + // propagated (panicked with) again. + panicChan := make(chan interface{}) + defer close(panicChan) + + tw := &timeoutWriter{w: w} + go func() { + // The defer statements are executed in LIFO order, + // so recover will execute first, then only, the channel will be closed. + defer close(done) + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + h.handler.ServeHTTP(tw, r.WithContext(ctx)) + }() + + timeout := time.NewTimer(h.dt) + defer timeout.Stop() + for { + select { + case p := <-panicChan: + panic(p) + case <-done: + return + case <-timeout.C: + if tw.TimeoutAndWriteError(h.body) { + return + } + } + } +} + +// timeoutWriter is a wrapper around an http.ResponseWriter. It guards +// writing an error response to whether or not the underlying writer has +// already been written to. +// +// If the underlying writer has not been written to, an error response is +// returned. If it has already been written to, the error is ignored and +// the response is allowed to continue. 
+type timeoutWriter struct {
+	w http.ResponseWriter
+
+	mu        sync.Mutex
+	timedOut  bool
+	wroteOnce bool
+}
+
+var _ http.Flusher = (*timeoutWriter)(nil)
+
+var _ http.ResponseWriter = (*timeoutWriter)(nil)
+
+func (tw *timeoutWriter) Flush() {
+	tw.w.(http.Flusher).Flush()
+}
+
+// Hijack calls Hijack() on the wrapped http.ResponseWriter if it implements
+// http.Hijacker interface, which is required for net/http/httputil/reverseproxy
+// to handle connection upgrade/switching protocol. Otherwise returns an error.
+func (tw *timeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return websocket.HijackIfPossible(tw.w)
+}
+
+func (tw *timeoutWriter) Header() http.Header { return tw.w.Header() }
+
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
+	tw.mu.Lock()
+	defer tw.mu.Unlock()
+	if tw.timedOut {
+		return 0, http.ErrHandlerTimeout
+	}
+
+	tw.wroteOnce = true
+	return tw.w.Write(p)
+}
+
+func (tw *timeoutWriter) WriteHeader(code int) {
+	tw.mu.Lock()
+	defer tw.mu.Unlock()
+	if tw.timedOut {
+		return
+	}
+
+	tw.wroteOnce = true
+	tw.w.WriteHeader(code)
+}
+
+// TimeoutAndWriteError writes an error to the response writer if
+// nothing has been written on the writer before. Returns whether
+// an error was written or not.
+//
+// If this writes an error, all subsequent calls to Write will
+// result in http.ErrHandlerTimeout.
+func (tw *timeoutWriter) TimeoutAndWriteError(msg string) bool { + tw.mu.Lock() + defer tw.mu.Unlock() + + if !tw.wroteOnce { + tw.w.WriteHeader(http.StatusServiceUnavailable) + io.WriteString(tw.w, msg) + + tw.timedOut = true + return true + } + + return false +} diff --git a/test/vendor/knative.dev/serving/pkg/queue/timeout_test.go b/test/vendor/knative.dev/serving/pkg/queue/timeout_test.go new file mode 100644 index 0000000000..4c342b1b3e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/queue/timeout_test.go @@ -0,0 +1,139 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package queue + +import ( + "io" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" +) + +func TestTimeToFirstByteTimeoutHandler(t *testing.T) { + const ( + failingTimeout = 0 * time.Millisecond + longTimeout = 10 * time.Second + ) + + tests := []struct { + name string + timeout time.Duration + handler func(mux *sync.Mutex, writeErrors chan error) http.Handler + timeoutMessage string + wantStatus int + wantBody string + wantWriteError bool + wantPanic bool + }{{ + name: "all good", + timeout: longTimeout, + handler: func(*sync.Mutex, chan error) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + }, + wantStatus: http.StatusOK, + wantBody: "hi", + }, { + name: "custom timeout message", + timeout: failingTimeout, + handler: func(mux *sync.Mutex, writeErrors chan error) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mux.Lock() + defer mux.Unlock() + _, werr := w.Write([]byte("hi")) + writeErrors <- werr + }) + }, + timeoutMessage: "request timeout", + wantStatus: http.StatusServiceUnavailable, + wantBody: "request timeout", + wantWriteError: true, + }, { + name: "propagate panic", + timeout: longTimeout, + handler: func(*sync.Mutex, chan error) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic(http.ErrAbortHandler) + }) + }, + wantStatus: http.StatusServiceUnavailable, + wantBody: "request timeout", + wantPanic: true, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "/", nil) + if err != nil { + t.Fatal(err) + } + + var reqMux sync.Mutex + writeErrors := make(chan error, 1) + rr := httptest.NewRecorder() + handler := TimeToFirstByteTimeoutHandler(test.handler(&reqMux, writeErrors), test.timeout, test.timeoutMessage) + + defer func() { + if test.wantPanic { + if recovered := recover(); recovered != http.ErrAbortHandler { + 
t.Error("Expected the handler to panic, but it didn't.") + } + } + }() + + reqMux.Lock() // Will cause an inner 'Lock' to block. ServeHTTP will exit early if the call times out. + handler.ServeHTTP(rr, req) + reqMux.Unlock() // Allows the inner 'Lock' to go through to complete potential writes. + + if status := rr.Code; status != test.wantStatus { + t.Errorf("Handler returned wrong status code: got %v want %v", status, test.wantStatus) + } + + if rr.Body.String() != test.wantBody { + t.Errorf("Handler returned unexpected body: got %q want %q", rr.Body.String(), test.wantBody) + } + + if test.wantWriteError { + err := <-writeErrors + if err != http.ErrHandlerTimeout { + t.Errorf("Expected a timeout error, got %v", err) + } + } + }) + } +} + +func TestTimeoutWriterAllowsForAdditionalWrites(t *testing.T) { + recorder := httptest.NewRecorder() + handler := &timeoutWriter{ + w: recorder, + } + + handler.WriteHeader(http.StatusOK) + handler.TimeoutAndWriteError("error") + if _, err := io.WriteString(handler, "test"); err != nil { + t.Fatalf("handler.Write() = %v, want no error", err) + } + + if got, want := recorder.Code, http.StatusOK; got != want { + t.Errorf("recorder.Status = %d, want %d", got, want) + } + if got, want := recorder.Body.String(), "test"; got != want { + t.Errorf("recorder.Body = %s, want %s", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/OWNERS b/test/vendor/knative.dev/serving/pkg/reconciler/OWNERS new file mode 100644 index 0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret.go new file mode 100644 index 0000000000..2213e035f8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" +) + +// SecretAccessor is an interface for accessing Secret. +type SecretAccessor interface { + GetKubeClient() kubernetes.Interface + GetSecretLister() corev1listers.SecretLister +} + +// ReconcileSecret reconciles Secret to the desired status. 
+func ReconcileSecret(ctx context.Context, owner kmeta.Accessor, desired *corev1.Secret, accessor SecretAccessor) (*corev1.Secret, error) {
+	recorder := controller.GetEventRecorder(ctx)
+	if recorder == nil {
+		return nil, fmt.Errorf("recorder for reconciling Secret %s/%s is not created", desired.Namespace, desired.Name)
+	}
+	secret, err := accessor.GetSecretLister().Secrets(desired.Namespace).Get(desired.Name)
+	if apierrs.IsNotFound(err) {
+		secret, err = accessor.GetKubeClient().CoreV1().Secrets(desired.Namespace).Create(desired)
+		if err != nil {
+			recorder.Eventf(owner, corev1.EventTypeWarning, "CreationFailed",
+				"Failed to create Secret %s/%s: %v", desired.Namespace, desired.Name, err)
+			return nil, fmt.Errorf("failed to create Secret: %w", err)
+		}
+		recorder.Eventf(owner, corev1.EventTypeNormal, "Created", "Created Secret %s/%s", desired.Namespace, desired.Name)
+	} else if err != nil {
+		return nil, fmt.Errorf("failed to get Secret: %w", err)
+	} else if !metav1.IsControlledBy(secret, owner) {
+		// Return an error with NotControlledBy information.
+ return nil, kaccessor.NewAccessorError( + fmt.Errorf("owner: %s with Type %T does not own Secret: %s", owner.GetName(), owner, secret.Name), + kaccessor.NotOwnResource) + } else if !equality.Semantic.DeepEqual(secret.Data, desired.Data) { + // Don't modify the informers copy + copy := secret.DeepCopy() + copy.Data = desired.Data + secret, err = accessor.GetKubeClient().CoreV1().Secrets(copy.Namespace).Update(copy) + if err != nil { + recorder.Eventf(owner, corev1.EventTypeWarning, "UpdateFailed", "Failed to update Secret %s/%s: %v", desired.Namespace, desired.Name, err) + return nil, fmt.Errorf("failed to update Secret: %w", err) + } + recorder.Eventf(owner, corev1.EventTypeNormal, "Updated", "Updated Secret %s/%s", copy.Namespace, copy.Name) + } + return secret, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret_test.go new file mode 100644 index 0000000000..36ce7eafca --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/core/secret_test.go @@ -0,0 +1,194 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package core + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakesecretinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/secret/fake" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" + + . "knative.dev/pkg/reconciler/testing" +) + +var ( + ownerObj = &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ownerObj", + Namespace: "default", + UID: "abcd", + }, + } + + ownerRef = metav1.OwnerReference{ + Kind: ownerObj.Kind, + Name: ownerObj.Name, + UID: ownerObj.UID, + Controller: ptr.Bool(true), + } + + origin = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Data: map[string][]byte{ + "test-secret": []byte("origin"), + }, + } + + desired = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Data: map[string][]byte{ + "test-secret": []byte("desired"), + }, + } + + notOwnedSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "test-secret": []byte("origin"), + }, + } +) + +type FakeAccessor struct { + client kubernetes.Interface + secretLister corev1listers.SecretLister +} + +func (f *FakeAccessor) GetKubeClient() kubernetes.Interface { + return f.client +} + +func (f *FakeAccessor) GetSecretLister() corev1listers.SecretLister { + return f.secretLister +} + +func TestReconcileSecretCreate(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + kubeClient := fakekubeclient.Get(ctx) + + h := 
NewHooks() + h.OnCreate(&kubeClient.Fake, "secrets", func(obj runtime.Object) HookResult { + got := obj.(*corev1.Secret) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected Secret (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + accessor, waitInformers := setup(ctx, []*corev1.Secret{}, kubeClient, t) + defer func() { + cancel() + waitInformers() + }() + + ReconcileSecret(ctx, ownerObj, desired, accessor) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile Secret: %v", err) + } +} + +func TestReconcileSecretUpdate(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + + kubeClient := fakekubeclient.Get(ctx) + accessor, waitInformers := setup(ctx, []*corev1.Secret{origin}, kubeClient, t) + defer func() { + cancel() + waitInformers() + }() + + h := NewHooks() + h.OnUpdate(&kubeClient.Fake, "secrets", func(obj runtime.Object) HookResult { + got := obj.(*corev1.Secret) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected Secret (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + ReconcileSecret(ctx, ownerObj, desired, accessor) + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile Secret: %v", err) + } +} + +func TestNotOwnedFailure(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + + kubeClient := fakekubeclient.Get(ctx) + accessor, waitInformers := setup(ctx, []*corev1.Secret{notOwnedSecret}, kubeClient, t) + defer func() { + cancel() + waitInformers() + }() + + _, err := ReconcileSecret(ctx, ownerObj, desired, accessor) + if err == nil { + t.Error("Expected to get error when calling ReconcileSecret, but got no error.") + } + if !kaccessor.IsNotOwned(err) { + t.Errorf("Expected to get NotOwnedError but got %v", err) + } +} + +func setup(ctx context.Context, secrets []*corev1.Secret, + kubeClient kubernetes.Interface, t *testing.T) (*FakeAccessor, func()) { + + 
secretInformer := fakesecretinformer.Get(ctx) + + fake := fakekubeclient.Get(ctx) + for _, secret := range secrets { + fake.CoreV1().Secrets(secret.Namespace).Create(secret) + secretInformer.Informer().GetIndexer().Add(secret) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), secretInformer.Informer()) + if err != nil { + t.Fatalf("failed to start secret informer: %v", err) + } + + return &FakeAccessor{ + client: kubeClient, + secretLister: secretInformer.Lister(), + }, waitInformers +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors.go new file mode 100644 index 0000000000..2fffa9e094 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package accessor + +import "strings" + +// Error defines a type of error coming from Accessor. +type Error struct { + err error + errorReason string +} + +const ( + // NotOwnResource means the accessor does not own the resource. 
+ NotOwnResource string = "NotOwned" +) + +// NewAccessorError creates a new accessor Error +func NewAccessorError(err error, reason string) Error { + return Error{ + err: err, + errorReason: reason, + } +} + +func (a Error) Error() string { + return strings.ToLower(string(a.errorReason)) + ": " + a.err.Error() +} + +// IsNotOwned returns true if the error is caused by NotOwnResource. +func IsNotOwned(err error) bool { + accessorError, ok := err.(Error) + if !ok { + return false + } + return accessorError.errorReason == NotOwnResource +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors_test.go new file mode 100644 index 0000000000..0fcc2693c7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/errors_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package accessor + +import ( + "errors" + "fmt" + "testing" +) + +func TestIsNotOwned(t *testing.T) { + cases := []struct { + name string + err error + want bool + }{{ + name: "IsNotOwned error", + err: Error{ + err: fmt.Errorf("test error"), + errorReason: NotOwnResource, + }, + want: true, + }, { + name: "other error", + err: errors.New("test error"), + want: false, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if got := IsNotOwned(tc.err); tc.want != got { + t.Errorf("IsNotOwned(%v) = %v, want = %v", tc.err, got, tc.want) + } + }) + } +} + +func TestError(t *testing.T) { + err := Error{ + err: fmt.Errorf("test error"), + errorReason: NotOwnResource, + } + if got, want := err.Error(), "notowned: test error"; got != want { + t.Errorf("Error() = %q, want = %q", got, want) + } +} + +func TestNewAccessorError(t *testing.T) { + cases := []struct { + name string + err error + reason string + want string + }{{ + name: "error with reason", + err: errors.New("test error"), + reason: NotOwnResource, + want: "notowned: test error", + }, { + name: "error with no reason", + err: errors.New("test error"), + reason: "", + want: ": test error", + }, { + name: "error with no message and with reason", + err: errors.New(""), + reason: NotOwnResource, + want: "notowned: ", + }, { + name: "error with no message and reason", + err: errors.New(""), + reason: "", + want: ": ", + }} + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if got := NewAccessorError(tc.err, tc.reason); got.Error() != tc.want { + t.Errorf("NewAccessorError() = %q, want = %q", got.Error(), tc.want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice.go new file mode 100644 index 0000000000..85425ede8f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice.go @@ -0,0 +1,86 @@ +/* 
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package istio
+
+import (
+	"context"
+	"fmt"
+
+	"istio.io/client-go/pkg/apis/networking/v1alpha3"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/controller"
+	"knative.dev/pkg/kmeta"
+	istioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned"
+	istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3"
+	kaccessor "knative.dev/serving/pkg/reconciler/accessor"
+)
+
+// VirtualServiceAccessor is an interface for accessing VirtualService.
+type VirtualServiceAccessor interface {
+	GetIstioClient() istioclientset.Interface
+	GetVirtualServiceLister() istiolisters.VirtualServiceLister
+}
+
+func hasDesiredDiff(current, desired *v1alpha3.VirtualService) bool {
+	return !equality.Semantic.DeepEqual(current.Spec, desired.Spec) ||
+		!equality.Semantic.DeepEqual(current.Labels, desired.Labels) ||
+		!equality.Semantic.DeepEqual(current.Annotations, desired.Annotations)
+}
+
+// ReconcileVirtualService reconciles VirtualService to the desired status.
+func ReconcileVirtualService(ctx context.Context, owner kmeta.Accessor, desired *v1alpha3.VirtualService, + vsAccessor VirtualServiceAccessor) (*v1alpha3.VirtualService, error) { + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + return nil, fmt.Errorf("recoder for reconciling VirtualService %s/%s is not created", desired.Namespace, desired.Name) + } + ns := desired.Namespace + name := desired.Name + vs, err := vsAccessor.GetVirtualServiceLister().VirtualServices(ns).Get(name) + if apierrs.IsNotFound(err) { + vs, err = vsAccessor.GetIstioClient().NetworkingV1alpha3().VirtualServices(ns).Create(desired) + if err != nil { + recorder.Eventf(owner, corev1.EventTypeWarning, "CreationFailed", + "Failed to create VirtualService %s/%s: %v", ns, name, err) + return nil, fmt.Errorf("failed to create VirtualService: %w", err) + } + recorder.Eventf(owner, corev1.EventTypeNormal, "Created", "Created VirtualService %q", desired.Name) + } else if err != nil { + return nil, err + } else if !metav1.IsControlledBy(vs, owner) { + // Return an error with NotControlledBy information. 
+ return nil, kaccessor.NewAccessorError( + fmt.Errorf("owner: %s with Type %T does not own VirtualService: %q", owner.GetName(), owner, name), + kaccessor.NotOwnResource) + } else if hasDesiredDiff(vs, desired) { + // Don't modify the informers copy + existing := vs.DeepCopy() + existing.Spec = desired.Spec + existing.Labels = desired.Labels + existing.Annotations = desired.Annotations + vs, err = vsAccessor.GetIstioClient().NetworkingV1alpha3().VirtualServices(ns).Update(existing) + if err != nil { + return nil, fmt.Errorf("failed to update VirtualService: %w", err) + } + recorder.Eventf(owner, corev1.EventTypeNormal, "Updated", "Updated VirtualService %s/%s", ns, name) + } + return vs, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice_test.go new file mode 100644 index 0000000000..1b42c26f8b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/istio/virtualservice_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package istio + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + istioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned" + istiofake "knative.dev/serving/pkg/client/istio/clientset/versioned/fake" + istioinformers "knative.dev/serving/pkg/client/istio/informers/externalversions" + fakeistioclient "knative.dev/serving/pkg/client/istio/injection/client/fake" + istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" + + . "knative.dev/pkg/reconciler/testing" +) + +var ( + ownerObj = &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ownerObj", + Namespace: "default", + UID: "abcd", + }, + } + + ownerRef = metav1.OwnerReference{ + Kind: ownerObj.Kind, + Name: ownerObj.Name, + UID: ownerObj.UID, + Controller: ptr.Bool(true), + } + + origin = &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vs", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: istiov1alpha3.VirtualService{ + Hosts: []string{"origin.example.com"}, + }, + } + + desired = &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vs", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: istiov1alpha3.VirtualService{ + Hosts: []string{"desired.example.com"}, + }, + } +) + +type FakeAccessor struct { + client istioclientset.Interface + vsLister istiolisters.VirtualServiceLister +} + +func (f *FakeAccessor) GetIstioClient() istioclientset.Interface { + return f.client +} + +func (f *FakeAccessor) GetVirtualServiceLister() istiolisters.VirtualServiceLister { + return f.vsLister +} + +func TestReconcileVirtualService_Create(t *testing.T) { + ctx, _ := 
SetupFakeContext(t) + ctx, cancel := context.WithCancel(ctx) + + istioClient := fakeistioclient.Get(ctx) + + h := NewHooks() + h.OnCreate(&istioClient.Fake, "virtualservices", func(obj runtime.Object) HookResult { + got := obj.(*v1alpha3.VirtualService) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected VirtualService (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + accessor, waitInformers := setup(ctx, []*v1alpha3.VirtualService{}, istioClient, t) + defer func() { + cancel() + waitInformers() + }() + + ReconcileVirtualService(ctx, ownerObj, desired, accessor) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile VirtualService: %v", err) + } +} + +func TestReconcileVirtualService_Update(t *testing.T) { + ctx, _ := SetupFakeContext(t) + ctx, cancel := context.WithCancel(ctx) + + istioClient := fakeistioclient.Get(ctx) + accessor, waitInformers := setup(ctx, []*v1alpha3.VirtualService{origin}, istioClient, t) + defer func() { + cancel() + waitInformers() + }() + + h := NewHooks() + h.OnUpdate(&istioClient.Fake, "virtualservices", func(obj runtime.Object) HookResult { + got := obj.(*v1alpha3.VirtualService) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected VirtualService (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + ReconcileVirtualService(ctx, ownerObj, desired, accessor) + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile VirtualService: %v", err) + } +} + +func setup(ctx context.Context, vses []*v1alpha3.VirtualService, + istioClient istioclientset.Interface, t *testing.T) (*FakeAccessor, func()) { + + fake := istiofake.NewSimpleClientset() + informer := istioinformers.NewSharedInformerFactory(fake, 0) + vsInformer := informer.Networking().V1alpha3().VirtualServices() + + for _, vs := range vses { + fake.NetworkingV1alpha3().VirtualServices(vs.Namespace).Create(vs) + 
vsInformer.Informer().GetIndexer().Add(vs) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), vsInformer.Informer()) + if err != nil { + t.Fatalf("failed to start virtualservice informer: %v", err) + } + + return &FakeAccessor{ + client: istioClient, + vsLister: vsInformer.Lister(), + }, waitInformers +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go new file mode 100644 index 0000000000..fb495b7c19 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package networking + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" +) + +// CertificateAccessor is an interface for accessing Knative Certificate. 
+type CertificateAccessor interface { + GetServingClient() clientset.Interface + GetCertificateLister() listers.CertificateLister +} + +// ReconcileCertificate reconciles Certificate to the desired status. +func ReconcileCertificate(ctx context.Context, owner kmeta.Accessor, desired *v1alpha1.Certificate, + certAccessor CertificateAccessor) (*v1alpha1.Certificate, error) { + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + return nil, fmt.Errorf("recoder for reconciling Certificate %s/%s is not created", desired.Namespace, desired.Name) + } + cert, err := certAccessor.GetCertificateLister().Certificates(desired.Namespace).Get(desired.Name) + if apierrs.IsNotFound(err) { + cert, err = certAccessor.GetServingClient().NetworkingV1alpha1().Certificates(desired.Namespace).Create(desired) + if err != nil { + recorder.Eventf(owner, corev1.EventTypeWarning, "CreationFailed", + "Failed to create Certificate %s/%s: %v", desired.Namespace, desired.Name, err) + return nil, fmt.Errorf("failed to create Certificate: %w", err) + } + recorder.Eventf(owner, corev1.EventTypeNormal, "Created", "Created Certificate %s/%s", cert.Namespace, cert.Name) + return cert, nil + } else if err != nil { + return nil, fmt.Errorf("failed to get Certificate: %w", err) + } else if !metav1.IsControlledBy(cert, owner) { + // Return an error with NotControlledBy information. 
+ return nil, kaccessor.NewAccessorError( + fmt.Errorf("owner: %s with Type %T does not own Certificate: %q", owner.GetName(), owner, cert.Name), + kaccessor.NotOwnResource) + } else if !equality.Semantic.DeepEqual(cert.Spec, desired.Spec) { + // Don't modify the informers copy + existing := cert.DeepCopy() + existing.Spec = desired.Spec + cert, err = certAccessor.GetServingClient().NetworkingV1alpha1().Certificates(existing.Namespace).Update(existing) + if err != nil { + recorder.Eventf(owner, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update Certificate %s/%s: %v", existing.Namespace, existing.Name, err) + return nil, fmt.Errorf("failed to update Certificate: %w", err) + } + recorder.Eventf(owner, corev1.EventTypeNormal, "Updated", + "Updated Spec for Certificate %s/%s", existing.Namespace, existing.Name) + } + return cert, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate_test.go new file mode 100644 index 0000000000..f8d8f11ac8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package networking + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakecertinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + + . "knative.dev/pkg/reconciler/testing" +) + +var ( + ownerObj = &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ownerObj", + Namespace: "default", + UID: "abcd", + }, + } + + ownerRef = metav1.OwnerReference{ + Kind: ownerObj.Kind, + Name: ownerObj.Name, + UID: ownerObj.UID, + Controller: ptr.Bool(true), + } + + origin = &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cert", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: []string{"origin.example.com"}, + SecretName: "secret0", + }, + } + + desired = &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cert", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ownerRef}, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: []string{"desired.example.com"}, + SecretName: "secret0", + }, + } +) + +type FakeAccessor struct { + client clientset.Interface + certLister listers.CertificateLister +} + +func (f *FakeAccessor) GetServingClient() clientset.Interface { + return f.client +} + +func (f *FakeAccessor) GetCertificateLister() listers.CertificateLister { + return f.certLister +} + +func TestReconcileCertificateCreate(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + + client := fakeservingclient.Get(ctx) + + h := NewHooks() 
+ h.OnCreate(&client.Fake, "certificates", func(obj runtime.Object) HookResult { + got := obj.(*v1alpha1.Certificate) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected Certificate (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + accessor, waitInformers := setup(ctx, []*v1alpha1.Certificate{}, client, t) + defer func() { + cancel() + waitInformers() + }() + + ReconcileCertificate(ctx, ownerObj, desired, accessor) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile Certificate: %v", err) + } +} + +func TestReconcileCertificateUpdate(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + + client := fakeservingclient.Get(ctx) + accessor, waitInformers := setup(ctx, []*v1alpha1.Certificate{origin}, client, t) + defer func() { + cancel() + waitInformers() + }() + + h := NewHooks() + h.OnUpdate(&client.Fake, "certificates", func(obj runtime.Object) HookResult { + got := obj.(*v1alpha1.Certificate) + if diff := cmp.Diff(got, desired); diff != "" { + t.Logf("Unexpected Certificate (-want, +got): %v", diff) + return HookIncomplete + } + return HookComplete + }) + + ReconcileCertificate(ctx, ownerObj, desired, accessor) + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("Failed to Reconcile Certificate: %v", err) + } +} + +func setup(ctx context.Context, certs []*v1alpha1.Certificate, + client clientset.Interface, t *testing.T) (*FakeAccessor, func()) { + + fake := fakeservingclient.Get(ctx) + certInformer := fakecertinformer.Get(ctx) + + for _, cert := range certs { + fake.NetworkingV1alpha1().Certificates(cert.Namespace).Create(cert) + certInformer.Informer().GetIndexer().Add(cert) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), certInformer.Informer()) + if err != nil { + t.Fatalf("failed to start Certificate informer: %v", err) + } + + return &FakeAccessor{ + client: client, + certLister: certInformer.Lister(), + }, 
waitInformers +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/OWNERS b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/OWNERS new file mode 100644 index 0000000000..690ff0e48e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- autoscaling-approvers + +reviewers: +- autoscaling-reviewers + +labels: +- area/autoscale diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/doc.go new file mode 100644 index 0000000000..4a0605f660 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Autoscaling controller depends. +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store.go new file mode 100644 index 0000000000..3b6c5a93c4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store.go @@ -0,0 +1,84 @@ +/* +Copyright 2019 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/serving/pkg/autoscaler" +) + +type cfgKey struct{} + +// Config of the Autoscaler. +// +k8s:deepcopy-gen=false +type Config struct { + Autoscaler *autoscaler.Config +} + +// FromContext fetch config from context. +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +// ToContext adds config to given context. +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is configmap.UntypedStore based config store. +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configmap.UntypedStore based config store. +// +// logger must be non-nil implementation of configmap.Logger (commonly used +// loggers conform) +// +// onAfterStore is a variadic list of callbacks to run +// after the ConfigMap has been processed and stored. +// +// See also: configmap.NewUntypedStore(). +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "autoscaler", + logger, + configmap.Constructors{ + autoscaler.ConfigName: autoscaler.NewConfigFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +// ToContext adds Store contents to given context. 
+func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +// Load fetches config from Store. +func (s *Store) Load() *Config { + return &Config{ + Autoscaler: s.UntypedLoad(autoscaler.ConfigName).(*autoscaler.Config).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store_test.go new file mode 100644 index 0000000000..3acb51780f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/store_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + logtesting "knative.dev/pkg/logging/testing" + + . 
"knative.dev/pkg/configmap/testing" + "knative.dev/serving/pkg/autoscaler" +) + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + autoscalerConfig := ConfigMapFromTestFile(t, autoscaler.ConfigName) + store.OnConfigChanged(autoscalerConfig) + config := FromContext(store.ToContext(context.Background())) + + want, _ := autoscaler.NewConfigFromConfigMap(autoscalerConfig) + if diff := cmp.Diff(want, config.Autoscaler); diff != "" { + t.Errorf("Unexpected TLS mode (-want, +got): %s", diff) + } +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + store.OnConfigChanged(ConfigMapFromTestFile(t, autoscaler.ConfigName)) + + config := store.Load() + config.Autoscaler.MaxScaleUpRate = 100.0 + newConfig := store.Load() + + if newConfig.Autoscaler.MaxScaleUpRate == 100.0 { + t.Error("Autoscaler config is not immuable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/testdata/config-autoscaler.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/testdata/config-autoscaler.yaml new file mode 100644 index 0000000000..37c61bf2b7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/config/testdata/config-autoscaler.yaml @@ -0,0 +1,91 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-autoscaler + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this block and unindented to actually change the configuration. + + # The Revision ContainerConcurrency field specifies the maximum number + # of requests the Container can handle at once. Container concurrency + # target percentage is how much of that maximum to use in a stable + # state. E.g. if a Revision specifies ContainerConcurrency of 10, then + # the Autoscaler will try to maintain 7 concurrent connections per pod + # on average. A value of 0.7 is chosen because the Autoscaler panics + # when concurrency exceeds 2x the desired set point. So we will panic + # before we reach the limit. + container-concurrency-target-percentage: "1.0" + + # The container concurrency target default is what the Autoscaler will + # try to maintain when the Revision specifies unlimited concurrency. + # Even when specifying unlimited concurrency, the autoscaler will + # horizontally scale the application based on this target concurrency. + container-concurrency-target-default: "100" + + # When operating in a stable mode, the autoscaler operates on the + # average concurrency over the stable window. + stable-window: "60s" + + # When observed average concurrency during the panic window reaches + # panic-threshold-percentage the target concurrency, the autoscaler + # enters panic mode. 
When operating in panic mode, the autoscaler + # scales on the average concurrency over the panic window which is + # panic-window-percentage of the stable-window. + panic-window-percentage: "10.0" + + # Absolute panic window duration. + # Deprecated in favor of panic-window-percentage. + # Existing revisions will continue to scale based on panic-window + # but new revisions will default to panic-window-percentage. + panic-window: "6s" + + # The percentage of the container concurrency target at which to + # enter panic mode when reached within the panic window. + panic-threshold-percentage: "200.0" + + # Max scale up rate limits the rate at which the autoscaler will + # increase pod count. It is the maximum ratio of desired pods versus + # observed pods. + max-scale-up-rate: "10" + + # Scale to zero feature flag + enable-scale-to-zero: "true" + + # Tick interval is the time between autoscaling calculations. + tick-interval: "2s" + + # Dynamic parameters (take effect when config map is updated): + + # Scale to zero grace period is the time an inactive revision is left + # running before it is scaled to zero (min: 30s). + scale-to-zero-grace-period: "30s" diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/controller.go new file mode 100644 index 0000000000..77c83e3eca --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/controller.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hpa + +import ( + "context" + + hpainformer "knative.dev/pkg/client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + metricinformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric" + painformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler" + sksinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/reconciler" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" +) + +const ( + controllerAgentName = "hpa-class-podautoscaler-controller" +) + +// NewController returns a new HPA reconcile controller. 
+func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + + paInformer := painformer.Get(ctx) + sksInformer := sksinformer.Get(ctx) + hpaInformer := hpainformer.Get(ctx) + serviceInformer := serviceinformer.Get(ctx) + metricInformer := metricinformer.Get(ctx) + + c := &Reconciler{ + Base: &areconciler.Base{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + PALister: paInformer.Lister(), + SKSLister: sksInformer.Lister(), + ServiceLister: serviceInformer.Lister(), + MetricLister: metricInformer.Lister(), + PSInformerFactory: podscalable.Get(ctx), + }, + hpaLister: hpaInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, "HPA-Class Autoscaling") + + c.Logger.Info("Setting up hpa-class event handlers") + onlyHpaClass := reconciler.AnnotationFilterFunc(autoscaling.ClassAnnotationKey, autoscaling.HPA, false) + paHandler := cache.FilteringResourceEventHandler{ + FilterFunc: onlyHpaClass, + Handler: controller.HandleAll(impl.Enqueue), + } + paInformer.Informer().AddEventHandler(paHandler) + + hpaInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyHpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + sksInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyHpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + metricInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyHpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + c.Logger.Info("Setting up ConfigMap receivers") + configsToResync := []interface{}{ + &autoscaler.Config{}, + } + resync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + impl.FilteredGlobalResync(onlyHpaClass, paInformer.Informer()) + }) + configStore := config.NewStore(c.Logger.Named("config-store"), resync) + configStore.WatchConfigs(cmw) + c.ConfigStore = configStore + + return impl +} diff 
--git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa.go new file mode 100644 index 0000000000..187a7e0288 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hpa + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + autoscalingv2beta1listers "k8s.io/client-go/listers/autoscaling/v2beta1" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/autoscaling" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + nv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/hpa/resources" +) + +// Reconciler implements the control loop for the HPA resources. +type Reconciler struct { + *areconciler.Base + hpaLister autoscalingv2beta1listers.HorizontalPodAutoscalerLister +} + +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile is the entry point to the reconciliation control loop. 
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.ConfigStore.ToContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + logger.Debug("Reconcile hpa-class PodAutoscaler") + + original, err := c.PALister.PodAutoscalers(namespace).Get(name) + if errors.IsNotFound(err) { + logger.Info("PA in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informer's copy. + pa := original.DeepCopy() + // Reconcile this copy of the pa and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := c.reconcile(ctx, key, pa) + if equality.Semantic.DeepEqual(original.Status, pa.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = c.UpdateStatus(original, pa); err != nil { + logger.Warnw("Failed to update pa status", zap.Error(err)) + c.Recorder.Eventf(pa, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for PA %q: %v", pa.Name, err) + return err + } + if reconcileErr != nil { + c.Recorder.Event(pa, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + } + return reconcileErr +} + +func (c *Reconciler) reconcile(ctx context.Context, key string, pa *pav1alpha1.PodAutoscaler) error { + logger := logging.FromContext(ctx) + + if pa.GetDeletionTimestamp() != nil { + return nil + } + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. 
+ pa.SetDefaults(ctx) + + pa.Status.InitializeConditions() + logger.Debug("PA exists") + + // HPA-class PAs don't yet support scale-to-zero + pa.Status.MarkActive() + + // HPA-class PA delegates autoscaling to the Kubernetes Horizontal Pod Autoscaler. + desiredHpa := resources.MakeHPA(pa, config.FromContext(ctx).Autoscaler) + hpa, err := c.hpaLister.HorizontalPodAutoscalers(pa.Namespace).Get(desiredHpa.Name) + if errors.IsNotFound(err) { + logger.Infof("Creating HPA %q", desiredHpa.Name) + if hpa, err = c.KubeClientSet.AutoscalingV2beta1().HorizontalPodAutoscalers(pa.Namespace).Create(desiredHpa); err != nil { + pa.Status.MarkResourceFailedCreation("HorizontalPodAutoscaler", desiredHpa.Name) + return fmt.Errorf("failed to create HPA: %w", err) + } + } else if err != nil { + return fmt.Errorf("failed to get HPA: %w", err) + } else if !metav1.IsControlledBy(hpa, pa) { + // Surface an error in the PodAutoscaler's status, and return an error. + pa.Status.MarkResourceNotOwned("HorizontalPodAutoscaler", desiredHpa.Name) + return fmt.Errorf("PodAutoscaler: %q does not own HPA: %q", pa.Name, desiredHpa.Name) + } + if !equality.Semantic.DeepEqual(desiredHpa.Spec, hpa.Spec) { + logger.Infof("Updating HPA %q", desiredHpa.Name) + if _, err := c.KubeClientSet.AutoscalingV2beta1().HorizontalPodAutoscalers(pa.Namespace).Update(desiredHpa); err != nil { + return fmt.Errorf("failed to update HPA: %w", err) + } + } + + sks, err := c.ReconcileSKS(ctx, pa, nv1alpha1.SKSOperationModeServe) + if err != nil { + return fmt.Errorf("error reconciling SKS: %w", err) + } + + // Only create metrics service and metric entity if we actually need to gather metrics. 
+ pa.Status.MetricsServiceName = sks.Status.PrivateServiceName + if pa.Status.MetricsServiceName != "" && pa.Metric() == autoscaling.Concurrency || pa.Metric() == autoscaling.RPS { + if err := c.ReconcileMetric(ctx, pa, pa.Status.MetricsServiceName); err != nil { + return fmt.Errorf("error reconciling metric: %w", err) + } + } + + // Propagate the service name regardless of the status. + pa.Status.ServiceName = sks.Status.ServiceName + if !sks.Status.IsReady() { + pa.Status.MarkInactive("ServicesNotReady", "SKS Services are not ready yet") + } else { + pa.Status.MarkActive() + } + + // Metrics services are no longer needed as we use the private services now. + if err := c.DeleteMetricsServices(ctx, pa); err != nil { + return err + } + + pa.Status.ObservedGeneration = pa.Generation + pa.Status.DesiredScale = ptr.Int32(hpa.Status.DesiredReplicas) + pa.Status.ActualScale = ptr.Int32(hpa.Status.CurrentReplicas) + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa_test.go new file mode 100644 index 0000000000..c75adfdd82 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/hpa/hpa_test.go @@ -0,0 +1,585 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hpa + +import ( + "context" + "testing" + + // Inject our fake informers + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + _ "knative.dev/pkg/client/injection/kube/informers/autoscaling/v2beta1/horizontalpodautoscaler/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + "knative.dev/pkg/ptr" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + _ "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake" + _ "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake" + fakepainformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake" + _ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake" + + appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ktesting "k8s.io/client-go/testing" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/autoscaling" + asv1a1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/reconciler" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/hpa/resources" + aresources "knative.dev/serving/pkg/reconciler/autoscaling/resources" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . 
"knative.dev/serving/pkg/testing" +) + +const ( + testNamespace = "test-namespace" + testRevision = "test-revision" +) + +func TestControllerCanReconcile(t *testing.T) { + ctx, _ := SetupFakeContext(t) + ctl := NewController(ctx, configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: autoscaler.ConfigName, + }, + Data: map[string]string{}, + })) + + podAutoscaler := pa(testNamespace, testRevision, WithHPAClass) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(podAutoscaler) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(podAutoscaler) + + err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision) + if err != nil { + t.Errorf("Reconcile() = %v", err) + } + + _, err = fakekubeclient.Get(ctx).AutoscalingV2beta1().HorizontalPodAutoscalers(testNamespace).Get(testRevision, metav1.GetOptions{}) + if err != nil { + t.Errorf("error getting hpa: %v", err) + } +} + +func TestReconcile(t *testing.T) { + const ( + deployName = testRevision + "-deployment" + privateSvc = testRevision + "-private" + ) + + table := TableTest{{ + Name: "no op", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + pa(testNamespace, testRevision, WithHPAClass, WithTraffic, WithPAStatusService(testRevision), + WithPAMetricsService(privateSvc), withScales(0, 0)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }, + Key: key(testNamespace, testRevision), + }, { + Name: "metric-change", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency))), + pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency), + WithTraffic, WithPAStatusService(testRevision), WithPAMetricsService(privateSvc), withScales(0, 0)), + deploy(testNamespace, 
testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency)), testRevision+"-metrics2"), + }, + Key: key(testNamespace, testRevision), + WantCreates: []runtime.Object{}, + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: metric(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency)), privateSvc), + }}, + }, { + Name: "create hpa & sks", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass), + deploy(testNamespace, testRevision), + }, + Key: key(testNamespace, testRevision), + WantCreates: []runtime.Object{ + sks(testNamespace, testRevision, WithDeployRef(deployName)), + hpa(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation("cpu"))), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, withScales(0, 0), + WithNoTraffic("ServicesNotReady", "SKS Services are not ready yet")), + }}, + }, { + Name: "create metric when Concurrency used", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, + WithMetricAnnotation(autoscaling.Concurrency), withScales(0, 0)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithPrivateService), + }, + Key: key(testNamespace, testRevision), + WantCreates: []runtime.Object{ + metric(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency)), privateSvc), + hpa(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency))), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency), + WithNoTraffic("ServicesNotReady", "SKS Services are not ready yet"), withScales(0, 0), + WithPAMetricsService(privateSvc)), + }}, + }, { + Name: 
"create metric when RPS used", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.RPS)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithPrivateService), + }, + Key: key(testNamespace, testRevision), + WantCreates: []runtime.Object{ + metric(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.RPS)), privateSvc), + hpa(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.RPS))), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.RPS), + WithNoTraffic("ServicesNotReady", "SKS Services are not ready yet"), withScales(0, 0), + WithPAMetricsService(privateSvc)), + }}, + }, { + Name: "reconcile sks is still not ready", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + pa(testNamespace, testRevision, WithHPAClass), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithPubService, + WithPrivateService), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, WithTraffic, withScales(0, 0), + WithNoTraffic("ServicesNotReady", "SKS Services are not ready yet"), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + Key: key(testNamespace, testRevision), + }, { + Name: "reconcile sks becomes ready", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + pa(testNamespace, testRevision, WithHPAClass, WithPAStatusService("the-wrong-one")), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, 
withScales(0, 0), + WithTraffic, WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + Key: key(testNamespace, testRevision), + }, { + Name: "reconcile sks", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu")), withHPAScaleStatus(5, 3)), + pa(testNamespace, testRevision, WithHPAClass, withScales(1, 4), WithTraffic), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef("bar"), WithSKSReady), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, WithTraffic, withScales(5, 3), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + Key: key(testNamespace, testRevision), + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }}, + }, { + Name: "reconcile unhappy sks", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + pa(testNamespace, testRevision, WithHPAClass, WithTraffic), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName+"-hairy"), + WithPubService, WithPrivateService), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, withScales(0, 0), + WithNoTraffic("ServicesNotReady", "SKS Services are not ready yet"), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + Key: key(testNamespace, testRevision), + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), + WithPubService, WithPrivateService), + }}, + }, { + Name: "reconcile sks - update fails", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, WithTraffic, withScales(0, 0)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, 
WithDeployRef("bar"), WithSKSReady), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + }, + Key: key(testNamespace, testRevision), + WithReactors: []ktesting.ReactionFunc{ + InduceFailure("update", "serverlessservices"), + }, + WantErr: true, + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "error reconciling SKS: error updating SKS test-revision: inducing failure for update serverlessservices"), + }, + }, { + Name: "create sks - create fails", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, withScales(0, 0), WithTraffic), + deploy(testNamespace, testRevision), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + }, + Key: key(testNamespace, testRevision), + WithReactors: []ktesting.ReactionFunc{ + InduceFailure("create", "serverlessservices"), + }, + WantErr: true, + WantCreates: []runtime.Object{ + sks(testNamespace, testRevision, WithDeployRef(deployName)), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "error reconciling SKS: error creating SKS test-revision: inducing failure for create serverlessservices"), + }, + }, { + Name: "sks is disowned", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSOwnersRemoved, WithSKSReady), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + }, + Key: key(testNamespace, testRevision), + WantErr: true, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, MarkResourceNotOwnedByPA("ServerlessService", testRevision)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `error reconciling SKS: PA: 
test-revision does not own SKS: test-revision`), + }, + }, { + Name: "pa is disowned", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName)), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"), WithPAOwnersRemoved), withHPAOwnersRemoved), + }, + Key: key(testNamespace, testRevision), + WantErr: true, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, MarkResourceNotOwnedByPA("HorizontalPodAutoscaler", testRevision)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `PodAutoscaler: "test-revision" does not own HPA: "test-revision"`), + }, + }, { + Name: "metric is disowned", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency))), + pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency), + WithPAMetricsService(privateSvc), WithTraffic, WithPAStatusService(testRevision)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(pa(testNamespace, testRevision, + WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency)), privateSvc, WithMetricOwnersRemoved), + }, + Key: key(testNamespace, testRevision), + WantErr: true, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation(autoscaling.Concurrency), + WithPAMetricsService(privateSvc), WithTraffic, WithPAStatusService(testRevision), + MarkResourceNotOwnedByPA("Metric", testRevision)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `error reconciling metric: PA: test-revision does not own Metric: test-revision`), + }, + }, { + Name: "nop deletion reconcile", + // Test that with a DeletionTimestamp 
we do nothing. + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, WithPADeletionTimestamp), + deploy(testNamespace, testRevision), + }, + Key: key(testNamespace, testRevision), + }, { + Name: "update pa fails", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu")), withHPAScaleStatus(19, 18)), + pa(testNamespace, testRevision, WithHPAClass, WithPAStatusService("the-wrong-one"), withScales(42, 84)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, withScales(19, 18), + WithTraffic, WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + Key: key(testNamespace, testRevision), + WantErr: true, + WithReactors: []ktesting.ReactionFunc{ + InduceFailure("update", "podautoscalers"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", `Failed to update status for PA "test-revision": inducing failure for update podautoscalers`), + }, + }, { + Name: "update hpa fails", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, WithTraffic, withScales(0, 0), + WithPAStatusService(testRevision), WithTargetAnnotation("1")), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + deploy(testNamespace, testRevision), + }, + Key: key(testNamespace, testRevision), + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: hpa(pa(testNamespace, testRevision, WithHPAClass, WithTargetAnnotation("1"), WithMetricAnnotation("cpu"))), + }}, + WantErr: true, + WithReactors: []ktesting.ReactionFunc{ + InduceFailure("update", "horizontalpodautoscalers"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to update HPA: inducing 
failure for update horizontalpodautoscalers"), + }, + }, { + Name: "update hpa with target usage", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass, WithTraffic, withScales(0, 0), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc), WithTargetAnnotation("1")), + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }, + Key: key(testNamespace, testRevision), + WantUpdates: []ktesting.UpdateActionImpl{{ + Object: hpa(pa(testNamespace, testRevision, WithHPAClass, WithTargetAnnotation("1"), WithMetricAnnotation("cpu"))), + }}, + }, { + Name: "invalid key", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, WithHPAClass), + }, + Key: "sandwich///", + }, { + Name: "failure to create HPA", + Objects: []runtime.Object{ + pa(testNamespace, testRevision, withScales(0, 0), WithHPAClass), + deploy(testNamespace, testRevision), + }, + Key: key(testNamespace, testRevision), + WantCreates: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + }, + WithReactors: []ktesting.ReactionFunc{ + InduceFailure("create", "horizontalpodautoscalers"), + }, + WantStatusUpdates: []ktesting.UpdateActionImpl{{ + Object: pa(testNamespace, testRevision, WithHPAClass, withScales(0, 0), + WithNoTraffic( + "FailedCreate", `Failed to create HorizontalPodAutoscaler "test-revision".`)), + }}, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create HPA: inducing failure for create horizontalpodautoscalers"), + }, + }, { + Name: "remove metric service", + Objects: []runtime.Object{ + hpa(pa(testNamespace, testRevision, WithHPAClass, WithMetricAnnotation("cpu"))), + pa(testNamespace, testRevision, WithHPAClass, WithTraffic, + withScales(0, 0), WithPAStatusService(testRevision), 
WithPAMetricsService(privateSvc)), + deploy(testNamespace, testRevision), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metricService(pa(testNamespace, testRevision)), + }, + WantDeletes: []ktesting.DeleteActionImpl{{ + Name: testRevision + "-bogus", + ActionImpl: ktesting.ActionImpl{ + Namespace: testNamespace, + Verb: "delete", + }, + }}, + Key: key(testNamespace, testRevision), + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + ctx = podscalable.WithDuck(ctx) + + return &Reconciler{ + Base: &areconciler.Base{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + PALister: listers.GetPodAutoscalerLister(), + SKSLister: listers.GetServerlessServiceLister(), + MetricLister: listers.GetMetricLister(), + ConfigStore: &testConfigStore{config: defaultConfig()}, + ServiceLister: listers.GetK8sServiceLister(), + PSInformerFactory: podscalable.Get(ctx), + }, + hpaLister: listers.GetHorizontalPodAutoscalerLister(), + } + })) +} + +func sks(ns, n string, so ...SKSOption) *nv1a1.ServerlessService { + hpa := pa(ns, n, WithHPAClass) + s := aresources.MakeSKS(hpa, nv1a1.SKSOperationModeServe) + for _, opt := range so { + opt(s) + } + return s +} + +func key(namespace, name string) string { + return namespace + "/" + name +} + +func pa(namespace, name string, options ...PodAutoscalerOption) *asv1a1.PodAutoscaler { + pa := &asv1a1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: asv1a1.PodAutoscalerSpec{ + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name + "-deployment", + }, + ProtocolType: networking.ProtocolHTTP1, + }, + } + for _, opt := range options { + opt(pa) + } + return pa +} + +type hpaOption func(*autoscalingv2beta1.HorizontalPodAutoscaler) + +func withHPAOwnersRemoved(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) { + hpa.OwnerReferences = nil +} + 
// withScales sets the PA status desired/actual scale to d and a respectively.
func withScales(d, a int32) PodAutoscalerOption {
	return func(pa *asv1a1.PodAutoscaler) {
		pa.Status.DesiredScale, pa.Status.ActualScale = ptr.Int32(d), ptr.Int32(a)
	}
}

// withHPAScaleStatus sets the HPA's reported desired/current replica counts.
func withHPAScaleStatus(d, a int32) hpaOption {
	return func(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) {
		hpa.Status.DesiredReplicas, hpa.Status.CurrentReplicas = d, a
	}
}

// hpa builds the HPA that MakeHPA would generate for pa (under the default
// autoscaler config), then applies the given options.
func hpa(pa *asv1a1.PodAutoscaler, options ...hpaOption) *autoscalingv2beta1.HorizontalPodAutoscaler {
	h := resources.MakeHPA(pa, defaultConfig().Autoscaler)
	for _, o := range options {
		o(h)
	}
	return h
}

// deploymentOption mutates a Deployment fixture in place.
type deploymentOption func(*appsv1.Deployment)

// deploy builds a minimal Deployment fixture named "<name>-deployment" with a
// fixed selector and 42 replicas, then applies opts.
func deploy(namespace, name string, opts ...deploymentOption) *appsv1.Deployment {
	s := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name + "-deployment",
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"a": "b",
				},
			},
		},
		Status: appsv1.DeploymentStatus{
			Replicas: 42,
		},
	}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

// metricOption mutates a Metric fixture in place.
type metricOption func(*asv1a1.Metric)

// metric builds the Metric entity that the reconciler would create for pa
// against the metrics service msvcName, then applies opts.
func metric(pa *asv1a1.PodAutoscaler, msvcName string, opts ...metricOption) *asv1a1.Metric {
	m := aresources.MakeMetric(context.Background(), pa, msvcName, defaultConfig().Autoscaler)
	for _, o := range opts {
		o(m)
	}
	return m
}

// TODO(5900): Remove after 0.12 is cut.
// metricService builds a legacy stand-alone metrics Service fixture for pa
// (name "<pa>-bogus"), used to verify that the reconciler deletes obsolete
// metrics services now that private SKS services are used instead.
func metricService(pa *asv1a1.PodAutoscaler) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pa.Name + "-bogus",
			Namespace: pa.Namespace,
			Labels: map[string]string{
				autoscaling.KPALabelKey:   pa.Name,
				networking.ServiceTypeKey: string(networking.ServiceTypeMetrics),
			},
			OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa)},
		},
	}
}

// defaultConfig returns an autoscaler Config built from an empty map, i.e.
// all defaults. The error is deliberately ignored: NewConfigFromMap(nil)
// is expected to succeed in this test fixture.
func defaultConfig() *config.Config {
	autoscalerConfig, _ := autoscaler.NewConfigFromMap(nil)
	return &config.Config{
		Autoscaler: autoscalerConfig,
	}
}

// testConfigStore is a stub ConfigStore that always yields a fixed config.
type testConfigStore struct {
	config *config.Config
}

// ToContext attaches the stored config to ctx.
func (t *testConfigStore) ToContext(ctx context.Context) context.Context {
	return config.ToContext(ctx, t.config)
}

// Compile-time interface conformance check.
var _ reconciler.ConfigStore = (*testConfigStore)(nil)
package resources

import (
	"math"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"knative.dev/pkg/kmeta"
	"knative.dev/pkg/ptr"
	"knative.dev/serving/pkg/apis/autoscaling"
	"knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
	servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1"
	"knative.dev/serving/pkg/autoscaler"
	aresources "knative.dev/serving/pkg/reconciler/autoscaling/resources"
)

// MakeHPA creates an HPA resource from a PA resource. The HPA mirrors the
// PA's name, namespace, labels and annotations, is owner-referenced to the
// PA, and targets the PA's ScaleTargetRef. Scale bounds and the metric spec
// are derived from the PA's autoscaling annotations and config.
func MakeHPA(pa *v1alpha1.PodAutoscaler, config *autoscaler.Config) *autoscalingv2beta1.HorizontalPodAutoscaler {
	min, max := pa.ScaleBounds()
	if max == 0 {
		max = math.MaxInt32 // default to no limit
	}
	hpa := &autoscalingv2beta1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:            pa.Name,
			Namespace:       pa.Namespace,
			Labels:          pa.Labels,
			Annotations:     pa.Annotations,
			OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa)},
		},
		Spec: autoscalingv2beta1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv2beta1.CrossVersionObjectReference{
				APIVersion: pa.Spec.ScaleTargetRef.APIVersion,
				Kind:       pa.Spec.ScaleTargetRef.Kind,
				Name:       pa.Spec.ScaleTargetRef.Name,
			},
		},
	}
	hpa.Spec.MaxReplicas = max
	// MinReplicas is a pointer; leave it nil (API default) unless a positive
	// lower bound was requested.
	if min > 0 {
		hpa.Spec.MinReplicas = &min
	}

	switch pa.Metric() {
	case autoscaling.CPU:
		// CPU scaling uses a standard resource metric; the annotation target
		// is a utilization percentage, rounded up to a whole int32.
		if target, ok := pa.Target(); ok {
			hpa.Spec.Metrics = []autoscalingv2beta1.MetricSpec{{
				Type: autoscalingv2beta1.ResourceMetricSourceType,
				Resource: &autoscalingv2beta1.ResourceMetricSource{
					Name:                     corev1.ResourceCPU,
					TargetAverageUtilization: ptr.Int32(int32(math.Ceil(target))),
				},
			}}
		}
	case autoscaling.Concurrency, autoscaling.RPS:
		// Concurrency/RPS scaling uses a custom object metric served for the
		// owning Revision; the target is resolved from annotations + config
		// and rounded up.
		t, _ := aresources.ResolveMetricTarget(pa, config)
		target := int64(math.Ceil(t))
		hpa.Spec.Metrics = []autoscalingv2beta1.MetricSpec{{
			Type: autoscalingv2beta1.ObjectMetricSourceType,
			Object: &autoscalingv2beta1.ObjectMetricSource{
				Target: autoscalingv2beta1.CrossVersionObjectReference{
					APIVersion: servingv1alpha1.SchemeGroupVersion.String(),
					// NOTE(review): lowercase "revision" — presumably what the
					// custom-metrics adapter expects; confirm against upstream
					// before changing.
					Kind: "revision",
					Name: pa.Name,
				},
				MetricName:   pa.Metric(),
				AverageValue: resource.NewQuantity(target, resource.DecimalSI),
				TargetValue:  *resource.NewQuantity(target, resource.DecimalSI),
			},
		}}
	}
	return hpa
}
"knative.dev/serving/pkg/testing" +) + +const ( + testNamespace = "test-namespace" + testName = "test-name" +) + +func TestMakeHPA(t *testing.T) { + cases := []struct { + name string + pa *v1alpha1.PodAutoscaler + want *autoscalingv2beta1.HorizontalPodAutoscaler + }{{ + name: "defaults", + pa: pa(), + want: hpa(), + }, { + name: "with lower bound", + pa: pa(WithLowerScaleBound(5)), + want: hpa(withMinReplicas(5), withAnnotationValue(autoscaling.MinScaleAnnotationKey, "5")), + }, { + name: "with upper bound", + pa: pa(WithUpperScaleBound(5)), + want: hpa(withMaxReplicas(5), withAnnotationValue(autoscaling.MaxScaleAnnotationKey, "5")), + }, { + name: "with an actual target", + pa: pa(WithTargetAnnotation("50"), WithMetricAnnotation(autoscaling.CPU)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.CPU), + withAnnotationValue(autoscaling.TargetAnnotationKey, "50"), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ResourceMetricSourceType, + Resource: &autoscalingv2beta1.ResourceMetricSource{ + Name: corev1.ResourceCPU, + TargetAverageUtilization: ptr.Int32(50), + }, + })), + }, { + name: "with an actual fractional target", + pa: pa(WithTargetAnnotation("1982.4"), WithMetricAnnotation(autoscaling.CPU)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.CPU), + withAnnotationValue(autoscaling.TargetAnnotationKey, "1982.4"), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ResourceMetricSourceType, + Resource: &autoscalingv2beta1.ResourceMetricSource{ + Name: corev1.ResourceCPU, + TargetAverageUtilization: ptr.Int32(1983), + }, + })), + }, { + name: "with metric=concurrency", + pa: pa(WithMetricAnnotation(autoscaling.Concurrency)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.Concurrency), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ObjectMetricSourceType, + Object: &autoscalingv2beta1.ObjectMetricSource{ 
+ Target: autoscalingv2beta1.CrossVersionObjectReference{ + APIVersion: servingv1alpha1.SchemeGroupVersion.String(), + Kind: "revision", + Name: testName, + }, + MetricName: autoscaling.Concurrency, + AverageValue: resource.NewQuantity(100, resource.DecimalSI), + TargetValue: *resource.NewQuantity(100, resource.DecimalSI), + }, + })), + }, { + name: "with metric=concurrency and target=50", + pa: pa(WithTargetAnnotation("50"), WithMetricAnnotation(autoscaling.Concurrency)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.Concurrency), + withAnnotationValue(autoscaling.TargetAnnotationKey, "50"), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ObjectMetricSourceType, + Object: &autoscalingv2beta1.ObjectMetricSource{ + Target: autoscalingv2beta1.CrossVersionObjectReference{ + APIVersion: servingv1alpha1.SchemeGroupVersion.String(), + Kind: "revision", + Name: testName, + }, + MetricName: autoscaling.Concurrency, + AverageValue: resource.NewQuantity(50, resource.DecimalSI), + TargetValue: *resource.NewQuantity(50, resource.DecimalSI), + }, + })), + }, { + name: "with metric=RPS", + pa: pa(WithMetricAnnotation(autoscaling.RPS)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.RPS), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ObjectMetricSourceType, + Object: &autoscalingv2beta1.ObjectMetricSource{ + Target: autoscalingv2beta1.CrossVersionObjectReference{ + APIVersion: servingv1alpha1.SchemeGroupVersion.String(), + Kind: "revision", + Name: testName, + }, + MetricName: autoscaling.RPS, + AverageValue: resource.NewQuantity(200, resource.DecimalSI), + TargetValue: *resource.NewQuantity(200, resource.DecimalSI), + }, + })), + }, { + name: "with metric=RPS and target=50", + pa: pa(WithTargetAnnotation("50"), WithMetricAnnotation(autoscaling.RPS)), + want: hpa( + withAnnotationValue(autoscaling.MetricAnnotationKey, autoscaling.RPS), + 
withAnnotationValue(autoscaling.TargetAnnotationKey, "50"), + withMetric(autoscalingv2beta1.MetricSpec{ + Type: autoscalingv2beta1.ObjectMetricSourceType, + Object: &autoscalingv2beta1.ObjectMetricSource{ + Target: autoscalingv2beta1.CrossVersionObjectReference{ + APIVersion: servingv1alpha1.SchemeGroupVersion.String(), + Kind: "revision", + Name: testName, + }, + MetricName: autoscaling.RPS, + AverageValue: resource.NewQuantity(50, resource.DecimalSI), + TargetValue: *resource.NewQuantity(50, resource.DecimalSI), + }, + })), + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := MakeHPA(tc.pa, config) + if equal, err := kmp.SafeEqual(tc.want, got); err != nil { + t.Errorf("Got error comparing output, err = %v", err) + } else if !equal { + if diff, err := kmp.SafeDiff(tc.want, got); err != nil { + t.Errorf("Got error diffing output, err = %v", err) + } else { + t.Errorf("MakeHPA() = (-want, +got):\n%v", diff) + } + } + }) + } +} + +func pa(options ...PodAutoscalerOption) *v1alpha1.PodAutoscaler { + p := &v1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testName, + UID: "2006", + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + }, + }, + Spec: v1alpha1.PodAutoscalerSpec{ + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps", + Kind: "Deployment", + Name: "some-name", + }, + }, + } + for _, fn := range options { + fn(p) + } + return p +} + +func hpa(options ...hpaOption) *autoscalingv2beta1.HorizontalPodAutoscaler { + h := &autoscalingv2beta1.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: testName, + Namespace: testNamespace, + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.HPA, + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "PodAutoscaler", + Name: testName, + UID: "2006", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), 
+ }}, + }, + Spec: autoscalingv2beta1.HorizontalPodAutoscalerSpec{ + MaxReplicas: math.MaxInt32, + ScaleTargetRef: autoscalingv2beta1.CrossVersionObjectReference{ + APIVersion: "apps", + Kind: "Deployment", + Name: "some-name", + }, + }, + } + + for _, o := range options { + o(h) + } + return h +} + +type hpaOption func(*autoscalingv2beta1.HorizontalPodAutoscaler) + +func withAnnotationValue(key, value string) hpaOption { + return func(pa *autoscalingv2beta1.HorizontalPodAutoscaler) { + if pa.Annotations == nil { + pa.Annotations = make(map[string]string) + } + pa.Annotations[key] = value + } +} + +func withMinReplicas(i int) hpaOption { + return func(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) { + hpa.Spec.MinReplicas = ptr.Int32(int32(i)) + } +} + +func withMaxReplicas(i int) hpaOption { + return func(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) { + hpa.Spec.MaxReplicas = int32(i) + } +} + +func withMetric(m autoscalingv2beta1.MetricSpec) hpaOption { + return func(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) { + hpa.Spec.Metrics = []autoscalingv2beta1.MetricSpec{m} + } +} + +var config = &autoscaler.Config{ + EnableScaleToZero: true, + ContainerConcurrencyTargetFraction: 1.0, + ContainerConcurrencyTargetDefault: 100.0, + RPSTargetDefault: 200.0, + TargetUtilization: 1.0, + MaxScaleUpRate: 10.0, + StableWindow: 60 * time.Second, + PanicThresholdPercentage: 200, + PanicWindowPercentage: 10, + TickInterval: 2 * time.Second, + ScaleToZeroGracePeriod: 30 * time.Second, +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/controller.go new file mode 100644 index 0000000000..53178cc4b2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/controller.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kpa + +import ( + "context" + + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + metricinformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric" + painformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler" + sksinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/reconciler" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources" +) + +const controllerAgentName = "kpa-class-podautoscaler-controller" + +// NewController returns a new KPA reconcile controller. +// TODO(mattmoor): Fix the signature to adhere to the injection type. 
+func NewController( + ctx context.Context, + cmw configmap.Watcher, + deciders resources.Deciders, +) *controller.Impl { + + paInformer := painformer.Get(ctx) + sksInformer := sksinformer.Get(ctx) + serviceInformer := serviceinformer.Get(ctx) + endpointsInformer := endpointsinformer.Get(ctx) + metricInformer := metricinformer.Get(ctx) + psInformerFactory := podscalable.Get(ctx) + + c := &Reconciler{ + Base: &areconciler.Base{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + PALister: paInformer.Lister(), + SKSLister: sksInformer.Lister(), + ServiceLister: serviceInformer.Lister(), + MetricLister: metricInformer.Lister(), + PSInformerFactory: psInformerFactory, + }, + endpointsLister: endpointsInformer.Lister(), + deciders: deciders, + } + impl := controller.NewImpl(c, c.Logger, "KPA-Class Autoscaling") + c.scaler = newScaler(ctx, psInformerFactory, impl.EnqueueAfter) + + c.Logger.Info("Setting up KPA-Class event handlers") + // Handle only PodAutoscalers that have KPA annotation. + onlyKpaClass := reconciler.AnnotationFilterFunc( + autoscaling.ClassAnnotationKey, autoscaling.KPA, false /*allowUnset*/) + paHandler := cache.FilteringResourceEventHandler{ + FilterFunc: onlyKpaClass, + Handler: controller.HandleAll(impl.Enqueue), + } + paInformer.Informer().AddEventHandler(paHandler) + + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: reconciler.LabelExistsFilterFunc(autoscaling.KPALabelKey), + Handler: controller.HandleAll(impl.EnqueueLabelOfNamespaceScopedResource("", autoscaling.KPALabelKey)), + }) + + // Watch all the services that we have created. 
+ serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyKpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + sksInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyKpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + metricInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: onlyKpaClass, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + // Have the Deciders enqueue the PAs whose decisions have changed. + deciders.Watch(impl.EnqueueKey) + + c.Logger.Info("Setting up ConfigMap receivers") + configsToResync := []interface{}{ + &autoscaler.Config{}, + } + resync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + impl.FilteredGlobalResync(onlyKpaClass, paInformer.Informer()) + }) + configStore := config.NewStore(c.Logger.Named("config-store"), resync) + configStore.WatchConfigs(cmw) + c.ConfigStore = configStore + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/doc.go new file mode 100644 index 0000000000..cbdc01f80f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* + +Package kpa implements a kubernetes controller which tracks revisions and +notifies a callback interface. + +*/ +package kpa diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa.go new file mode 100644 index 0000000000..7846e3e858 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa.go @@ -0,0 +1,300 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kpa + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/ptr" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + nv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/autoscaler" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources" + anames "knative.dev/serving/pkg/reconciler/autoscaling/resources/names" + resourceutil "knative.dev/serving/pkg/resources" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" +) + +// Reconciler tracks PAs and right sizes the ScaleTargetRef based on the +// information from Deciders. +type Reconciler struct { + *areconciler.Base + endpointsLister corev1listers.EndpointsLister + deciders resources.Deciders + scaler *scaler +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile right sizes PA ScaleTargetRefs based on the state of decisions in Deciders. 
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.ConfigStore.ToContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + logger.Debug("Reconcile kpa-class PodAutoscaler") + + original, err := c.PALister.PodAutoscalers(namespace).Get(name) + if errors.IsNotFound(err) { + logger.Info("PA in work queue no longer exists") + if err := c.deciders.Delete(ctx, namespace, name); err != nil { + return err + } + return nil + } else if err != nil { + return err + } + + // Don't modify the informer's copy. + pa := original.DeepCopy() + + // Reconcile this copy of the pa and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := c.reconcile(ctx, pa) + if equality.Semantic.DeepEqual(original.Status, pa.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = c.UpdateStatus(original, pa); err != nil { + logger.Warnw("Failed to update pa status", zap.Error(err)) + c.Recorder.Eventf(pa, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for PA %q: %v", pa.Name, err) + return err + } + if reconcileErr != nil { + c.Recorder.Event(pa, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + } + return reconcileErr +} + +func (c *Reconciler) reconcile(ctx context.Context, pa *pav1alpha1.PodAutoscaler) error { + logger := logging.FromContext(ctx) + + if pa.GetDeletionTimestamp() != nil { + return nil + } + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. 
This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. + pa.SetDefaults(ctx) + + pa.Status.InitializeConditions() + logger.Debug("PA exists") + + // We need the SKS object in order to optimize scale to zero + // performance. It is OK if SKS is nil at this point. + sksName := anames.SKS(pa.Name) + sks, err := c.SKSLister.ServerlessServices(pa.Namespace).Get(sksName) + if err != nil && !errors.IsNotFound(err) { + logger.Warnw("Error retrieving SKS for Scaler", zap.Error(err)) + } + + // Having an SKS and its PrivateServiceName is a prerequisite for all upcoming steps. + if sks == nil || (sks != nil && sks.Status.PrivateServiceName == "") { + if _, err = c.ReconcileSKS(ctx, pa, nv1alpha1.SKSOperationModeServe); err != nil { + return fmt.Errorf("error reconciling SKS: %w", err) + } + return computeStatus(pa, scaleUnknown, 0) + } + + pa.Status.MetricsServiceName = sks.Status.PrivateServiceName + decider, err := c.reconcileDecider(ctx, pa, pa.Status.MetricsServiceName) + if err != nil { + return fmt.Errorf("error reconciling Decider: %w", err) + } + + if err := c.ReconcileMetric(ctx, pa, pa.Status.MetricsServiceName); err != nil { + return fmt.Errorf("error reconciling Metric: %w", err) + } + + // Metrics services are no longer needed as we use the private services now. + if err := c.DeleteMetricsServices(ctx, pa); err != nil { + return err + } + + // Get the appropriate current scale from the metric, and right size + // the scaleTargetRef based on it. + want, err := c.scaler.Scale(ctx, pa, sks, decider.Status.DesiredScale) + if err != nil { + return fmt.Errorf("error scaling target: %w", err) + } + + mode := nv1alpha1.SKSOperationModeServe + // We put activator in the serving path in the following cases: + // 1. The revision is scaled to 0: + // a. want == 0 + // b. want == -1 && PA is inactive (Autoscaler has no previous knowledge of + // this revision, e.g. 
after a restart) but PA status is inactive (it was + // already scaled to 0). + // 2. The excess burst capacity is negative. + if want == 0 || decider.Status.ExcessBurstCapacity < 0 || want == -1 && pa.Status.IsInactive() { + logger.Infof("SKS should be in proxy mode: want = %d, ebc = %d, PA Inactive? = %v", + want, decider.Status.ExcessBurstCapacity, pa.Status.IsInactive()) + mode = nv1alpha1.SKSOperationModeProxy + } + + sks, err = c.ReconcileSKS(ctx, pa, mode) + if err != nil { + return fmt.Errorf("error reconciling SKS: %w", err) + } + + // Compare the desired and observed resources to determine our situation. + // We fetch private endpoints here, since for scaling we're interested in the actual + // state of the deployment. + got := 0 + + // Propagate service name. + pa.Status.ServiceName = sks.Status.ServiceName + // Currently, SKS.IsReady==True when revision has >0 ready pods. + if sks.Status.IsReady() { + podCounter := resourceutil.NewScopedEndpointsCounter(c.endpointsLister, pa.Namespace, sks.Status.PrivateServiceName) + got, err = podCounter.ReadyCount() + if err != nil { + return fmt.Errorf("error checking endpoints %s: %w", sks.Status.PrivateServiceName, err) + } + } + logger.Infof("PA scale got=%d, want=%d, ebc=%d", got, want, decider.Status.ExcessBurstCapacity) + return computeStatus(pa, want, got) +} + +func (c *Reconciler) reconcileDecider(ctx context.Context, pa *pav1alpha1.PodAutoscaler, k8sSvc string) (*autoscaler.Decider, error) { + desiredDecider := resources.MakeDecider(ctx, pa, config.FromContext(ctx).Autoscaler, k8sSvc) + decider, err := c.deciders.Get(ctx, desiredDecider.Namespace, desiredDecider.Name) + if errors.IsNotFound(err) { + decider, err = c.deciders.Create(ctx, desiredDecider) + if err != nil { + return nil, fmt.Errorf("error creating Decider: %w", err) + } + } else if err != nil { + return nil, fmt.Errorf("error fetching Decider: %w", err) + } + + // Ignore status when reconciling + desiredDecider.Status = decider.Status + if 
!equality.Semantic.DeepEqual(desiredDecider, decider) { + decider, err = c.deciders.Update(ctx, desiredDecider) + if err != nil { + return nil, fmt.Errorf("error updating decider: %w", err) + } + } + + return decider, nil +} + +func computeStatus(pa *pav1alpha1.PodAutoscaler, want int32, got int) error { + pa.Status.DesiredScale, pa.Status.ActualScale = &want, ptr.Int32(int32(got)) + + if err := reportMetrics(pa, want, got); err != nil { + return fmt.Errorf("error reporting metrics: %w", err) + } + + computeActiveCondition(pa, want, got) + + pa.Status.ObservedGeneration = pa.Generation + return nil +} + +func reportMetrics(pa *pav1alpha1.PodAutoscaler, want int32, got int) error { + var serviceLabel string + var configLabel string + if pa.Labels != nil { + serviceLabel = pa.Labels[serving.ServiceLabelKey] + configLabel = pa.Labels[serving.ConfigurationLabelKey] + } + reporter, err := autoscaler.NewStatsReporter(pa.Namespace, serviceLabel, configLabel, pa.Name) + if err != nil { + return err + } + + reporter.ReportActualPodCount(int64(got)) + // Negative "want" values represent an empty metrics pipeline and thus no specific request is being made. 
+ if want >= 0 { + reporter.ReportRequestedPodCount(int64(want)) + } + return nil +} + +// computeActiveCondition updates the status of a PA given the current scale (got), desired scale (want) +// and the current status, as per the following table: +// +// | Want | Got | Status | New status | +// | 0 | | | inactive | +// | >0 | < min | | activating | +// | >0 | >= min | | active | +// | -1 | < min | inactive | inactive | +// | -1 | < min | activating | activating | +// | -1 | < min | active | activating | +// | -1 | >= min | inactive | inactive | +// | -1 | >= min | activating | active | +// | -1 | >= min | active | active | +func computeActiveCondition(pa *pav1alpha1.PodAutoscaler, want int32, got int) { + minReady := activeThreshold(pa) + + switch { + case want == 0: + if pa.Status.IsActivating() { + // We only ever scale to zero while activating if we fail to activate within the progress deadline. + pa.Status.MarkInactive("TimedOut", "The target could not be activated.") + } else { + pa.Status.MarkInactive("NoTraffic", "The target is not receiving traffic.") + } + + case got < minReady: + if want > 0 || !pa.Status.IsInactive() { + pa.Status.MarkActivating( + "Queued", "Requests to the target are being buffered as resources are provisioned.") + } + + case got >= minReady: + if want > 0 || !pa.Status.IsInactive() { + // SKS should already be active. 
+ pa.Status.MarkActive() + } + } +} + +// activeThreshold returns the scale required for the pa to be marked Active +func activeThreshold(pa *pav1alpha1.PodAutoscaler) int { + min, _ := pa.ScaleBounds() + if min < 1 { + min = 1 + } + + return int(min) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa_test.go new file mode 100644 index 0000000000..1ac81d5f01 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/kpa_test.go @@ -0,0 +1,1538 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kpa + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + "sync" + "testing" + "time" + + // These are the fake informers we want setup. 
+ fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakeendpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake" + "knative.dev/pkg/kmeta" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + _ "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake" + fakemetricinformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake" + fakepainformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake" + fakesksinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + "knative.dev/serving/pkg/reconciler" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/google/go-cmp/cmp" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + "knative.dev/serving/pkg/apis/autoscaling" + asv1a1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + areconciler "knative.dev/serving/pkg/reconciler/autoscaling" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources" + aresources "knative.dev/serving/pkg/reconciler/autoscaling/resources" + revisionresources 
"knative.dev/serving/pkg/reconciler/revision/resources" + presources "knative.dev/serving/pkg/resources" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing" +) + +const ( + gracePeriod = 60 * time.Second + stableWindow = 5 * time.Minute + paStableWindow = 45 * time.Second + defaultConcurrencyTarget = 10.0 + defaultTU = 0.5 +) + +func defaultConfigMapData() map[string]string { + return map[string]string{ + "max-scale-up-rate": "12.0", + "container-concurrency-target-percentage": fmt.Sprintf("%f", defaultTU), + "container-concurrency-target-default": fmt.Sprintf("%f", defaultConcurrencyTarget), + "stable-window": stableWindow.String(), + "panic-window": "10s", + "scale-to-zero-grace-period": gracePeriod.String(), + "tick-interval": "2s", + } +} + +func defaultConfig() *config.Config { + autoscalerConfig, _ := autoscaler.NewConfigFromMap(defaultConfigMapData()) + return &config.Config{ + Autoscaler: autoscalerConfig, + } +} + +func newConfigWatcher() configmap.Watcher { + return configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: autoscaler.ConfigName, + }, + Data: defaultConfigMapData(), + }) +} + +func withScales(g, w int32) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.DesiredScale, pa.Status.ActualScale = ptr.Int32(w), ptr.Int32(g) + } +} + +func metricWithDiffSvc(ns, n string) *asv1a1.Metric { + m := metric(ns, n) + m.Spec.ScrapeTarget = "something-else" + return m +} + +type metricOption func(*asv1a1.Metric) + +func metric(ns, n string, opts ...metricOption) *asv1a1.Metric { + pa := kpa(ns, n) + m := aresources.MakeMetric(context.Background(), 
pa, + kmeta.ChildName(n, "-private"), defaultConfig().Autoscaler) + for _, o := range opts { + o(m) + } + return m +} + +func sks(ns, n string, so ...SKSOption) *nv1a1.ServerlessService { + kpa := kpa(ns, n) + s := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe) + for _, opt := range so { + opt(s) + } + return s +} + +func markOld(pa *asv1a1.PodAutoscaler) { + pa.Status.Conditions[0].LastTransitionTime.Inner.Time = time.Now().Add(-1 * time.Hour) +} + +func markActivating(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkActivating("Queued", "Requests to the target are being buffered as resources are provisioned.") +} + +func markActive(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkActive() +} + +func markUnknown(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkActivating("", "") +} + +func markInactive(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkInactive("NoTraffic", "The target is not receiving traffic.") +} + +func kpa(ns, n string, opts ...PodAutoscalerOption) *asv1a1.PodAutoscaler { + rev := newTestRevision(ns, n) + kpa := revisionresources.MakePA(rev) + kpa.Annotations["autoscaling.knative.dev/class"] = "kpa.autoscaling.knative.dev" + kpa.Annotations["autoscaling.knative.dev/metric"] = "concurrency" + for _, opt := range opts { + opt(kpa) + } + return kpa +} + +func markResourceNotOwned(rType, name string) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkResourceNotOwned(rType, name) + } +} + +// TODO(5900): Remove after 0.12 is cut. 
+func metricService(pa *asv1a1.PodAutoscaler) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: pa.Name + "-bogus", + Namespace: pa.Namespace, + Labels: map[string]string{ + autoscaling.KPALabelKey: pa.Name, + networking.ServiceTypeKey: string(networking.ServiceTypeMetrics), + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa)}, + }, + } +} + +func TestReconcile(t *testing.T) { + const ( + deployName = testRevision + "-deployment" + privateSvc = testRevision + "-private" + defaultScale = 11 + unknownScale = scaleUnknown + underscale = defaultScale - 1 + overscale = defaultScale + 1 + ) + + // Set up a default deployment with the appropriate scale so that we don't + // see patches to correct that scale. + defaultDeployment := deploy(testNamespace, testRevision, func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.Int32(defaultScale) + }) + + // Setup underscaled and overscsaled deployment + underscaledDeployment := deploy(testNamespace, testRevision, func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.Int32(underscale) + }) + overscaledDeployment := deploy(testNamespace, testRevision, func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.Int32(overscale) + }) + + minScalePatch := clientgotesting.PatchActionImpl{ + ActionImpl: clientgotesting.ActionImpl{Namespace: testNamespace}, + Name: deployName, + Patch: []byte(fmt.Sprintf(`[{"op":"replace","path":"/spec/replicas","value":%d}]`, defaultScale)), + } + + inactiveKPAMinScale := func(g int32) *asv1a1.PodAutoscaler { + return kpa( + testNamespace, testRevision, markInactive, withScales(g, unknownScale), WithReachabilityReachable, + withMinScale(defaultScale), WithPAStatusService(testRevision), WithPAMetricsService(privateSvc), + ) + } + activatingKPAMinScale := func(g int32) *asv1a1.PodAutoscaler { + return kpa( + testNamespace, testRevision, markActivating, withScales(g, defaultScale), WithReachabilityReachable, + withMinScale(defaultScale), 
WithPAStatusService(testRevision), WithPAMetricsService(privateSvc), + ) + } + activeKPAMinScale := func(g, w int32) *asv1a1.PodAutoscaler { + return kpa( + testNamespace, testRevision, markActive, withScales(g, w), WithReachabilityReachable, + withMinScale(defaultScale), WithPAStatusService(testRevision), WithPAMetricsService(privateSvc), + ) + } + + defaultSKS := sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady) + defaultMetric := metric(testNamespace, testRevision) + + underscaledEndpoints := makeSKSPrivateEndpoints(underscale, testNamespace, testRevision) + overscaledEndpoints := makeSKSPrivateEndpoints(overscale, testNamespace, testRevision) + defaultEndpoints := makeSKSPrivateEndpoints(1, testNamespace, testRevision) + zeroEndpoints := makeSKSPrivateEndpoints(0, testNamespace, testRevision) + + deciderKey := struct{}{} + + // Note: due to how KPA reconciler works we are dependent on the + // two constant objects above, which means, that all tests must share + // the same namespace and revision name. 
+ table := TableTest{{ + Name: "bad workqueue key, Part I", + Key: "too/many/parts", + }, { + Name: "bad workqueue key, Part II", + Key: "too-few-parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "steady state", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + }, { + Name: "no endpoints", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, WithPAMetricsService(privateSvc), WithPAStatusService(testRevision), + withScales(1, defaultScale)), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markUnknown, WithPAMetricsService(privateSvc), withScales(1, defaultScale), + WithPAStatusService(testRevision)), + }}, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `error checking endpoints test-revision-private: endpoints "test-revision-private" not found`), + }, + }, { + Name: "failure-creating-metric-object", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + defaultDeployment, defaultEndpoints, + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "metrics"), + }, + WantCreates: []runtime.Object{ + metric(testNamespace, testRevision), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `error reconciling Metric: error creating 
metric: inducing failure for create metrics`), + }, + WantErr: true, + }, { + Name: "failure-updating-metric-object", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + defaultDeployment, defaultEndpoints, + metricWithDiffSvc(testNamespace, testRevision), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "metrics"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: metric(testNamespace, testRevision), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `error reconciling Metric: error updating metric: inducing failure for update metrics`), + }, + WantErr: true, + }, { + Name: "create metric", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, + withScales(1, defaultScale), WithPAStatusService(testRevision)), + defaultSKS, + defaultDeployment, + defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActive, withScales(1, defaultScale), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + WantCreates: []runtime.Object{ + metric(testNamespace, testRevision), + }, + }, { + Name: "scale up deployment", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + defaultSKS, + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), + defaultEndpoints, + }, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: testNamespace, + }, + Name: deployName, + Patch: []byte(`[{"op":"add","path":"/spec/replicas","value":11}]`), + }}, + }, { + Name: "scale up 
deployment failure", + Key: key, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("patch", "deployments"), + }, + WantErr: true, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + defaultSKS, + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), + defaultEndpoints, + }, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: testNamespace, + }, + Name: deployName, + Patch: []byte(`[{"op":"add","path":"/spec/replicas","value":11}]`), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `error scaling target: failed to apply scale to scale target test-revision-deployment: inducing failure for patch deployments`), + }, + }, { + Name: "can't read endpoints", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, + }, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `error checking endpoints test-revision-private: endpoints "test-revision-private" not found`), + }, + }, { + Name: "pa activates", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, WithNoTraffic("NoTraffic", "The target is not receiving traffic."), + withScales(0, defaultScale), WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + // SKS is ready here, since its endpoints are populated with Activator endpoints. + sks(testNamespace, testRevision, WithProxyMode, WithDeployRef(deployName), WithSKSReady), + metric(testNamespace, testRevision), + defaultDeployment, + // When PA is passive num private endpoints must be 0. 
+ zeroEndpoints, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithSKSReady, + WithDeployRef(deployName)), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActivating, withScales(0, defaultScale), + WithPAMetricsService(privateSvc), + WithPAStatusService(testRevision)), + }}, + }, { + Name: "sks is still not ready", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, WithTraffic, WithPAMetricsService(privateSvc), + withScales(0, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithPubService, + WithPrivateService), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActivating, withScales(0, defaultScale), + WithPAMetricsService(privateSvc), WithPAStatusService(testRevision)), + }}, + }, { + Name: "sks becomes ready", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, WithPAMetricsService(privateSvc)), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActive, WithPAStatusService(testRevision), + WithPAMetricsService(privateSvc), withScales(1, defaultScale)), + }}, + }, { + Name: "kpa does not become ready without minScale endpoints when reachable", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withMinScale(2), withScales(1, defaultScale), + WithReachabilityReachable, WithPAMetricsService(privateSvc)), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActivating, withMinScale(2), 
WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision), WithReachabilityReachable), + }}, + }, { + Name: "kpa does not become ready without minScale endpoints when reachability is unknown", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withMinScale(2), withScales(1, defaultScale), + WithPAMetricsService(privateSvc), WithReachabilityUnknown), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActivating, withMinScale(2), WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision), WithReachabilityUnknown), + }}, + }, { + Name: "kpa becomes ready without minScale endpoints when unreachable", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withMinScale(2), withScales(1, defaultScale), + WithPAMetricsService(privateSvc), WithReachabilityUnreachable), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActive, withMinScale(2), WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision), WithReachabilityUnreachable), + }}, + }, { + Name: "kpa becomes ready with minScale endpoints when reachable", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActivating, withMinScale(2), WithPAStatusService(testRevision), + WithPAMetricsService(privateSvc), withScales(1, defaultScale), WithReachabilityReachable), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, + makeSKSPrivateEndpoints(2, testNamespace, testRevision), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActive, withMinScale(2), 
WithPAMetricsService(privateSvc), + withScales(2, defaultScale), WithPAStatusService(testRevision), WithReachabilityReachable), + }}, + }, { + Name: "kpa becomes ready with minScale endpoints when reachability is unknown", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActivating, withMinScale(2), WithPAStatusService(testRevision), + WithPAMetricsService(privateSvc), withScales(1, defaultScale), WithReachabilityUnknown), + defaultSKS, + metric(testNamespace, testRevision), + defaultDeployment, + makeSKSPrivateEndpoints(2, testNamespace, testRevision), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, markActive, withMinScale(2), WithPAMetricsService(privateSvc), + withScales(2, defaultScale), WithPAStatusService(testRevision), WithReachabilityUnknown), + }}, + }, { + Name: "sks does not exist", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), withScales(1, defaultScale)), + defaultDeployment, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // SKS does not exist, so we're just creating and have no status. + Object: kpa(testNamespace, testRevision, markActivating, WithPAMetricsService(privateSvc), withScales(0, unknownScale)), + }}, + WantCreates: []runtime.Object{ + sks(testNamespace, testRevision, WithDeployRef(deployName)), + }, + }, { + Name: "sks is out of whack", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(0, defaultScale), WithPAMetricsService(privateSvc), markActive), + sks(testNamespace, testRevision, WithDeployRef("bar"), WithPubService, WithPrivateService), + metric(testNamespace, testRevision), + defaultDeployment, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // SKS just got updated and we don't have up to date status. 
+ Object: kpa(testNamespace, testRevision, markActivating, + withScales(0, defaultScale), WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithPubService, WithPrivateService, + WithDeployRef(deployName)), + }}, + }, { + Name: "sks cannot be created", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), withScales(1, defaultScale)), + metric(testNamespace, testRevision), + defaultDeployment, + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "serverlessservices"), + }, + WantErr: true, + WantCreates: []runtime.Object{ + sks(testNamespace, testRevision, WithDeployRef(deployName)), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + "error reconciling SKS: error creating SKS test-revision: inducing failure for create serverlessservices"), + }, + }, { + Name: "sks cannot be updated", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(1, defaultScale), WithPAMetricsService(privateSvc), markActive), + sks(testNamespace, testRevision, WithDeployRef("bar")), + metric(testNamespace, testRevision), + defaultDeployment, + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "serverlessservices"), + }, + WantErr: true, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "error reconciling SKS: error updating SKS test-revision: inducing failure for update serverlessservices"), + }, + }, { + Name: "sks is disowned", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(1, defaultScale), WithPAMetricsService(privateSvc), markActive), + sks(testNamespace, testRevision, WithDeployRef(deployName), 
WithSKSReady, + WithSKSOwnersRemoved), + metric(testNamespace, testRevision), + defaultDeployment, + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, withScales(1, defaultScale), + WithPAMetricsService(privateSvc), markResourceNotOwned("ServerlessService", testRevision)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "error reconciling SKS: PA: test-revision does not own SKS: test-revision"), + }, + }, { + Name: "metric is disowned", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(1, defaultScale), WithPAMetricsService(privateSvc), markActive), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(testNamespace, testRevision, WithMetricOwnersRemoved), + defaultDeployment, + }, + WantErr: true, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, withScales(1, defaultScale), + WithPAMetricsService(privateSvc), markResourceNotOwned("Metric", testRevision)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `error reconciling Metric: PA: test-revision does not own Metric: test-revision`), + }, + }, { + Name: "steady not serving", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 0 /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(0, 0), + WithNoTraffic("NoTraffic", "The target is not receiving traffic."), + markOld, WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithSKSReady), + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision, func(d *appsv1.Deployment) { + d.Spec.Replicas = ptr.Int32(0) + }), + // Should be present, but empty. 
+ zeroEndpoints, + }, + }, { + Name: "steady not serving (scale to zero)", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 0 /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, withScales(0, 0), + WithNoTraffic("NoTraffic", "The target is not receiving traffic."), + markOld, WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithSKSReady), + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), + // Should be present, but empty. + zeroEndpoints, + }, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: testNamespace, + }, + Name: deployName, + Patch: []byte(`[{"op":"add","path":"/spec/replicas","value":0}]`), + }}, + }, { + Name: "from serving to proxy", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, decider(testNamespace, testRevision, 0 /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, markOld, withScales(0, 0), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + defaultSKS, + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, withScales(1, 0), + WithPAMetricsService(privateSvc), + WithNoTraffic("NoTraffic", "The target is not receiving traffic."), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithSKSReady, + WithDeployRef(deployName), WithProxyMode), + }}, + }, { + Name: "scaling to 0, but not stable for long enough, so no-op", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, 
decider(testNamespace, testRevision, 0 /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, withScales(1, 1), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + defaultSKS, + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), defaultEndpoints, + }, + }, { + Name: "activation failure", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, decider(testNamespace, + testRevision, 0 /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActivating, markOld, + WithPAStatusService(testRevision), withScales(0, 0), + WithPAMetricsService(privateSvc)), + defaultSKS, + metric(testNamespace, testRevision), + deploy(testNamespace, testRevision), defaultEndpoints, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: kpa(testNamespace, testRevision, WithPAMetricsService(privateSvc), + WithNoTraffic("TimedOut", "The target could not be activated."), withScales(1, 0), + WithPAStatusService(testRevision), WithPAMetricsService(privateSvc)), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithSKSReady, + WithDeployRef(deployName), WithProxyMode), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: testNamespace, + }, + Name: deployName, + Patch: []byte(`[{"op":"add","path":"/spec/replicas","value":0}]`), + }}, + }, { + Name: "want=-1, underscaled, PA inactive", + // No-op + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, unknownScale, 0 /* ebc */)), + Objects: []runtime.Object{ + inactiveKPAMinScale(0), underscaledEndpoints, underscaledDeployment, + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, + WithPubService, WithPrivateService), + defaultMetric, + }, + }, { + Name: "want=1, underscaled, PA 
inactive", + // Status -> Activating and Deployment has to be patched. + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 1, 0 /* ebc */)), + Objects: []runtime.Object{ + inactiveKPAMinScale(0), underscaledEndpoints, underscaledDeployment, + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, + WithPubService, WithPrivateService), + defaultMetric, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: activatingKPAMinScale(0), + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), + WithPubService, WithPrivateService), + }}, + }, { + Name: "underscaled, PA activating", + // Scale to `minScale` + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 2 /*autoscaler desired scale*/, 0 /* ebc */)), + Objects: []runtime.Object{ + activatingKPAMinScale(underscale), underscaledEndpoints, underscaledDeployment, + defaultSKS, defaultMetric, + }, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + }, { + Name: "underscaled, PA active", + // Mark PA "activating" + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, decider(testNamespace, testRevision, defaultScale, 0 /* ebc */)), + Objects: []runtime.Object{ + activeKPAMinScale(underscale, defaultScale), underscaledEndpoints, underscaledDeployment, + defaultSKS, defaultMetric, + }, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: activatingKPAMinScale(underscale), + }}, + }, { + // Scale to `minScale` and mark PA "active" + Name: "overscaled, PA inactive", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 0 /*wantScale*/, 0 /* ebc */)), + 
Objects: []runtime.Object{ + inactiveKPAMinScale(overscale), overscaledEndpoints, overscaledDeployment, + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithSKSReady), + defaultMetric, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: activeKPAMinScale(overscale, defaultScale), + }}, + }, { + Name: "overscaled, PA activating", + // Scale to `minScale` and mark PA "active" + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 1 /*wantScale*/, 0 /* ebc */)), + Objects: []runtime.Object{ + inactiveKPAMinScale(overscale), overscaledEndpoints, overscaledDeployment, + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithSKSReady), + defaultMetric, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: activeKPAMinScale(overscale, defaultScale), + }}, + }, { + Name: "over maxScale for real, PA active", + // No-op. + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, overscale, /*want more than minScale*/ + 0 /* ebc */)), + Objects: []runtime.Object{ + activeKPAMinScale(overscale, overscale), overscaledEndpoints, overscaledDeployment, + defaultSKS, defaultMetric, + }, + }, { + Name: "over maxScale, need to scale down, PA active", + // No-op. 
+ Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, 1, /*less than minScale*/ + 0 /* ebc */)), + Objects: []runtime.Object{ + activeKPAMinScale(overscale, overscale), overscaledEndpoints, overscaledDeployment, + defaultSKS, defaultMetric, + }, + WantPatches: []clientgotesting.PatchActionImpl{ + minScalePatch, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: activeKPAMinScale(overscale, defaultScale), + }}, + }, { + Name: "scaled-to-0-no-scale-data", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, unknownScale /* desiredScale */, 0 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markInactive, WithPAMetricsService(privateSvc), + withScales(0, -1), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithPubService, WithPrivateService), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + }, { + Name: "steady not enough capacity", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, defaultScale /* desiredScale */, -42 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithProxyMode, WithSKSReady), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + }, { + Name: "traffic increased, no longer enough burst capacity", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, defaultScale /* desiredScale */, -18 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), 
WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithSKSReady, + WithDeployRef(deployName), WithProxyMode), + }}, + }, { + Name: "traffic decreased, now we have enough burst capacity", + Key: key, + Ctx: context.WithValue(context.Background(), deciderKey, + decider(testNamespace, testRevision, defaultScale /* desiredScale */, 1 /* ebc */)), + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady, WithProxyMode), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: sks(testNamespace, testRevision, WithSKSReady, + WithDeployRef(deployName)), + }}, + }, { + Name: "remove metric service", + Key: key, + Objects: []runtime.Object{ + kpa(testNamespace, testRevision, markActive, WithPAMetricsService(privateSvc), + withScales(1, defaultScale), WithPAStatusService(testRevision)), + sks(testNamespace, testRevision, WithDeployRef(deployName), WithSKSReady), + metric(testNamespace, testRevision), + defaultDeployment, defaultEndpoints, + metricService(kpa(testNamespace, testRevision)), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + Name: testRevision + "-bogus", + ActionImpl: clientgotesting.ActionImpl{ + Namespace: testNamespace, + Verb: "delete", + }, + }}, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + ctx = podscalable.WithDuck(ctx) + + fakeDeciders := newTestDeciders() + // TODO(vagababov): see if we can get rid of the static piece of configuration and + // 
constant namespace and revision names. + + // Make new decider if it's not in the context + if d := ctx.Value(deciderKey); d == nil { + decider := resources.MakeDecider( + ctx, kpa(testNamespace, testRevision), defaultConfig().Autoscaler, "trying-hard-to-care-in-this-test") + decider.Status.DesiredScale = defaultScale + decider.Generation = 2112 + fakeDeciders.Create(ctx, decider) + } else { + fakeDeciders.Create(ctx, d.(*autoscaler.Decider)) + } + + psf := podscalable.Get(ctx) + scaler := newScaler(ctx, psf, func(interface{}, time.Duration) {}) + scaler.activatorProbe = func(*asv1a1.PodAutoscaler, http.RoundTripper) (bool, error) { return true, nil } + return &Reconciler{ + Base: &areconciler.Base{ + Base: reconciler.NewBase(ctx, controllerAgentName, newConfigWatcher()), + PALister: listers.GetPodAutoscalerLister(), + SKSLister: listers.GetServerlessServiceLister(), + ServiceLister: listers.GetK8sServiceLister(), + MetricLister: listers.GetMetricLister(), + ConfigStore: &testConfigStore{config: defaultConfig()}, + PSInformerFactory: psf, + }, + endpointsLister: listers.GetEndpointsLister(), + deciders: fakeDeciders, + scaler: scaler, + } + })) +} + +type deploymentOption func(*appsv1.Deployment) + +func deploy(namespace, name string, opts ...deploymentOption) *appsv1.Deployment { + s := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name + "-deployment", + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "b", + }, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 42, + }, + } + for _, opt := range opts { + opt(s) + } + return s +} + +func TestGlobalResyncOnUpdateAutoscalerConfigMap(t *testing.T) { + ctx, cancel, informers := SetupFakeContextWithCancel(t) + watcher := &configmap.ManualWatcher{Namespace: system.Namespace()} + + fakeDeciders := newTestDeciders() + ctl := NewController(ctx, watcher, fakeDeciders) + + // Load default config + 
watcher.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscaler.ConfigName, + Namespace: system.Namespace(), + }, + Data: defaultConfigMapData(), + }) + + grp := errgroup.Group{} + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("failed to start informers: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("failed to start configmap watcher: %v", err) + } + + grp.Go(func() error { controller.StartAll(ctx.Done(), ctl); return nil }) + + rev := newTestRevision(testNamespace, testRevision) + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + kpa := revisionresources.MakePA(rev) + sks := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe) + sks.Status.PrivateServiceName = "bogus" + + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + // Wait for decider to be created. 
+ if decider, err := pollDeciders(fakeDeciders, testNamespace, testRevision, nil); err != nil { + t.Fatalf("Failed to get decider: %v", err) + } else if got, want := decider.Spec.TargetValue, defaultConcurrencyTarget*defaultTU; got != want { + t.Fatalf("TargetValue = %f, want %f", got, want) + } + + const concurrencyTargetAfterUpdate = 100.0 + data := defaultConfigMapData() + data["container-concurrency-target-default"] = fmt.Sprintf("%f", concurrencyTargetAfterUpdate) + watcher.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: autoscaler.ConfigName, + Namespace: system.Namespace(), + }, + Data: data, + }) + + // Wait for decider to be updated with the new values from the configMap. + cond := func(d *autoscaler.Decider) bool { + return d.Spec.TargetValue == concurrencyTargetAfterUpdate + } + if decider, err := pollDeciders(fakeDeciders, testNamespace, testRevision, cond); err != nil { + t.Fatalf("Failed to get decider: %v", err) + } else if got, want := decider.Spec.TargetValue, concurrencyTargetAfterUpdate*defaultTU; got != want { + t.Fatalf("TargetValue = %f, want %f", got, want) + } +} + +func TestControllerSynchronizesCreatesAndDeletes(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + defer cancel() + + fakeDeciders := newTestDeciders() + ctl := NewController(ctx, newConfigWatcher(), fakeDeciders) + + rev := newTestRevision(testNamespace, testRevision) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + ep := makeSKSPrivateEndpoints(1, testNamespace, testRevision) + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(ep) + fakeendpointsinformer.Get(ctx).Informer().GetIndexer().Add(ep) + + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + kpa := revisionresources.MakePA(rev) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + 
fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + sks := sks(testNamespace, testRevision, WithDeployRef(kpa.Spec.ScaleTargetRef.Name), + WithSKSReady) + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + // Wait for the Reconcile to complete. + if err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision); err != nil { + t.Errorf("Reconcile() = %v", err) + } + + if count := fakeDeciders.createCallCount.Load(); count != 1 { + t.Fatalf("Create called %d times instead of once", count) + } + + newKPA, err := fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(kpa.Namespace).Get( + kpa.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Get() = %v", err) + } + if !newKPA.Status.IsReady() { + t.Error("Status.IsReady() was false") + } + + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Delete(testRevision, nil) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Delete(rev) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Delete(testRevision, nil) + fakepainformer.Get(ctx).Informer().GetIndexer().Delete(kpa) + if err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision); err != nil { + t.Errorf("Reconcile() = %v", err) + } + + if fakeDeciders.deleteCallCount.Load() == 0 { + t.Fatal("Decider was not deleted") + } + + if fakeDeciders.deleteBeforeCreate.Load() { + t.Fatal("Deciders.Delete ran before OnPresent") + } +} + +func TestUpdate(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + defer cancel() + + fakeDeciders := newTestDeciders() + ctl := NewController(ctx, newConfigWatcher(), fakeDeciders) + + rev := newTestRevision(testNamespace, testRevision) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + 
newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + ep := makeSKSPrivateEndpoints(1, testNamespace, testRevision) + fakekubeclient.Get(ctx).CoreV1().Endpoints(testNamespace).Create(ep) + fakeendpointsinformer.Get(ctx).Informer().GetIndexer().Add(ep) + + kpa := revisionresources.MakePA(rev) + kpa.SetDefaults(context.Background()) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + metric := aresources.MakeMetric(ctx, kpa, "", defaultConfig().Autoscaler) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().Metrics(testNamespace).Create(metric) + fakemetricinformer.Get(ctx).Informer().GetIndexer().Add(metric) + + sks := sks(testNamespace, testRevision, WithDeployRef(kpa.Spec.ScaleTargetRef.Name), + WithSKSReady) + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + decider := resources.MakeDecider(context.Background(), kpa, defaultConfig().Autoscaler, sks.Status.PrivateServiceName) + + // Wait for the Reconcile to complete. + if err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision); err != nil { + t.Errorf("Reconcile() = %v", err) + } + + if count := fakeDeciders.createCallCount.Load(); count != 1 { + t.Fatalf("Deciders.Create called %d times instead of once", count) + } + + // Verify decider shape. 
+ if got, want := fakeDeciders.decider, decider; !cmp.Equal(got, want) { + t.Errorf("decider mismatch: diff(+got, -want): %s", cmp.Diff(got, want)) + } + + newKPA, err := fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(kpa.Namespace).Get( + kpa.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Get() = %v", err) + } + if cond := newKPA.Status.GetCondition("Ready"); cond == nil || cond.Status != "True" { + t.Errorf("GetCondition(Ready) = %v, wanted True", cond) + } + + // Update the KPA container concurrency. + kpa.Spec.ContainerConcurrency = 2 + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Update(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Update(kpa) + + if err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision); err != nil { + t.Errorf("Reconcile() = %v", err) + } + + if fakeDeciders.updateCallCount.Load() == 0 { + t.Fatal("Deciders.Update was not called") + } +} + +func TestControllerCreateError(t *testing.T) { + ctx, cancel, infs := SetupFakeContextWithCancel(t) + waitInformers, err := controller.RunInformers(ctx.Done(), infs...) 
+ if err != nil { + t.Fatalf("Error starting up informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + want := apierrors.NewBadRequest("asdf") + + ctl := NewController(ctx, newConfigWatcher(), + &failingDeciders{ + getErr: apierrors.NewNotFound(asv1a1.Resource("Deciders"), key), + createErr: want, + }) + + kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision)) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + sks := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe) + sks.Status.PrivateServiceName = "bogus" + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + got := ctl.Reconciler.Reconcile(context.Background(), key) + if !errors.Is(got, want) { + t.Errorf("Reconcile() = %v, wanted %v wrapped", got, want) + } +} + +func TestControllerUpdateError(t *testing.T) { + ctx, cancel, infs := SetupFakeContextWithCancel(t) + waitInformers, err := controller.RunInformers(ctx.Done(), infs...) 
+ if err != nil { + t.Fatalf("Error starting up informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + want := apierrors.NewBadRequest("asdf") + + ctl := NewController(ctx, newConfigWatcher(), + &failingDeciders{ + getErr: apierrors.NewNotFound(asv1a1.Resource("Deciders"), key), + createErr: want, + }) + + kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision)) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + sks := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe) + sks.Status.PrivateServiceName = "bogus" + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + got := ctl.Reconciler.Reconcile(context.Background(), key) + if !errors.Is(got, want) { + t.Errorf("Reconcile() = %v, wanted %v wrapped", got, want) + } +} + +func TestControllerGetError(t *testing.T) { + ctx, cancel, infs := SetupFakeContextWithCancel(t) + waitInformers, err := controller.RunInformers(ctx.Done(), infs...) 
+ if err != nil { + t.Fatalf("Error starting up informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + want := apierrors.NewBadRequest("asdf") + + ctl := NewController(ctx, newConfigWatcher(), + &failingDeciders{ + getErr: want, + }) + + kpa := revisionresources.MakePA(newTestRevision(testNamespace, testRevision)) + fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(kpa) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + sks := aresources.MakeSKS(kpa, nv1a1.SKSOperationModeServe) + sks.Status.PrivateServiceName = "bogus" + fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(testNamespace).Create(sks) + fakesksinformer.Get(ctx).Informer().GetIndexer().Add(sks) + + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + got := ctl.Reconciler.Reconcile(context.Background(), key) + if !errors.Is(got, want) { + t.Errorf("Reconcile() = %v, wanted %v wrapped", got, want) + } +} + +func TestScaleFailure(t *testing.T) { + ctx, cancel, infs := SetupFakeContextWithCancel(t) + waitInformers, err := controller.RunInformers(ctx.Done(), infs...) + if err != nil { + t.Fatalf("Error starting up informers: %v", err) + } + defer func() { + cancel() + waitInformers() + }() + + ctl := NewController(ctx, newConfigWatcher(), newTestDeciders()) + + // Only put the KPA in the lister, which will prompt failures scaling it. 
+ rev := newTestRevision(testNamespace, testRevision) + kpa := revisionresources.MakePA(rev) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(kpa) + + newDeployment(t, fakedynamicclient.Get(ctx), testRevision+"-deployment", 3) + + if err := ctl.Reconciler.Reconcile(context.Background(), testNamespace+"/"+testRevision); err == nil { + t.Error("Reconcile() = nil, wanted error") + } +} + +func pollDeciders(deciders *testDeciders, namespace, name string, cond func(*autoscaler.Decider) bool) (decider *autoscaler.Decider, err error) { + wait.PollImmediate(10*time.Millisecond, 3*time.Second, func() (bool, error) { + decider, err = deciders.Get(context.Background(), namespace, name) + if err != nil { + return false, nil + } + if cond == nil { + return true, nil + } + return cond(decider), nil + }) + return decider, err +} + +func newTestDeciders() *testDeciders { + return &testDeciders{ + createCallCount: atomic.NewUint32(0), + deleteCallCount: atomic.NewUint32(0), + updateCallCount: atomic.NewUint32(0), + deleteBeforeCreate: atomic.NewBool(false), + } +} + +type testDeciders struct { + createCallCount *atomic.Uint32 + deleteCallCount *atomic.Uint32 + updateCallCount *atomic.Uint32 + deleteBeforeCreate *atomic.Bool + decider *autoscaler.Decider + mutex sync.Mutex +} + +func (km *testDeciders) Get(ctx context.Context, namespace, name string) (*autoscaler.Decider, error) { + km.mutex.Lock() + defer km.mutex.Unlock() + + if km.decider == nil { + return nil, apierrors.NewNotFound(asv1a1.Resource("Deciders"), types.NamespacedName{Namespace: namespace, Name: name}.String()) + } + return km.decider, nil +} + +func (km *testDeciders) Create(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) { + km.mutex.Lock() + defer km.mutex.Unlock() + + km.decider = decider + km.createCallCount.Add(1) + return decider, nil +} + +func (km *testDeciders) Delete(ctx context.Context, namespace, name string) error { + km.mutex.Lock() + defer km.mutex.Unlock() + + 
km.decider = nil + km.deleteCallCount.Add(1) + if km.createCallCount.Load() == 0 { + km.deleteBeforeCreate.Store(true) + } + return nil +} + +func (km *testDeciders) Update(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) { + km.mutex.Lock() + defer km.mutex.Unlock() + + km.decider = decider + km.updateCallCount.Add(1) + return decider, nil +} + +func (km *testDeciders) Watch(fn func(types.NamespacedName)) {} + +type failingDeciders struct { + getErr error + createErr error + deleteErr error +} + +func (km *failingDeciders) Get(ctx context.Context, namespace, name string) (*autoscaler.Decider, error) { + return nil, km.getErr +} + +func (km *failingDeciders) Create(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) { + return nil, km.createErr +} + +func (km *failingDeciders) Delete(ctx context.Context, namespace, name string) error { + return km.deleteErr +} + +func (km *failingDeciders) Watch(fn func(types.NamespacedName)) { +} + +func (km *failingDeciders) Update(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) { + return decider, nil +} + +func newTestRevision(namespace string, name string) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: fmt.Sprintf("/apis/ela/v1alpha1/namespaces/%s/revisions/%s", namespace, name), + Name: name, + Namespace: namespace, + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }, + }, + Spec: v1alpha1.RevisionSpec{}, + } +} + +func makeSKSPrivateEndpoints(num int, ns, n string) *corev1.Endpoints { + eps := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: n + "-private", + }, + } + for i := 0; i < num; i++ { + eps = addEndpoint(eps) + } + return eps +} + +func addEndpoint(ep *corev1.Endpoints) *corev1.Endpoints { + if ep.Subsets == nil { + ep.Subsets = []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{}, + }} + } + + 
ep.Subsets[0].Addresses = append(ep.Subsets[0].Addresses, corev1.EndpointAddress{IP: "127.0.0.1"}) + return ep +} + +func withMinScale(minScale int) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Annotations = presources.UnionMaps( + pa.Annotations, + map[string]string{autoscaling.MinScaleAnnotationKey: strconv.Itoa(minScale)}, + ) + } +} + +func decider(ns, name string, desiredScale, ebc int32) *autoscaler.Decider { + return &autoscaler.Decider{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }, + }, + Spec: autoscaler.DeciderSpec{ + MaxScaleUpRate: 10.0, + TickInterval: 2 * time.Second, + TargetValue: 100, + TotalValue: 100, + TargetBurstCapacity: 211, + PanicThreshold: 200, + StableWindow: 60 * time.Second, + }, + Status: autoscaler.DeciderStatus{ + DesiredScale: desiredScale, + ExcessBurstCapacity: ebc, + }, + } +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ reconciler.ConfigStore = (*testConfigStore)(nil) diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider.go new file mode 100644 index 0000000000..7fcf41973f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/reconciler/autoscaling/resources" +) + +// Deciders is an interface for notifying the presence or absence of autoscaling deciders. +type Deciders interface { + // Get accesses the Decider resource for this key, returning any errors. + Get(ctx context.Context, namespace, name string) (*autoscaler.Decider, error) + + // Create adds a Decider resource for a given key, returning any errors. + Create(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) + + // Delete removes the Decider resource for a given key, returning any errors. + Delete(ctx context.Context, namespace, name string) error + + // Watch registers a function to call when Decider change. + Watch(watcher func(types.NamespacedName)) + + // Update update the Decider resource, return the new Decider or any errors. + Update(ctx context.Context, decider *autoscaler.Decider) (*autoscaler.Decider, error) +} + +// MakeDecider constructs a Decider resource from a PodAutoscaler taking +// into account the PA's ContainerConcurrency and the relevant +// autoscaling annotation. 
+func MakeDecider(ctx context.Context, pa *v1alpha1.PodAutoscaler, config *autoscaler.Config, svc string) *autoscaler.Decider { + panicThresholdPercentage := config.PanicThresholdPercentage + if x, ok := pa.PanicThresholdPercentage(); ok { + panicThresholdPercentage = x + } + + target, total := resources.ResolveMetricTarget(pa, config) + panicThreshold := target * panicThresholdPercentage / 100.0 + + tbc := config.TargetBurstCapacity + if x, ok := pa.TargetBC(); ok { + tbc = x + } + return &autoscaler.Decider{ + ObjectMeta: *pa.ObjectMeta.DeepCopy(), + Spec: autoscaler.DeciderSpec{ + TickInterval: config.TickInterval, + MaxScaleUpRate: config.MaxScaleUpRate, + MaxScaleDownRate: config.MaxScaleDownRate, + ScalingMetric: pa.Metric(), + TargetValue: target, + TotalValue: total, + TargetBurstCapacity: tbc, + PanicThreshold: panicThreshold, + StableWindow: resources.StableWindow(pa, config), + ServiceName: svc, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider_test.go new file mode 100644 index 0000000000..194e4fb0fb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/resources/decider_test.go @@ -0,0 +1,276 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + . "knative.dev/serving/pkg/testing" +) + +func TestMakeDecider(t *testing.T) { + cases := []struct { + name string + pa *v1alpha1.PodAutoscaler + svc string + want *autoscaler.Decider + cfgOpt func(autoscaler.Config) *autoscaler.Config + }{{ + name: "defaults", + pa: pa(), + want: decider(withTarget(100.0), withPanicThreshold(200.0), withTotal(100)), + }, { + name: "tu < 1", // See #4449 why Target=100 + pa: pa(), + want: decider(withTarget(80), withPanicThreshold(160.0), withTotal(100)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + }, { + name: "scale up and scale down rates", + pa: pa(), + want: decider(withTarget(100.0), withPanicThreshold(200.0), withTotal(100), + withScaleUpDownRates(19.84, 19.88)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.MaxScaleUpRate = 19.84 + c.MaxScaleDownRate = 19.88 + return &c + }, + }, { + name: "with container concurrency 1", + pa: pa(WithPAContainerConcurrency(1)), + want: decider(withTarget(1.0), withPanicThreshold(2.0), withTotal(1)), + }, { + name: "with target annotation 1", + pa: pa(WithTargetAnnotation("1")), + want: decider(withTarget(1.0), withTotal(1), withPanicThreshold(2.0), withTargetAnnotation("1")), + }, { + name: "with container concurrency and tu < 1", + pa: pa(WithPAContainerConcurrency(100)), + want: decider(withTarget(80), withTotal(100), withPanicThreshold(160)), // PanicThreshold depends on TCC. 
+ cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + }, { + name: "with burst capacity set", + pa: pa(WithPAContainerConcurrency(120)), + want: decider(withTarget(96), withTotal(120), withPanicThreshold(192), withTargetBurstCapacity(63)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.TargetBurstCapacity = 63 + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + }, { + name: "with burst capacity set on the annotation", + pa: pa(WithPAContainerConcurrency(120), withTBCAnnotation("211")), + want: decider(withTarget(96), withTotal(120), withPanicThreshold(192), + withDeciderTBCAnnotation("211"), withTargetBurstCapacity(211)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.TargetBurstCapacity = 63 + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + }, { + name: "with container concurrency greater than target annotation (ok)", + pa: pa(WithPAContainerConcurrency(10), WithTargetAnnotation("1")), + want: decider(withTarget(1.0), withTotal(1), withPanicThreshold(2.0), withTargetAnnotation("1")), + }, { + name: "with target annotation greater than container concurrency (ignore annotation for safety)", + pa: pa(WithPAContainerConcurrency(1), WithTargetAnnotation("10")), + want: decider(withTarget(1.0), withTotal(1), withPanicThreshold(2.0), withTargetAnnotation("10")), + }, { + name: "with higher panic target", + pa: pa(WithTargetAnnotation("10"), WithPanicThresholdPercentageAnnotation("400")), + want: decider( + withTarget(10.0), withPanicThreshold(40.0), withTotal(10), + withTargetAnnotation("10"), withPanicThresholdPercentageAnnotation("400")), + }, { + name: "with service name", + pa: pa(WithTargetAnnotation("10"), WithPanicThresholdPercentageAnnotation("400")), + svc: "rock-solid", + want: decider( + withService("rock-solid"), + withTarget(10.0), withPanicThreshold(40.0), withTotal(10.0), + withTargetAnnotation("10"), 
withPanicThresholdPercentageAnnotation("400")), + }, { + name: "with metric annotation", + pa: pa(WithMetricAnnotation("rps")), + want: decider(withTarget(100.0), withPanicThreshold(200.0), withTotal(100), withMetric("rps"), withMetricAnnotation("rps")), + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + cfg := config + if tc.cfgOpt != nil { + cfg = tc.cfgOpt(*config) + } + + if diff := cmp.Diff(tc.want, MakeDecider(context.Background(), tc.pa, cfg, tc.svc)); diff != "" { + t.Errorf("%q (-want, +got):\n%v", tc.name, diff) + } + }) + } +} + +func pa(options ...PodAutoscalerOption) *v1alpha1.PodAutoscaler { + p := &v1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }, + }, + Spec: v1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + Status: v1alpha1.PodAutoscalerStatus{}, + } + for _, fn := range options { + fn(p) + } + return p +} + +func withTBCAnnotation(tbc string) PodAutoscalerOption { + return func(pa *v1alpha1.PodAutoscaler) { + pa.Annotations[autoscaling.TargetBurstCapacityKey] = tbc + } +} + +func withDeciderTBCAnnotation(tbc string) deciderOption { + return func(d *autoscaler.Decider) { + d.Annotations[autoscaling.TargetBurstCapacityKey] = tbc + } +} + +func decider(options ...deciderOption) *autoscaler.Decider { + m := &autoscaler.Decider{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + autoscaling.MetricAnnotationKey: autoscaling.Concurrency, + }, + }, + Spec: autoscaler.DeciderSpec{ + MaxScaleUpRate: config.MaxScaleUpRate, + TickInterval: config.TickInterval, + ScalingMetric: "concurrency", + TargetValue: 100, + TotalValue: 100, + TargetBurstCapacity: 211, + PanicThreshold: 200, + StableWindow: 
config.StableWindow, + }, + } + for _, fn := range options { + fn(m) + } + return m +} + +type deciderOption func(*autoscaler.Decider) + +func withMetric(metric string) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.ScalingMetric = metric + } +} + +func withTargetBurstCapacity(tbc float64) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.TargetBurstCapacity = tbc + } +} + +func withScaleUpDownRates(up, down float64) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.MaxScaleUpRate = up + decider.Spec.MaxScaleDownRate = down + } +} + +func withTotal(total float64) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.TotalValue = total + } +} + +func withTarget(target float64) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.TargetValue = target + } +} + +func withService(s string) deciderOption { + return func(d *autoscaler.Decider) { + d.Spec.ServiceName = s + } +} + +func withPanicThreshold(threshold float64) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Spec.PanicThreshold = threshold + } +} + +func withTargetAnnotation(target string) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Annotations[autoscaling.TargetAnnotationKey] = target + } +} + +func withMetricAnnotation(metric string) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Annotations[autoscaling.MetricAnnotationKey] = metric + } +} + +func withPanicThresholdPercentageAnnotation(percentage string) deciderOption { + return func(decider *autoscaler.Decider) { + decider.Annotations[autoscaling.PanicThresholdPercentageAnnotationKey] = percentage + } +} + +var config = &autoscaler.Config{ + EnableScaleToZero: true, + ContainerConcurrencyTargetFraction: 1.0, + ContainerConcurrencyTargetDefault: 100.0, + TargetBurstCapacity: 211.0, + MaxScaleUpRate: 10.0, + RPSTargetDefault: 100, + TargetUtilization: 1.0, + 
StableWindow: 60 * time.Second, + PanicThresholdPercentage: 200, + PanicWindowPercentage: 10, + TickInterval: 2 * time.Second, + ScaleToZeroGracePeriod: 30 * time.Second, +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler.go new file mode 100644 index 0000000000..497ce476b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler.go @@ -0,0 +1,292 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kpa + +import ( + "context" + "fmt" + "net/http" + "time" + + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/injection/clients/dynamicclient" + "knative.dev/pkg/logging" + + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/network/prober" + "knative.dev/serving/pkg/activator" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + aresources "knative.dev/serving/pkg/reconciler/autoscaling/resources" + rresources "knative.dev/serving/pkg/reconciler/revision/resources" + "knative.dev/serving/pkg/resources" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" +) + +const ( + scaleUnknown = -1 + probePeriod = 1 * time.Second + probeTimeout = 45 * time.Second + // The time after which the PA will be re-enqueued. + // This number is small, since `handleScaleToZero` below will + // re-enque for the configured grace period. + reenqeuePeriod = 1 * time.Second + + // TODO(#3456): Remove this buffer once KPA does pod failure diagnostics. + // + // KPA will scale the Deployment down to zero if it fails to activate after ProgressDeadlineSeconds, + // however, after ProgressDeadlineSeconds, the Deployment itself updates its status, which causes + // the Revision to re-reconcile and diagnose pod failures. If we use the same timeout here, we will + // race the Revision reconciler and scale down the pods before it can actually surface the pod errors. + // We should instead do pod failure diagnostics here immediately before scaling down the Deployment. 
+ activationTimeoutBuffer = 10 * time.Second + activationTimeout = time.Duration(rresources.ProgressDeadlineSeconds)*time.Second + activationTimeoutBuffer +) + +var probeOptions = []interface{}{ + prober.WithHeader(network.ProbeHeaderName, activator.Name), + prober.ExpectsBody(activator.Name), + prober.ExpectsStatusCodes([]int{http.StatusOK}), +} + +// for mocking in tests +type asyncProber interface { + Offer(context.Context, string, interface{}, time.Duration, time.Duration, ...interface{}) bool +} + +// scaler scales the target of a kpa-class PA up or down including scaling to zero. +type scaler struct { + psInformerFactory duck.InformerFactory + dynamicClient dynamic.Interface + transport http.RoundTripper + + // For sync probes. + activatorProbe func(pa *pav1alpha1.PodAutoscaler, transport http.RoundTripper) (bool, error) + + // For async probes. + probeManager asyncProber + enqueueCB func(interface{}, time.Duration) +} + +// newScaler creates a scaler. +func newScaler(ctx context.Context, psInformerFactory duck.InformerFactory, enqueueCB func(interface{}, time.Duration)) *scaler { + logger := logging.FromContext(ctx) + transport := pkgnet.NewProberTransport() + ks := &scaler{ + // Wrap it in a cache, so that we don't stamp out a new + // informer/lister each time. + psInformerFactory: psInformerFactory, + dynamicClient: dynamicclient.Get(ctx), + transport: transport, + + // Production setup uses the default probe implementation. + activatorProbe: activatorProbe, + probeManager: prober.New(func(arg interface{}, success bool, err error) { + logger.Infof("Async prober is done for %v: success?: %v error: %v", arg, success, err) + // Re-enqeue the PA in any case. If the probe timed out to retry again, if succeeded to scale to 0. + enqueueCB(arg, reenqeuePeriod) + }, transport), + enqueueCB: enqueueCB, + } + return ks +} + +// Resolves the pa to hostname:port. 
+func paToProbeTarget(pa *pav1alpha1.PodAutoscaler) string { + svc := pkgnet.GetServiceHostname(pa.Status.ServiceName, pa.Namespace) + port := networking.ServicePort(pa.Spec.ProtocolType) + return fmt.Sprintf("http://%s:%d/", svc, port) +} + +// activatorProbe returns true if via probe it determines that the +// PA is backed by the Activator. +func activatorProbe(pa *pav1alpha1.PodAutoscaler, transport http.RoundTripper) (bool, error) { + // No service name -- no probe. + if pa.Status.ServiceName == "" { + return false, nil + } + return prober.Do(context.Background(), transport, paToProbeTarget(pa), probeOptions...) +} + +// pre: 0 <= min <= max && 0 <= x +func applyBounds(min, max, x int32) int32 { + if x < min { + return min + } + if max != 0 && x > max { + return max + } + return x +} + +func (ks *scaler) handleScaleToZero(ctx context.Context, pa *pav1alpha1.PodAutoscaler, + sks *nv1a1.ServerlessService, desiredScale int32) (int32, bool) { + if desiredScale != 0 { + return desiredScale, true + } + + // We should only scale to zero when three of the following conditions are true: + // a) enable-scale-to-zero from configmap is true + // b) The PA has been active for at least the stable window, after which it gets marked inactive + // c) The PA has been inactive for at least the grace period + config := config.FromContext(ctx).Autoscaler + if !config.EnableScaleToZero { + return 1, true + } + + now := time.Now() + logger := logging.FromContext(ctx) + if pa.Status.IsActivating() { // Active=Unknown + // If we are stuck activating for longer than our progress deadline, presume we cannot succeed and scale to 0. 
+ if pa.Status.CanFailActivation(now, activationTimeout) { + logger.Infof("Activation has timed out after %v.", activationTimeout) + return 0, true + } + ks.enqueueCB(pa, activationTimeout) + return scaleUnknown, false + } else if pa.Status.IsReady() { // Active=True + // Don't scale-to-zero if the PA is active + + // Do not scale to 0, but return desiredScale of 0 to mark PA inactive. + sw := aresources.StableWindow(pa, config) + af := pa.Status.ActiveFor(now) + if af >= sw { + // We do not need to enqueue PA here, since this will + // make SKS reconcile and when it's done, PA will be reconciled again. + return desiredScale, false + } + // Otherwise, scale down to at most 1 for the remainder of the idle period and then + // reconcile PA again. + logger.Infof("Sleeping additionally for %v before can scale to 0", sw-af) + ks.enqueueCB(pa, sw-af) + desiredScale = 1 + } else { // Active=False + r, err := ks.activatorProbe(pa, ks.transport) + logger.Infof("Probing activator = %v, err = %v", r, err) + if r { + // This enforces that the revision has been backed by the activator for at least + // ScaleToZeroGracePeriod time. + // Note: SKS will always be present when scaling to zero, so nil checks are just + // defensive programming. + + // Most conservative check, if it passes we're good. + if pa.Status.CanScaleToZero(now, config.ScaleToZeroGracePeriod) { + return desiredScale, true + } + + // Otherwise check how long SKS was in proxy mode. + to := config.ScaleToZeroGracePeriod + if sks != nil { + // Compute the difference between time we've been proxying with the timeout. + // If it's positive, that's the time we need to sleep, if negative -- we + // can scale to zero. + to -= sks.Status.ProxyFor() + if to <= 0 { + logger.Infof("Fast path scaling to 0, in proxy mode for: %v", sks.Status.ProxyFor()) + return desiredScale, true + } + } + + // Re-enqeue the PA for reconciliation with timeout of `to` to make sure we wait + // long enough. 
+ ks.enqueueCB(pa, to) + return desiredScale, false + } + + // Otherwise (any prober failure) start the async probe. + logger.Info("PA is not yet backed by activator, cannot scale to zero") + if !ks.probeManager.Offer(context.Background(), paToProbeTarget(pa), pa, probePeriod, probeTimeout, probeOptions...) { + logger.Info("Probe for revision is already in flight") + } + return desiredScale, false + } + + return desiredScale, true +} + +func (ks *scaler) applyScale(ctx context.Context, pa *pav1alpha1.PodAutoscaler, desiredScale int32, + ps *pav1alpha1.PodScalable) (int32, error) { + logger := logging.FromContext(ctx) + + gvr, name, err := resources.ScaleResourceArguments(pa.Spec.ScaleTargetRef) + if err != nil { + return desiredScale, err + } + + psNew := ps.DeepCopy() + psNew.Spec.Replicas = &desiredScale + patch, err := duck.CreatePatch(ps, psNew) + if err != nil { + return desiredScale, err + } + patchBytes, err := patch.MarshalJSON() + if err != nil { + return desiredScale, err + } + + _, err = ks.dynamicClient.Resource(*gvr).Namespace(pa.Namespace).Patch(ps.Name, types.JSONPatchType, + patchBytes, metav1.PatchOptions{}) + if err != nil { + return desiredScale, fmt.Errorf("failed to apply scale to scale target %s: %w", name, err) + } + + logger.Debug("Successfully scaled.") + return desiredScale, nil +} + +// Scale attempts to scale the given PA's target reference to the desired scale. 
+func (ks *scaler) Scale(ctx context.Context, pa *pav1alpha1.PodAutoscaler, sks *nv1a1.ServerlessService, desiredScale int32) (int32, error) { + logger := logging.FromContext(ctx) + + if desiredScale < 0 && !pa.Status.IsActivating() { + logger.Debug("Metrics are not yet being collected.") + return desiredScale, nil + } + + min, max := pa.ScaleBounds() + if newScale := applyBounds(min, max, desiredScale); newScale != desiredScale { + logger.Debugf("Adjusting desiredScale to meet the min and max bounds before applying: %d -> %d", desiredScale, newScale) + desiredScale = newScale + } + + desiredScale, shouldApplyScale := ks.handleScaleToZero(ctx, pa, sks, desiredScale) + if !shouldApplyScale { + return desiredScale, nil + } + + ps, err := resources.GetScaleResource(pa.Namespace, pa.Spec.ScaleTargetRef, ks.psInformerFactory) + if err != nil { + return desiredScale, fmt.Errorf("failed to get scale target %v: %w", pa.Spec.ScaleTargetRef, err) + } + + currentScale := int32(1) + if ps.Spec.Replicas != nil { + currentScale = *ps.Spec.Replicas + } + if desiredScale == currentScale { + return desiredScale, nil + } + + logger.Infof("Scaling from %d to %d", currentScale, desiredScale) + return ks.applyScale(ctx, pa, desiredScale, ps) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler_test.go new file mode 100644 index 0000000000..afbdd9b345 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/kpa/scaler_test.go @@ -0,0 +1,670 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kpa + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "testing" + "time" + + // These are the fake informers we want setup. + fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + podscalable "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/network" + _ "knative.dev/pkg/system/testing" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/autoscaling" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + revisionresources "knative.dev/serving/pkg/reconciler/revision/resources" + "knative.dev/serving/pkg/reconciler/revision/resources/names" + + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + fakedynamic "k8s.io/client-go/dynamic/fake" + clientgotesting "k8s.io/client-go/testing" + + . "knative.dev/pkg/reconciler/testing" + . 
"knative.dev/serving/pkg/testing" +) + +const ( + testNamespace = "test-namespace" + testRevision = "test-revision" + key = testNamespace + "/" + testRevision +) + +func TestScaler(t *testing.T) { + tests := []struct { + label string + startReplicas int + scaleTo int32 + minScale int32 + maxScale int32 + wantReplicas int32 + wantScaling bool + sks SKSOption + paMutation func(*pav1alpha1.PodAutoscaler) + proberfunc func(*pav1alpha1.PodAutoscaler, http.RoundTripper) (bool, error) + wantCBCount int + wantAsyncProbeCount int + }{{ + label: "waits to scale to zero (just before idle period)", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 1, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkActive(k, time.Now().Add(-stableWindow).Add(1*time.Second)) + }, + wantCBCount: 1, + }, { + // Custom window will be shorter in the tests with custom PA window. + label: "waits to scale to zero (just before idle period), custom PA window", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 1, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + WithWindowAnnotation(paStableWindow.String())(k) + paMarkActive(k, time.Now().Add(-paStableWindow).Add(1*time.Second)) + }, + wantCBCount: 1, + }, { + label: "custom PA window, check for standard window, no probe", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + WithWindowAnnotation(paStableWindow.String())(k) + paMarkActive(k, time.Now().Add(-stableWindow)) + }, + }, { + label: "scale to 1 waiting for idle expires", + startReplicas: 10, + scaleTo: 0, + wantReplicas: 1, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkActive(k, time.Now().Add(-stableWindow).Add(1*time.Second)) + }, + wantCBCount: 1, + }, { + label: "waits to scale to zero after idle period", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + 
paMarkActive(k, time.Now().Add(-stableWindow)) + }, + }, { + label: "waits to scale to zero after idle period (custom PA window)", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + WithWindowAnnotation(paStableWindow.String())(k) + paMarkActive(k, time.Now().Add(-paStableWindow)) + }, + }, { + label: "scale to zero after grace period", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + }, + }, { + label: "waits to scale to zero (just before grace period)", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod).Add(time.Second)) + }, + wantCBCount: 1, + }, { + label: "waits to scale to zero (just before grace period, sks short)", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod).Add(time.Second)) + }, + sks: func(s *nv1a1.ServerlessService) { + markSKSInProxyFor(s, gracePeriod-time.Second) + }, + wantCBCount: 1, + }, { + label: "waits to scale to zero (just before grace period, sks in proxy long)", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod).Add(time.Second)) + }, + sks: func(s *nv1a1.ServerlessService) { + markSKSInProxyFor(s, gracePeriod) + }, + }, { + label: "scale to zero after grace period, but fail prober", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + }, + proberfunc: func(*pav1alpha1.PodAutoscaler, http.RoundTripper) (bool, error) { + return false, errors.New("hell 
or high water") + }, + wantAsyncProbeCount: 1, + }, { + label: "scale to zero after grace period, but wrong prober response", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + }, + proberfunc: func(*pav1alpha1.PodAutoscaler, http.RoundTripper) (bool, error) { return false, nil }, + wantAsyncProbeCount: 1, + }, { + label: "waits to scale to zero while activating until after deadline exceeded", + startReplicas: 1, + scaleTo: 0, + wantReplicas: -1, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkActivating(k, time.Now().Add(-activationTimeout/2)) + }, + wantCBCount: 1, + }, { + label: "scale to zero while activating after deadline exceeded", + startReplicas: 1, + scaleTo: 0, + wantReplicas: 0, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkActivating(k, time.Now().Add(-(activationTimeout + time.Second))) + }, + }, { + label: "scale down to minScale before grace period", + startReplicas: 10, + scaleTo: 0, + minScale: 2, + wantReplicas: 2, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod+time.Second)) + WithReachabilityReachable(k) + }, + }, { + label: "scale down to minScale after grace period", + startReplicas: 10, + scaleTo: 0, + minScale: 2, + wantReplicas: 2, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + WithReachabilityReachable(k) + }, + }, { + label: "ignore minScale if unreachable", + startReplicas: 10, + scaleTo: 0, + minScale: 2, + wantReplicas: 0, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + WithReachabilityUnreachable(k) // not needed, here for clarity + }, + }, { + label: "observe minScale if reachability unknown", + startReplicas: 10, + scaleTo: 
0, + minScale: 2, + wantReplicas: 2, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod)) + WithReachabilityUnknown(k) + }, + }, { + label: "scales up", + startReplicas: 1, + scaleTo: 10, + wantReplicas: 10, + wantScaling: true, + }, { + label: "scales up to maxScale", + startReplicas: 1, + scaleTo: 10, + maxScale: 8, + wantReplicas: 8, + wantScaling: true, + }, { + label: "scale up inactive revision", + startReplicas: 1, + scaleTo: 10, + wantReplicas: 10, + wantScaling: true, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now().Add(-gracePeriod/2)) + }, + }, { + label: "does not scale up from zero with no metrics", + startReplicas: 0, + scaleTo: -1, // no metrics + wantReplicas: -1, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkInactive(k, time.Now()) + }, + }, { + label: "scales up from zero to desired one", + startReplicas: 0, + scaleTo: 1, + wantReplicas: 1, + wantScaling: true, + }, { + label: "scales up from zero to desired high scale", + startReplicas: 0, + scaleTo: 10, + wantReplicas: 10, + wantScaling: true, + }, { + label: "negative scale does not scale", + startReplicas: 12, + scaleTo: -1, + wantReplicas: -1, + wantScaling: false, + paMutation: func(k *pav1alpha1.PodAutoscaler) { + paMarkActive(k, time.Now()) + }, + }} + + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + dynamicClient := fakedynamicclient.Get(ctx) + + revision := newRevision(t, fakeservingclient.Get(ctx), test.minScale, test.maxScale) + deployment := newDeployment(t, dynamicClient, names.Deployment(revision), test.startReplicas) + cbCount := 0 + revisionScaler := newScaler(ctx, podscalable.Get(ctx), func(interface{}, time.Duration) { + cbCount++ + }) + if test.proberfunc != nil { + revisionScaler.activatorProbe = test.proberfunc + } else { + revisionScaler.activatorProbe = func(*pav1alpha1.PodAutoscaler, 
http.RoundTripper) (bool, error) { return true, nil } + } + cp := &countingProber{} + revisionScaler.probeManager = cp + + // We test like this because the dynamic client's fake doesn't properly handle + // patch modes prior to 1.13 (where vaikas added JSON Patch support). + gotScaling := false + dynamicClient.PrependReactor("patch", "deployments", + func(action clientgotesting.Action) (bool, runtime.Object, error) { + patch := action.(clientgotesting.PatchAction) + if !test.wantScaling { + t.Errorf("don't want scaling, but got patch: %s", string(patch.GetPatch())) + } + gotScaling = true + return true, nil, nil + }) + + pa := newKPA(t, fakeservingclient.Get(ctx), revision) + if test.paMutation != nil { + test.paMutation(pa) + } + + sks := sks("ns", "name") + if test.sks != nil { + test.sks(sks) + } + + ctx = config.ToContext(ctx, defaultConfig()) + desiredScale, err := revisionScaler.Scale(ctx, pa, sks, test.scaleTo) + if err != nil { + t.Error("Scale got an unexpected error: ", err) + } + if err == nil && desiredScale != test.wantReplicas { + t.Errorf("desiredScale = %d, wanted %d", desiredScale, test.wantReplicas) + } + if got, want := cp.count, test.wantAsyncProbeCount; got != want { + t.Errorf("Async probe invoked = %d time, want: %d", got, want) + } + if got, want := cbCount, test.wantCBCount; got != want { + t.Errorf("Enqueue callback invoked = %d time, want: %d", got, want) + } + if test.wantScaling { + if !gotScaling { + t.Error("want scaling, but got no scaling") + } + checkReplicas(t, dynamicClient, deployment, test.wantReplicas) + } + }) + } +} + +func TestDisableScaleToZero(t *testing.T) { + tests := []struct { + label string + startReplicas int + scaleTo int32 + minScale int32 + maxScale int32 + wantReplicas int32 + wantScaling bool + }{{ + label: "EnableScaleToZero == false and minScale == 0", + startReplicas: 10, + scaleTo: 0, + wantReplicas: 1, + wantScaling: true, + }, { + label: "EnableScaleToZero == false and minScale == 2", + startReplicas: 10, 
+ scaleTo: 0, + minScale: 2, + wantReplicas: 2, + wantScaling: true, + }, { + label: "EnableScaleToZero == false and desire pod is -1(initial value)", + startReplicas: 10, + scaleTo: -1, + wantReplicas: -1, + wantScaling: false, + }} + + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + dynamicClient := fakedynamicclient.Get(ctx) + + // We test like this because the dynamic client's fake doesn't properly handle + // patch modes prior to 1.13 (where vaikas added JSON Patch support). + gotScaling := false + dynamicClient.PrependReactor("patch", "deployments", + func(action clientgotesting.Action) (bool, runtime.Object, error) { + patch := action.(clientgotesting.PatchAction) + if !test.wantScaling { + t.Errorf("don't want scaling, but got patch: %s", string(patch.GetPatch())) + } + gotScaling = true + return true, nil, nil + }) + + revision := newRevision(t, fakeservingclient.Get(ctx), test.minScale, test.maxScale) + deployment := newDeployment(t, dynamicClient, names.Deployment(revision), test.startReplicas) + revisionScaler := &scaler{ + dynamicClient: fakedynamicclient.Get(ctx), + psInformerFactory: podscalable.Get(ctx), + } + pa := newKPA(t, fakeservingclient.Get(ctx), revision) + paMarkActive(pa, time.Now()) + WithReachabilityReachable(pa) + + conf := defaultConfig() + conf.Autoscaler.EnableScaleToZero = false + ctx = config.ToContext(ctx, conf) + desiredScale, err := revisionScaler.Scale(ctx, pa, nil /*sks doesn't matter in this test*/, test.scaleTo) + + if err != nil { + t.Error("Scale got an unexpected error: ", err) + } + if err == nil && desiredScale != test.wantReplicas { + t.Errorf("desiredScale = %d, wanted %d", desiredScale, test.wantReplicas) + } + if test.wantScaling { + if !gotScaling { + t.Error("want scaling, but got no scaling") + } + checkReplicas(t, dynamicClient, deployment, test.wantReplicas) + } + }) + } +} + +func newKPA(t *testing.T, servingClient clientset.Interface, revision 
*v1alpha1.Revision) *pav1alpha1.PodAutoscaler { + pa := revisionresources.MakePA(revision) + pa.Status.InitializeConditions() + _, err := servingClient.AutoscalingV1alpha1().PodAutoscalers(testNamespace).Create(pa) + if err != nil { + t.Fatal("Failed to create PA.", err) + } + return pa +} + +func newRevision(t *testing.T, servingClient clientset.Interface, minScale, maxScale int32) *v1alpha1.Revision { + annotations := map[string]string{} + if minScale > 0 { + annotations[autoscaling.MinScaleAnnotationKey] = strconv.Itoa(int(minScale)) + } + if maxScale > 0 { + annotations[autoscaling.MaxScaleAnnotationKey] = strconv.Itoa(int(maxScale)) + } + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testRevision, + Annotations: annotations, + }, + } + rev, err := servingClient.ServingV1alpha1().Revisions(testNamespace).Create(rev) + if err != nil { + t.Fatal("Failed to create revision.", err) + } + + return rev +} + +func newDeployment(t *testing.T, dynamicClient dynamic.Interface, name string, replicas int) *v1.Deployment { + t.Helper() + + uns := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "namespace": testNamespace, + "name": name, + "uid": "1982", + }, + "spec": map[string]interface{}{ + "replicas": int64(replicas), + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + serving.RevisionUID: "1982", + }, + }, + }, + "status": map[string]interface{}{ + "replicas": int64(replicas), + }, + }, + } + + u, err := dynamicClient.Resource(schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }).Namespace(testNamespace).Create(uns, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Create() = %v", err) + } + + deployment := &v1.Deployment{} + if err := duck.FromUnstructured(u, deployment); err != nil { + t.Fatalf("FromUnstructured() = %v", err) + } + return 
deployment +} + +func paMarkActive(pa *pav1alpha1.PodAutoscaler, ltt time.Time) { + pa.Status.MarkActive() + + // This works because the conditions are sorted alphabetically + pa.Status.Conditions[0].LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(ltt)} +} + +func paMarkInactive(pa *pav1alpha1.PodAutoscaler, ltt time.Time) { + pa.Status.MarkInactive("", "") + + // This works because the conditions are sorted alphabetically + pa.Status.Conditions[0].LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(ltt)} +} + +func paMarkActivating(pa *pav1alpha1.PodAutoscaler, ltt time.Time) { + pa.Status.MarkActivating("", "") + + // This works because the conditions are sorted alphabetically + pa.Status.Conditions[0].LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(ltt)} +} + +func checkReplicas(t *testing.T, dynamicClient *fakedynamic.FakeDynamicClient, deployment *v1.Deployment, expectedScale int32) { + t.Helper() + + found := false + for _, action := range dynamicClient.Actions() { + switch action.GetVerb() { + case "patch": + patch := action.(clientgotesting.PatchAction) + if patch.GetName() != deployment.Name { + continue + } + want := fmt.Sprintf(`[{"op":"replace","path":"/spec/replicas","value":%d}]`, expectedScale) + if got := string(patch.GetPatch()); got != want { + t.Errorf("Patch = %s, wanted %s", got, want) + } + found = true + } + } + + if !found { + t.Errorf("Did not see scale update for %v", deployment.Name) + } +} + +func TestActivatorProbe(t *testing.T) { + oldRT := network.AutoTransport + defer func() { + network.AutoTransport = oldRT + }() + theErr := errors.New("rain") + + pa := kpa("who-let", "the-dogs-out", WithPAStatusService("woof")) + tests := []struct { + name string + rt network.RoundTripperFunc + wantRes bool + wantErr bool + }{{ + name: "ok", + rt: func(r *http.Request) (*http.Response, error) { + rsp := httptest.NewRecorder() + rsp.Write([]byte(activator.Name)) + return rsp.Result(), nil + }, + wantRes: true, + }, 
{ + name: "400", + rt: func(r *http.Request) (*http.Response, error) { + rsp := httptest.NewRecorder() + rsp.Code = http.StatusBadRequest + rsp.Write([]byte("wrong header, I guess?")) + return rsp.Result(), nil + }, + wantRes: false, + wantErr: true, + }, { + name: "wrong body", + rt: func(r *http.Request) (*http.Response, error) { + rsp := httptest.NewRecorder() + rsp.Write([]byte("haxoorprober")) + return rsp.Result(), nil + }, + wantRes: false, + wantErr: true, + }, { + name: "all wrong", + rt: func(r *http.Request) (*http.Response, error) { + return nil, theErr + }, + wantRes: false, + wantErr: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := activatorProbe(pa, test.rt) + if got, want := res, test.wantRes; got != want { + t.Errorf("Result = %v, want: %v", got, want) + } + if got, want := err != nil, test.wantErr; got != want { + t.Errorf("WantErr = %v, want: %v: actual error is: %v", got, want, err) + } + }) + } +} + +type countingProber struct { + count int +} + +func (c *countingProber) Offer(ctx context.Context, target string, arg interface{}, period, timeout time.Duration, ops ...interface{}) bool { + c.count++ + return true +} + +func markSKSInProxyFor(sks *nv1a1.ServerlessService, d time.Duration) { + sks.Status.MarkActivatorEndpointsPopulated() + // This works because the conditions are sorted alphabetically + sks.Status.Conditions[0].LastTransitionTime = apis.VolatileTime{Inner: metav1.NewTime(time.Now().Add(-d))} +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go new file mode 100644 index 0000000000..1ebc03271a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go @@ -0,0 +1,161 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "context" + "fmt" + "reflect" + + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/autoscaling" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + nv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" + nlisters "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/autoscaling/config" + "knative.dev/serving/pkg/reconciler/autoscaling/resources" + anames "knative.dev/serving/pkg/reconciler/autoscaling/resources/names" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// Base implements the core controller logic for autoscaling, given a Reconciler. +type Base struct { + *reconciler.Base + PALister listers.PodAutoscalerLister + ServiceLister corev1listers.ServiceLister + SKSLister nlisters.ServerlessServiceLister + MetricLister listers.MetricLister + ConfigStore reconciler.ConfigStore + PSInformerFactory duck.InformerFactory +} + +// ReconcileSKS reconciles a ServerlessService based on the given PodAutoscaler. 
+func (c *Base) ReconcileSKS(ctx context.Context, pa *pav1alpha1.PodAutoscaler, mode nv1alpha1.ServerlessServiceOperationMode) (*nv1alpha1.ServerlessService, error) { + logger := logging.FromContext(ctx) + + sksName := anames.SKS(pa.Name) + sks, err := c.SKSLister.ServerlessServices(pa.Namespace).Get(sksName) + if errors.IsNotFound(err) { + logger.Info("SKS does not exist; creating.") + sks = resources.MakeSKS(pa, mode) + _, err = c.ServingClientSet.NetworkingV1alpha1().ServerlessServices(sks.Namespace).Create(sks) + if err != nil { + return nil, fmt.Errorf("error creating SKS %s: %w", sksName, err) + } + logger.Info("Created SKS") + } else if err != nil { + return nil, fmt.Errorf("error getting SKS %s: %w", sksName, err) + } else if !metav1.IsControlledBy(sks, pa) { + pa.Status.MarkResourceNotOwned("ServerlessService", sksName) + return nil, fmt.Errorf("PA: %s does not own SKS: %s", pa.Name, sksName) + } else { + tmpl := resources.MakeSKS(pa, mode) + if !equality.Semantic.DeepEqual(tmpl.Spec, sks.Spec) { + want := sks.DeepCopy() + want.Spec = tmpl.Spec + logger.Infof("SKS %s changed; reconciling, want mode: %v", sksName, want.Spec.Mode) + if sks, err = c.ServingClientSet.NetworkingV1alpha1().ServerlessServices(sks.Namespace).Update(want); err != nil { + return nil, fmt.Errorf("error updating SKS %s: %w", sksName, err) + } + } + } + logger.Debug("Done reconciling SKS", sksName) + return sks, nil +} + +// DeleteMetricsServices removes all metrics services for the current PA. +// TODO(5900): Remove after 0.12 is cut. 
+func (c *Base) DeleteMetricsServices(ctx context.Context, pa *pav1alpha1.PodAutoscaler) error { + logger := logging.FromContext(ctx) + + svcs, err := c.ServiceLister.Services(pa.Namespace).List(labels.SelectorFromSet(map[string]string{ + autoscaling.KPALabelKey: pa.Name, + networking.ServiceTypeKey: string(networking.ServiceTypeMetrics), + })) + if err != nil { + return err + } + for _, s := range svcs { + if metav1.IsControlledBy(s, pa) { + logger.Infof("Removing redundant metric service %s", s.Name) + if err := c.KubeClientSet.CoreV1().Services( + s.Namespace).Delete(s.Name, &metav1.DeleteOptions{}); err != nil { + return err + } + } + } + return nil +} + +// ReconcileMetric reconciles a metric instance out of the given PodAutoscaler to control metric collection. +func (c *Base) ReconcileMetric(ctx context.Context, pa *pav1alpha1.PodAutoscaler, metricSN string) error { + desiredMetric := resources.MakeMetric(ctx, pa, metricSN, config.FromContext(ctx).Autoscaler) + metric, err := c.MetricLister.Metrics(desiredMetric.Namespace).Get(desiredMetric.Name) + if errors.IsNotFound(err) { + _, err = c.ServingClientSet.AutoscalingV1alpha1().Metrics(desiredMetric.Namespace).Create(desiredMetric) + if err != nil { + return fmt.Errorf("error creating metric: %w", err) + } + } else if err != nil { + return fmt.Errorf("error fetching metric: %w", err) + } else if !metav1.IsControlledBy(metric, pa) { + pa.Status.MarkResourceNotOwned("Metric", desiredMetric.Name) + return fmt.Errorf("PA: %s does not own Metric: %s", pa.Name, desiredMetric.Name) + } else { + if !equality.Semantic.DeepEqual(desiredMetric.Spec, metric.Spec) { + want := metric.DeepCopy() + want.Spec = desiredMetric.Spec + if _, err = c.ServingClientSet.AutoscalingV1alpha1().Metrics(desiredMetric.Namespace).Update(want); err != nil { + return fmt.Errorf("error updating metric: %w", err) + } + } + } + + return nil +} + +// UpdateStatus updates the status of the given PodAutoscaler. 
+func (c *Base) UpdateStatus(existing *pav1alpha1.PodAutoscaler, desired *pav1alpha1.PodAutoscaler) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.AutoscalingV1alpha1().PodAutoscalers(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = c.ServingClientSet.AutoscalingV1alpha1().PodAutoscalers(existing.Namespace).UpdateStatus(existing) + return err + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/doc.go new file mode 100644 index 0000000000..ce7ffd5005 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources contains methods for manipulating K8s resources +// shared between different PA implementations. 
+package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric.go new file mode 100644 index 0000000000..332e8f10ee --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/resources" +) + +// StableWindow returns the stable window for the revision from PA, if set, or +// systemwide default. +func StableWindow(pa *v1alpha1.PodAutoscaler, config *autoscaler.Config) time.Duration { + sw, ok := pa.Window() + if !ok { + sw = config.StableWindow + } + return sw +} + +// MakeMetric constructs a Metric resource from a PodAutoscaler +func MakeMetric(ctx context.Context, pa *v1alpha1.PodAutoscaler, metricSvc string, + config *autoscaler.Config) *v1alpha1.Metric { + stableWindow := StableWindow(pa, config) + + // Look for a panic window percentage annotation. + panicWindowPercentage, ok := pa.PanicWindowPercentage() + if !ok { + // Fall back to cluster config. 
+ panicWindowPercentage = config.PanicWindowPercentage + } + panicWindow := time.Duration(float64(stableWindow) * panicWindowPercentage / 100.0).Round(time.Second) + if panicWindow < autoscaler.BucketSize { + panicWindow = autoscaler.BucketSize + } + return &v1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pa.Namespace, + Name: pa.Name, + Annotations: resources.CopyMap(pa.Annotations), + Labels: resources.CopyMap(pa.Labels), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa)}, + }, + Spec: v1alpha1.MetricSpec{ + StableWindow: stableWindow, + PanicWindow: panicWindow, + ScrapeTarget: metricSvc, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric_test.go new file mode 100644 index 0000000000..49af73e78a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/metric_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + . 
"knative.dev/serving/pkg/testing" +) + +func TestMakeMetric(t *testing.T) { + cases := []struct { + name string + pa *v1alpha1.PodAutoscaler + msn string + want *v1alpha1.Metric + }{{ + name: "defaults", + pa: pa(), + msn: "ik", + want: metric(withScrapeTarget("ik")), + }, { + name: "with too short panic window", + pa: pa(WithWindowAnnotation("10s"), WithPanicWindowPercentageAnnotation("10")), + msn: "wil", + want: metric(withScrapeTarget("wil"), withWindowAnnotation("10s"), + withStableWindow(10*time.Second), withPanicWindow(autoscaler.BucketSize), + withPanicWindowPercentageAnnotation("10")), + }, { + name: "with longer stable window, no panic window percentage, defaults to 10%", + pa: pa(WithWindowAnnotation("10m")), + msn: "nu", + want: metric( + withScrapeTarget("nu"), + withStableWindow(10*time.Minute), withPanicWindow(time.Minute), + withWindowAnnotation("10m")), + }, { + name: "with longer panic window percentage", + pa: pa(WithPanicWindowPercentageAnnotation("50")), + msn: "dansen", + want: metric( + withScrapeTarget("dansen"), + withStableWindow(time.Minute), withPanicWindow(30*time.Second), + withPanicWindowPercentageAnnotation("50")), + }, { + name: "with panic window percentage+rounding", + pa: pa(WithPanicWindowPercentageAnnotation("51")), + msn: "dansen", + want: metric( + withScrapeTarget("dansen"), + withStableWindow(time.Minute), withPanicWindow(31*time.Second), + withPanicWindowPercentageAnnotation("51")), + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if diff := cmp.Diff(tc.want, MakeMetric(context.Background(), tc.pa, tc.msn, config)); diff != "" { + t.Errorf("%q (-want, +got):\n%v", tc.name, diff) + } + }) + } +} + +func TestStableWindow(t *testing.T) { + // Not set on PA. 
+ thePa := pa() + if got, want := StableWindow(thePa, config), config.StableWindow; got != want { + t.Errorf("StableWindow = %v, want: %v", got, want) + } + + thePa = pa(WithWindowAnnotation("251s")) + if got, want := StableWindow(thePa, config), 251*time.Second; got != want { + t.Errorf("StableWindow = %v, want: %v", got, want) + } +} + +type MetricOption func(*v1alpha1.Metric) + +func withStableWindow(window time.Duration) MetricOption { + return func(metric *v1alpha1.Metric) { + metric.Spec.StableWindow = window + } +} + +func withPanicWindow(window time.Duration) MetricOption { + return func(metric *v1alpha1.Metric) { + metric.Spec.PanicWindow = window + } +} + +func withWindowAnnotation(window string) MetricOption { + return func(metric *v1alpha1.Metric) { + metric.Annotations[autoscaling.WindowAnnotationKey] = window + } +} + +func withPanicWindowPercentageAnnotation(percentage string) MetricOption { + return func(metric *v1alpha1.Metric) { + metric.Annotations[autoscaling.PanicWindowPercentageAnnotationKey] = percentage + } +} + +func withScrapeTarget(s string) MetricOption { + return func(metric *v1alpha1.Metric) { + metric.Spec.ScrapeTarget = s + } +} + +func metric(options ...MetricOption) *v1alpha1.Metric { + m := &v1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }, + Labels: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa())}, + }, + Spec: v1alpha1.MetricSpec{ + StableWindow: 60 * time.Second, + PanicWindow: 6 * time.Second, + }, + } + for _, fn := range options { + fn(m) + } + return m +} + +func pa(options ...PodAutoscalerOption) *v1alpha1.PodAutoscaler { + p := &v1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + Annotations: map[string]string{ + autoscaling.ClassAnnotationKey: autoscaling.KPA, + }, + }, + Spec: 
v1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + }, + Status: v1alpha1.PodAutoscalerStatus{}, + } + for _, fn := range options { + fn(p) + } + return p +} + +var config = &autoscaler.Config{ + EnableScaleToZero: true, + ContainerConcurrencyTargetFraction: 1.0, + ContainerConcurrencyTargetDefault: 100.0, + RPSTargetDefault: 200.0, + TargetUtilization: 0.7, + MaxScaleUpRate: 10.0, + StableWindow: 60 * time.Second, + PanicThresholdPercentage: 200, + PanicWindowPercentage: 10, + TickInterval: 2 * time.Second, + ScaleToZeroGracePeriod: 30 * time.Second, +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/doc.go new file mode 100644 index 0000000000..0f3faf212f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names contains methods for manipulating K8s resources' names +// shared between different PA implementations. 
+package names diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names.go new file mode 100644 index 0000000000..3ad5b4b88e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names contains name generating functions for the pod autoscalers. +package names + +// SKS returns the name of the SKS resource that backs this PA. +func SKS(paName string) string { + return paName +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names_test.go new file mode 100644 index 0000000000..6cf6a07ee0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/names/names_test.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import "testing" + +func TestSKS(t *testing.T) { + if got, want := SKS("ristretto"), "ristretto"; got != want { + t.Errorf("SKSName = %q, want: %q", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks.go new file mode 100644 index 0000000000..c999e51e3d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/autoscaling" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler/autoscaling/resources/names" + "knative.dev/serving/pkg/resources" +) + +// MakeSKS makes an SKS resource from the PA and operation mode. 
+func MakeSKS(pa *pav1alpha1.PodAutoscaler, mode nv1a1.ServerlessServiceOperationMode) *nv1a1.ServerlessService { + return &nv1a1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SKS(pa.Name), + Namespace: pa.Namespace, + Labels: resources.CopyMap(pa.GetLabels()), + Annotations: resources.FilterMap(pa.GetAnnotations(), func(s string) bool { + return s == autoscaling.MetricAnnotationKey + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(pa)}, + }, + Spec: nv1a1.ServerlessServiceSpec{ + Mode: mode, + ObjectRef: pa.Spec.ScaleTargetRef, + ProtocolType: pa.Spec.ProtocolType, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks_test.go new file mode 100644 index 0000000000..fbded77340 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/sks_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + pav1a1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" +) + +// MakeSKS makes an SKS resource from the PA, selector and operation mode. +func TestMakeSKS(t *testing.T) { + pa := &pav1a1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "here", + Name: "with-you", + UID: "2006", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "with-you", + serving.RevisionUID: "2009", + }, + Annotations: map[string]string{ + "a": "b", + }, + }, + Spec: pav1a1.PodAutoscalerSpec{ + ProtocolType: networking.ProtocolHTTP1, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "blah", + }, + }, + } + + const mode = nv1a1.SKSOperationModeServe + + want := &nv1a1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "here", + Name: "with-you", + // Those labels are propagated from the Revision->PA. 
+ Labels: map[string]string{ + serving.RevisionLabelKey: "with-you", + serving.RevisionUID: "2009", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: pav1a1.SchemeGroupVersion.String(), + Kind: "PodAutoscaler", + Name: "with-you", + UID: "2006", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: nv1a1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolHTTP1, + Mode: nv1a1.SKSOperationModeServe, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "blah", + }, + }, + } + if got, want := MakeSKS(pa, mode), want; !cmp.Equal(got, want) { + t.Errorf("MakeSKS = %#v, want: %#v, diff: %s", got, want, cmp.Diff(got, want)) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go new file mode 100644 index 0000000000..1ef2b40157 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "math" + + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" +) + +// ResolveMetricTarget takes scaling metric knobs from multiple locations +// and resolves them to the final value to be used by the autoscaler. +// `target` is the target value of scaling metric that we autoscaler will aim for; +// `total` is the maximum possible value of scaling metric that is permitted on the pod. +func ResolveMetricTarget(pa *v1alpha1.PodAutoscaler, config *autoscaler.Config) (target float64, total float64) { + var tu float64 + + switch pa.Metric() { + case autoscaling.RPS: + total = config.RPSTargetDefault + tu = config.TargetUtilization + default: + // Concurrency is used by default + total = float64(pa.Spec.ContainerConcurrency) + // If containerConcurrency is 0 we'll always target the default. + if total == 0 { + total = config.ContainerConcurrencyTargetDefault + } + tu = config.ContainerConcurrencyTargetFraction + } + + // Use the target provided via annotation, if applicable. + if annotationTarget, ok := pa.Target(); ok { + total = annotationTarget + if pa.Metric() == autoscaling.Concurrency && pa.Spec.ContainerConcurrency != 0 { + // We pick the smaller value between container concurrency and the annotationTarget + // to make sure the autoscaler does not aim for a higher concurrency than the application + // can handle per containerConcurrency. 
+ total = math.Min(annotationTarget, float64(pa.Spec.ContainerConcurrency)) + } + } + + if v, ok := pa.TargetUtilization(); ok { + tu = v + } + target = math.Max(1, total*tu) + + return target, total +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target_test.go new file mode 100644 index 0000000000..34ca5ceb91 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + + . 
"knative.dev/serving/pkg/testing" +) + +func TestResolveMetricTarget(t *testing.T) { + cases := []struct { + name string + pa *v1alpha1.PodAutoscaler + cfgOpt func(autoscaler.Config) *autoscaler.Config + wantTgt float64 + wantTot float64 + }{{ + name: "defaults", + pa: pa(), + wantTgt: 100, + wantTot: 100, + }, { + name: "default CC + 80% TU", + pa: pa(), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + wantTgt: 80, + wantTot: 100, + }, { + name: "non-default CC and TU", + pa: pa(), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.3 + c.ContainerConcurrencyTargetDefault = 2 + return &c + }, + wantTgt: 1, + wantTot: 2, + }, { + name: "with container concurrency 12 and TU=80%, but TU annotation 75%", + pa: pa(WithPAContainerConcurrency(12), WithTUAnnotation("75")), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + wantTgt: 9, + wantTot: 12, + }, { + name: "with container concurrency 10 and TU=80%", + pa: pa(WithPAContainerConcurrency(10)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + wantTgt: 8, + wantTot: 10, + }, { + name: "with container concurrency 1 and TU=80%", + pa: pa(WithPAContainerConcurrency(1)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + wantTgt: 1, // Not permitting less than 1. 
+ wantTot: 1, + }, { + name: "with container concurrency 1", + pa: pa(WithPAContainerConcurrency(1)), + wantTgt: 1, + wantTot: 1, + }, { + name: "with container concurrency 10 and TU=80%", + pa: pa(WithPAContainerConcurrency(10)), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.8 + return &c + }, + wantTgt: 8, + wantTot: 10, + }, { + name: "with container concurrency 10", + pa: pa(WithPAContainerConcurrency(10)), + wantTgt: 10, + wantTot: 10, + }, { + name: "with target annotation 1", + pa: pa(WithTargetAnnotation("1")), + wantTgt: 1, + wantTot: 1, + }, { + name: "with target annotation 1 and TU=75%", + pa: pa(WithTargetAnnotation("1")), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.75 + return &c + }, + wantTgt: 1, + wantTot: 1, + }, { + name: "with target annotation 10 and TU=75%", + pa: pa(WithTargetAnnotation("10")), + cfgOpt: func(c autoscaler.Config) *autoscaler.Config { + c.ContainerConcurrencyTargetFraction = 0.75 + return &c + }, + wantTgt: 7.5, + wantTot: 10, + }, { + name: "with container concurrency greater than target annotation (ok)", + pa: pa(WithPAContainerConcurrency(10), WithTargetAnnotation("1")), + wantTgt: 1, + wantTot: 1, + }, { + name: "with target annotation greater than default (ok)", + pa: pa(WithTargetAnnotation("500")), + wantTgt: 500, + wantTot: 500, + }, { + name: "with target annotation greater than container concurrency (ignore annotation for safety)", + pa: pa(WithPAContainerConcurrency(1), WithTargetAnnotation("10")), + wantTgt: 1, + wantTot: 1, + }, { + name: "RPS: defaults", + pa: pa(WithMetricAnnotation(autoscaling.RPS), WithPAContainerConcurrency(1)), + wantTgt: 140, + wantTot: 200, + }, { + name: "RPS: with target annotation 1", + pa: pa(WithMetricAnnotation(autoscaling.RPS), WithTargetAnnotation("1")), + wantTgt: 1, + wantTot: 1, + }, { + name: "RPS: with TU annotation 75%", + pa: pa(WithMetricAnnotation(autoscaling.RPS), 
WithTUAnnotation("75")), + wantTgt: 150, + wantTot: 200, + }, { + name: "RPS: with target annotation greater than default", + pa: pa(WithMetricAnnotation(autoscaling.RPS), WithTargetAnnotation("300")), + wantTgt: 210, + wantTot: 300, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + cfg := config + if tc.cfgOpt != nil { + cfg = tc.cfgOpt(*cfg) + } + gotTgt, gotTot := ResolveMetricTarget(tc.pa, cfg) + if gotTgt != tc.wantTgt || gotTot != tc.wantTot { + t.Errorf("ResolveMetricTarget(%v, %v) = (%v, %v), want (%v, %v)", + tc.pa, config, gotTgt, gotTot, tc.wantTgt, tc.wantTot) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate.go new file mode 100644 index 0000000000..3dc7f0218c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate.go @@ -0,0 +1,301 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificate + +import ( + "context" + "fmt" + "hash/adler32" + "reflect" + "strconv" + + "k8s.io/apimachinery/pkg/util/sets" + + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + kubelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + "knative.dev/pkg/apis" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/tracker" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + certmanagerclientset "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + acmelisters "knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2" + certmanagerlisters "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/certificate/config" + "knative.dev/serving/pkg/reconciler/certificate/resources" +) + +const ( + noCMConditionReason = "NoCertManagerCertCondition" + noCMConditionMessage = "The ready condition of Cert Manager Certifiate does not exist." + notReconciledReason = "ReconcileFailed" + notReconciledMessage = "Cert-Manager certificate has not yet been reconciled." 
+ httpDomainLabel = "acme.cert-manager.io/http-domain" + httpChallengePath = "/.well-known/acme-challenge" +) + +// It comes from cert-manager status: +// https://github.com/jetstack/cert-manager/blob/b7e83b53820e712e7cf6b8dce3e5a050f249da79/pkg/controller/certificates/sync.go#L130 +var notReadyReasons = sets.NewString("InProgress", "Pending", "TemporaryCertificate") + +// Reconciler implements controller.Reconciler for Certificate resources. +type Reconciler struct { + *reconciler.Base + + // listers index properties about resources + knCertificateLister listers.CertificateLister + cmCertificateLister certmanagerlisters.CertificateLister + cmChallengeLister acmelisters.ChallengeLister + cmIssuerLister certmanagerlisters.ClusterIssuerLister + svcLister kubelisters.ServiceLister + certManagerClient certmanagerclientset.Interface + tracker tracker.Interface + + configStore reconciler.ConfigStore +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Certificate resource +// with the current status of the resource. +func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.configStore.ToContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + original, err := c.knCertificateLister.Certificates(namespace).Get(name) + if apierrs.IsNotFound(err) { + logger.Info("Knative Certificate in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy + knCert := original.DeepCopy() + + // Reconcile this copy of the Certificate and then write back any status + // updates regardless of whether the reconciliation errored out. 
+ err = c.reconcile(ctx, knCert) + if err != nil { + logger.Warnw("Failed to reconcile certificate", zap.Error(err)) + c.Recorder.Event(knCert, corev1.EventTypeWarning, "InternalError", err.Error()) + knCert.Status.MarkNotReady(notReconciledReason, notReconciledMessage) + } + if equality.Semantic.DeepEqual(original.Status, knCert.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err := c.updateStatus(original, knCert); err != nil { + logger.Warnw("Failed to update certificate status", zap.Error(err)) + c.Recorder.Eventf(knCert, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for Certificate %s: %v", key, err) + return err + } + return err +} + +func (c *Reconciler) reconcile(ctx context.Context, knCert *v1alpha1.Certificate) error { + logger := logging.FromContext(ctx) + + knCert.SetDefaults(ctx) + knCert.Status.InitializeConditions() + + logger.Infof("Reconciling Cert-Manager certificate for Knative cert %s/%s.", knCert.Namespace, knCert.Name) + knCert.Status.ObservedGeneration = knCert.Generation + + cmConfig := config.FromContext(ctx).CertManager + + cmCert := resources.MakeCertManagerCertificate(cmConfig, knCert) + cmCert, err := c.reconcileCMCertificate(ctx, knCert, cmCert) + if err != nil { + return err + } + + knCert.Status.NotAfter = cmCert.Status.NotAfter + // Propagate cert-manager Certificate status to Knative Certificate. 
+ cmCertReadyCondition := resources.GetReadyCondition(cmCert) + logger.Infof("cm cert condition %v.", cmCertReadyCondition) + + switch { + case cmCertReadyCondition == nil: + knCert.Status.MarkNotReady(noCMConditionReason, noCMConditionMessage) + return c.setHTTP01Challenges(knCert, cmCert) + case cmCertReadyCondition.Status == cmmeta.ConditionUnknown: + knCert.Status.MarkNotReady(cmCertReadyCondition.Reason, cmCertReadyCondition.Message) + return c.setHTTP01Challenges(knCert, cmCert) + case cmCertReadyCondition.Status == cmmeta.ConditionTrue: + knCert.Status.MarkReady() + knCert.Status.HTTP01Challenges = []v1alpha1.HTTP01Challenge{} + case cmCertReadyCondition.Status == cmmeta.ConditionFalse: + if notReadyReasons.Has(cmCertReadyCondition.Reason) { + knCert.Status.MarkNotReady(cmCertReadyCondition.Reason, cmCertReadyCondition.Message) + } else { + knCert.Status.MarkFailed(cmCertReadyCondition.Reason, cmCertReadyCondition.Message) + } + return c.setHTTP01Challenges(knCert, cmCert) + } + return nil +} + +func (c *Reconciler) reconcileCMCertificate(ctx context.Context, knCert *v1alpha1.Certificate, desired *cmv1alpha2.Certificate) (*cmv1alpha2.Certificate, error) { + cmCert, err := c.cmCertificateLister.Certificates(desired.Namespace).Get(desired.Name) + if apierrs.IsNotFound(err) { + cmCert, err = c.certManagerClient.CertmanagerV1alpha2().Certificates(desired.Namespace).Create(desired) + if err != nil { + c.Recorder.Eventf(knCert, corev1.EventTypeWarning, "CreationFailed", + "Failed to create Cert-Manager Certificate %s/%s: %v", desired.Name, desired.Namespace, err) + return nil, fmt.Errorf("failed to create Cert-Manager Certificate: %w", err) + } + c.Recorder.Eventf(knCert, corev1.EventTypeNormal, "Created", + "Created Cert-Manager Certificate %s/%s", desired.Namespace, desired.Name) + } else if err != nil { + return nil, fmt.Errorf("failed to get Cert-Manager Certificate: %w", err) + } else if !metav1.IsControlledBy(desired, knCert) { + 
knCert.Status.MarkResourceNotOwned("CertManagerCertificate", desired.Name) + return nil, fmt.Errorf("knative Certificate %s in namespace %s does not own CertManager Certificate: %s", knCert.Name, knCert.Namespace, desired.Name) + } else if !equality.Semantic.DeepEqual(cmCert.Spec, desired.Spec) { + copy := cmCert.DeepCopy() + copy.Spec = desired.Spec + updated, err := c.certManagerClient.CertmanagerV1alpha2().Certificates(copy.Namespace).Update(copy) + if err != nil { + c.Recorder.Eventf(knCert, corev1.EventTypeWarning, "UpdateFailed", + "Failed to create Cert-Manager Certificate %s/%s: %v", desired.Namespace, desired.Name, err) + return nil, fmt.Errorf("failed to update Cert-Manager Certificate: %w", err) + } + c.Recorder.Eventf(knCert, corev1.EventTypeNormal, "Updated", + "Updated Spec for Cert-Manager Certificate %s/%s", desired.Namespace, desired.Name) + return updated, nil + } + return cmCert, nil +} + +func (c *Reconciler) updateStatus(existing *v1alpha1.Certificate, desired *v1alpha1.Certificate) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.NetworkingV1alpha1().Certificates(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. 
+ if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = c.ServingClientSet.NetworkingV1alpha1().Certificates(existing.Namespace).UpdateStatus(existing) + return err + }) +} + +func (c *Reconciler) setHTTP01Challenges(knCert *v1alpha1.Certificate, cmCert *cmv1alpha2.Certificate) error { + if isHTTP, err := c.isHTTPChallenge(cmCert); err != nil { + return err + } else if !isHTTP { + return nil + } + challenges := make([]v1alpha1.HTTP01Challenge, 0, len(cmCert.Spec.DNSNames)) + for _, dnsName := range cmCert.Spec.DNSNames { + // This selector comes from https://github.com/jetstack/cert-manager/blob/1b9b83a4b80068207b0a8070dadb0e760f5095f6/pkg/issuer/acme/http/pod.go#L34 + selector := labels.NewSelector() + value := strconv.FormatUint(uint64(adler32.Checksum([]byte(dnsName))), 10) + req, err := labels.NewRequirement(httpDomainLabel, selection.Equals, []string{value}) + if err != nil { + return fmt.Errorf("failed to create requirement %s=%s: %w", httpDomainLabel, value, err) + } + selector = selector.Add(*req) + + svcs, err := c.svcLister.Services(knCert.Namespace).List(selector) + if err != nil { + return fmt.Errorf("failed to list services: %w", err) + } + if len(svcs) == 0 { + return fmt.Errorf("no challenge solver service for domain %s", dnsName) + } + + for _, svc := range svcs { + if err := c.tracker.Track(svcRef(svc.Namespace, svc.Name), knCert); err != nil { + return err + } + owner := svc.GetOwnerReferences()[0] + cmChallenge, err := c.cmChallengeLister.Challenges(knCert.Namespace).Get(owner.Name) + if err != nil { + return err + } + + challenge := v1alpha1.HTTP01Challenge{ + ServiceName: svc.Name, + ServicePort: svc.Spec.Ports[0].TargetPort, + ServiceNamespace: svc.Namespace, + URL: &apis.URL{ + Scheme: "http", + Path: fmt.Sprintf("%s/%s", httpChallengePath, cmChallenge.Spec.Token), + Host: cmChallenge.Spec.DNSName, + }, + } + challenges = append(challenges, challenge) + } + } + 
knCert.Status.HTTP01Challenges = challenges + return nil +} + +func (c *Reconciler) isHTTPChallenge(cmCert *cmv1alpha2.Certificate) (bool, error) { + if issuer, err := c.cmIssuerLister.Get(cmCert.Spec.IssuerRef.Name); err != nil { + return false, err + } else { + return issuer.Spec.ACME != nil && + len(issuer.Spec.ACME.Solvers) > 0 && + issuer.Spec.ACME.Solvers[0].HTTP01 != nil, nil + } +} + +func svcRef(namespace, name string) corev1.ObjectReference { + gvk := corev1.SchemeGroupVersion.WithKind("Service") + apiVersion, kind := gvk.ToAPIVersionAndKind() + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: namespace, + Name: name, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate_test.go new file mode 100644 index 0000000000..847f27fa6a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/certificate_test.go @@ -0,0 +1,573 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificate + +import ( + "context" + "fmt" + "hash/adler32" + "testing" + "time" + + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + fakecertmanagerclient "knative.dev/serving/pkg/client/certmanager/injection/client/fake" + _ "knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge/fake" + _ "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate/fake" + _ "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer/fake" + _ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake" + + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/certificate/config" + "knative.dev/serving/pkg/reconciler/certificate/resources" + + . "knative.dev/pkg/reconciler/testing" + . 
"knative.dev/serving/pkg/reconciler/testing/v1alpha1" +) + +const generation = 23132 + +var ( + correctDNSNames = []string{"correct-dns1.example.com", "correct-dns2.example.com"} + incorrectDNSNames = []string{"incorrect-dns.example.com"} + notAfter = &metav1.Time{ + Time: time.Unix(123, 456), + } + nonHTTP01Issuer = &cmv1alpha2.ClusterIssuer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Letsencrypt-issuer", + }, + Spec: cmv1alpha2.IssuerSpec{}, + } + http01Issuer = &cmv1alpha2.ClusterIssuer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Letsencrypt-issuer", + }, + Spec: cmv1alpha2.IssuerSpec{ + IssuerConfig: cmv1alpha2.IssuerConfig{ + ACME: &acmev1alpha2.ACMEIssuer{ + Solvers: []acmev1alpha2.ACMEChallengeSolver{{ + HTTP01: &acmev1alpha2.ACMEChallengeSolverHTTP01{}, + }}, + }, + }, + }, + } +) + +func TestNewController(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + configMapWatcher := configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.CertManagerConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "issuerRef": "kind: ClusterIssuer\nname: letsencrypt-issuer", + }, + }) + + c := NewController(ctx, configMapWatcher) + if c == nil { + t.Fatal("Expected NewController to return a non-nil value") + } +} + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. 
+func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + Key: "too/many/parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "create CM certificate matching Knative Certificate", + Objects: []runtime.Object{ + knCert("knCert", "foo"), + nonHTTP01Issuer, + }, + WantCreates: []runtime.Object{ + resources.MakeCertManagerCertificate(certmanagerConfig(), knCert("knCert", "foo")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + Reason: noCMConditionReason, + Message: noCMConditionMessage, + }}, + }, + }), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Cert-Manager Certificate %s/%s", "foo", "knCert"), + }, + Key: "foo/knCert", + }, { + Name: "reconcile CM certificate to match desired one", + Objects: []runtime.Object{ + knCert("knCert", "foo"), + cmCert("knCert", "foo", incorrectDNSNames), + nonHTTP01Issuer, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cmCert("knCert", "foo", correctDNSNames), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + Reason: noCMConditionReason, + Message: noCMConditionMessage, + }}, + }, + }), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Spec for Cert-Manager Certificate %s/%s", "foo", "knCert"), + }, + Key: "foo/knCert", + }, { + Name: "observed generation is still updated when error is 
encountered, and ready status is unknown", + Objects: []runtime.Object{ + knCertWithStatusAndGeneration("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation + 1, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, generation+1), + cmCert("knCert", "foo", incorrectDNSNames), + nonHTTP01Issuer, + }, + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "certificates"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cmCert("knCert", "foo", correctDNSNames), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatusAndGeneration("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation + 1, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + Reason: notReconciledReason, + Message: notReconciledMessage, + }}, + }, + }, generation+1), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to create Cert-Manager Certificate %s: %v", + "foo/knCert", "inducing failure for update certificates"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to update Cert-Manager Certificate: inducing failure for update certificates"), + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for Certificate %s: %v", + "foo/knCert", "inducing failure for update certificates"), + }, + Key: "foo/knCert", + }, { + Name: "set Knative Certificate ready status with CM Certificate ready status", + Objects: []runtime.Object{ + knCert("knCert", "foo"), + cmCertWithStatus("knCert", "foo", correctDNSNames, cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionTrue}), + nonHTTP01Issuer, + }, + WantStatusUpdates: 
[]clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + NotAfter: notAfter, + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }), + }}, + Key: "foo/knCert", + }, { + Name: "set Knative Certificate unknown status with CM Certificate unknown status", + Objects: []runtime.Object{ + knCert("knCert", "foo"), + cmCertWithStatus("knCert", "foo", correctDNSNames, cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionUnknown}), + nonHTTP01Issuer, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + NotAfter: notAfter, + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + }}, + }, + }), + }}, + Key: "foo/knCert", + }, { + Name: "set Knative Certificate not ready status with CM Certificate not ready status", + Objects: []runtime.Object{ + knCert("knCert", "foo"), + cmCertWithStatus("knCert", "foo", correctDNSNames, cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionFalse}), + nonHTTP01Issuer, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + NotAfter: notAfter, + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionFalse, + Severity: apis.ConditionSeverityError, + }}, + }, + }), + }}, + Key: "foo/knCert", + }, { + Name: "reconcile cm certificate fails", + Key: "foo/knCert", + Objects: []runtime.Object{ + 
knCert("knCert", "foo"), + nonHTTP01Issuer, + }, + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "certificates"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Cert-Manager Certificate knCert/foo: inducing failure for create certificates"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create Cert-Manager Certificate: inducing failure for create certificates"), + }, + WantCreates: []runtime.Object{ + resources.MakeCertManagerCertificate(certmanagerConfig(), knCert("knCert", "foo")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Reason: notReconciledReason, + Severity: apis.ConditionSeverityError, + Message: notReconciledMessage, + }}, + }, + }), + }}, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + knCertificateLister: listers.GetKnCertificateLister(), + cmCertificateLister: listers.GetCMCertificateLister(), + cmChallengeLister: listers.GetCMChallengeLister(), + cmIssuerLister: listers.GetCMClusterIssuerLister(), + svcLister: listers.GetK8sServiceLister(), + certManagerClient: fakecertmanagerclient.Get(ctx), + tracker: &NullTracker{}, + configStore: &testConfigStore{ + config: &config.Config{ + CertManager: certmanagerConfig(), + }, + }, + } + })) +} + +func TestReconcile_HTTP01Challenges(t *testing.T) { + table := TableTest{{ + Name: "fail to set status.HTTP01Challenges", + Key: "foo/knCert", + SkipNamespaceValidation: true, + WantErr: true, + Objects: []runtime.Object{ + knCert("knCert", "foo"), + http01Issuer, + }, + WantCreates: 
[]runtime.Object{ + resources.MakeCertManagerCertificate(certmanagerConfig(), knCert("knCert", "foo")), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Cert-Manager Certificate %s/%s", "foo", "knCert"), + Eventf(corev1.EventTypeWarning, "InternalError", "no challenge solver service for domain %s", correctDNSNames[0]), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Reason: notReconciledReason, + Severity: apis.ConditionSeverityError, + Message: notReconciledMessage, + }}, + }, + }), + }}, + }, { + Name: "set Status.HTTP01Challenges on Knative certificate", + Key: "foo/knCert", + Objects: []runtime.Object{ + cmSolverService(correctDNSNames[0], "foo"), + cmSolverService(correctDNSNames[1], "foo"), + cmChallenge(correctDNSNames[0], "foo"), + cmChallenge(correctDNSNames[1], "foo"), + cmCert("knCert", "foo", correctDNSNames), + knCert("knCert", "foo"), + http01Issuer, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + HTTP01Challenges: []v1alpha1.HTTP01Challenge{{ + URL: &apis.URL{ + Scheme: "http", + Host: correctDNSNames[0], + Path: "/.well-known/acme-challenge/cm-challenge-token", + }, + ServiceName: "cm-solver-" + correctDNSNames[0], + ServiceNamespace: "foo", + }, { + URL: &apis.URL{ + Scheme: "http", + Host: correctDNSNames[1], + Path: "/.well-known/acme-challenge/cm-challenge-token", + }, + ServiceName: "cm-solver-" + correctDNSNames[1], + ServiceNamespace: "foo", + }}, + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + 
Reason: noCMConditionReason, + Message: noCMConditionMessage, + }}, + }, + }), + }}, + }, { + Name: "set Status.HTTP01Challenges on Knative certificate when status failed with InProgress", + Key: "foo/knCert", + Objects: []runtime.Object{ + cmSolverService(correctDNSNames[0], "foo"), + cmSolverService(correctDNSNames[1], "foo"), + cmChallenge(correctDNSNames[0], "foo"), + cmChallenge(correctDNSNames[1], "foo"), + cmCertWithStatus("knCert", "foo", correctDNSNames, cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionFalse, + Reason: "InProgress"}), + knCert("knCert", "foo"), + http01Issuer, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: knCertWithStatus("knCert", "foo", + &v1alpha1.CertificateStatus{ + NotAfter: notAfter, + HTTP01Challenges: []v1alpha1.HTTP01Challenge{{ + URL: &apis.URL{ + Scheme: "http", + Host: correctDNSNames[0], + Path: "/.well-known/acme-challenge/cm-challenge-token", + }, + ServiceName: "cm-solver-" + correctDNSNames[0], + ServiceNamespace: "foo", + }, { + URL: &apis.URL{ + Scheme: "http", + Host: correctDNSNames[1], + Path: "/.well-known/acme-challenge/cm-challenge-token", + }, + ServiceName: "cm-solver-" + correctDNSNames[1], + ServiceNamespace: "foo", + }}, + Status: duckv1.Status{ + ObservedGeneration: generation, + Conditions: duckv1.Conditions{{ + Type: v1alpha1.CertificateConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + Reason: "InProgress", + }}, + }, + }), + }}, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + knCertificateLister: listers.GetKnCertificateLister(), + cmCertificateLister: listers.GetCMCertificateLister(), + cmChallengeLister: listers.GetCMChallengeLister(), + cmIssuerLister: listers.GetCMClusterIssuerLister(), + svcLister: 
listers.GetK8sServiceLister(), + certManagerClient: fakecertmanagerclient.Get(ctx), + tracker: &NullTracker{}, + configStore: &testConfigStore{ + config: &config.Config{ + CertManager: certmanagerConfig(), + }, + }, + } + })) +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ reconciler.ConfigStore = (*testConfigStore)(nil) + +func certmanagerConfig() *config.CertManagerConfig { + return &config.CertManagerConfig{ + IssuerRef: &cmmeta.ObjectReference{ + Kind: "ClusterIssuer", + Name: "Letsencrypt-issuer", + }, + } +} + +func knCert(name, namespace string) *v1alpha1.Certificate { + return knCertWithStatus(name, namespace, &v1alpha1.CertificateStatus{}) +} + +func knCertWithStatus(name, namespace string, status *v1alpha1.CertificateStatus) *v1alpha1.Certificate { + return knCertWithStatusAndGeneration(name, namespace, status, generation) +} + +func knCertWithStatusAndGeneration(name, namespace string, status *v1alpha1.CertificateStatus, gen int) *v1alpha1.Certificate { + return &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: int64(gen), + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: correctDNSNames, + SecretName: "secret0", + }, + Status: *status, + } +} + +func cmCert(name, namespace string, dnsNames []string) *cmv1alpha2.Certificate { + cert := resources.MakeCertManagerCertificate(certmanagerConfig(), knCert(name, namespace)) + cert.Spec.DNSNames = dnsNames + return cert +} + +func cmCertWithStatus(name, namespace string, dnsNames []string, condition cmv1alpha2.CertificateCondition) *cmv1alpha2.Certificate { + cert := cmCert(name, namespace, dnsNames) + cert.Status.Conditions = []cmv1alpha2.CertificateCondition{condition} + cert.Status.NotAfter = notAfter + return cert +} + +func cmChallenge(hostname, namespace string) *acmev1alpha2.Challenge { + return 
&acmev1alpha2.Challenge{ + ObjectMeta: metav1.ObjectMeta{ + Name: "challenge-" + hostname, + Namespace: namespace, + }, + Spec: acmev1alpha2.ChallengeSpec{ + Type: "http01", + DNSName: hostname, + Token: "cm-challenge-token", + }, + } +} + +func cmSolverService(hostname, namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + Name: "challenge-" + hostname, + }}, + Name: "cm-solver-" + hostname, + Namespace: namespace, + Labels: map[string]string{ + httpDomainLabel: fmt.Sprintf("%d", adler32.Checksum([]byte(hostname))), + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 8090, + Protocol: "tcp", + }}, + }, + } + +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager.go new file mode 100644 index 0000000000..db30aca7f7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "github.com/ghodss/yaml" + + cmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" +) + +const ( + issuerRefKey = "issuerRef" + + // CertManagerConfigName is the name of the configmap containing all + // configuration related to Cert-Manager. 
+ CertManagerConfigName = "config-certmanager" +) + +// CertManagerConfig contains Cert-Manager related configuration defined in the +// `config-certmanager` config map. +type CertManagerConfig struct { + IssuerRef *cmeta.ObjectReference +} + +// NewCertManagerConfigFromConfigMap creates an CertManagerConfig from the supplied ConfigMap +func NewCertManagerConfigFromConfigMap(configMap *corev1.ConfigMap) (*CertManagerConfig, error) { + // TODO(zhiminx): do we need to provide the default values here? + // TODO: validation check. + + config := &CertManagerConfig{ + IssuerRef: &cmeta.ObjectReference{}, + } + + if v, ok := configMap.Data[issuerRefKey]; ok { + if err := yaml.Unmarshal([]byte(v), config.IssuerRef); err != nil { + return nil, err + } + } + return config, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager_test.go new file mode 100644 index 0000000000..368bf190a8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/cert_manager_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package config + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + . 
"knative.dev/pkg/configmap/testing" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" +) + +func TestCertManagerConfig(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, CertManagerConfigName) + + if _, err := NewCertManagerConfigFromConfigMap(cm); err != nil { + t.Errorf("NewCertManagerConfigFromConfigMap(actual) = %v", err) + } + + if _, err := NewCertManagerConfigFromConfigMap(example); err != nil { + t.Errorf("NewCertManagerConfigFromConfigMap(actual) = %v", err) + } +} + +func TestIssuerRef(t *testing.T) { + isserRefCases := []struct { + name string + wantErr bool + wantConfig *CertManagerConfig + config *corev1.ConfigMap + }{{ + name: "invalid format", + wantErr: true, + wantConfig: (*CertManagerConfig)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: CertManagerConfigName, + }, + Data: map[string]string{ + issuerRefKey: "wrong format", + }, + }, + }, { + name: "valid IssuerRef", + wantErr: false, + wantConfig: &CertManagerConfig{ + IssuerRef: &cmmeta.ObjectReference{ + Name: "letsencrypt-issuer", + Kind: "ClusterIssuer", + }, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: CertManagerConfigName, + }, + Data: map[string]string{ + issuerRefKey: "kind: ClusterIssuer\nname: letsencrypt-issuer", + }, + }, + }} + + for _, tt := range isserRefCases { + t.Run(tt.name, func(t *testing.T) { + actualConfig, err := NewCertManagerConfigFromConfigMap(tt.config) + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewCertManagerConfigFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) + } + if diff := cmp.Diff(actualConfig, tt.wantConfig); diff != "" { + t.Fatalf("Want %v, but got %v", tt.wantConfig, actualConfig) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/doc.go new file mode 100644 index 
0000000000..bdef45e6e6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Certificate controller depends. +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store.go new file mode 100644 index 0000000000..96fcdcbe03 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" +) + +type cfgKey struct{} + +// Config of CertManager. 
+// +k8s:deepcopy-gen=false +type Config struct { + CertManager *CertManagerConfig +} + +// FromContext fetch config from context. +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +// ToContext adds config to given context. +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is configmap.UntypedStore based config store. +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configmap.UntypedStore based config store. +// +// logger must be non-nil implementation of configmap.Logger (commonly used +// loggers conform) +// +// onAfterStore is a variadic list of callbacks to run +// after the ConfigMap has been processed and stored. +// +// See also: configmap.NewUntypedStore(). +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "certificate", + logger, + configmap.Constructors{ + CertManagerConfigName: NewCertManagerConfigFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +// ToContext adds Store contents to given context. +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +// Load fetches config from Store. 
+func (s *Store) Load() *Config { + return &Config{ + CertManager: s.UntypedLoad(CertManagerConfigName).(*CertManagerConfig).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store_test.go new file mode 100644 index 0000000000..4988e9b44f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/store_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + . "knative.dev/pkg/configmap/testing" + . 
"knative.dev/pkg/logging/testing" +) + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(TestLogger(t)) + + certManagerConfig := ConfigMapFromTestFile(t, CertManagerConfigName) + store.OnConfigChanged(certManagerConfig) + config := FromContext(store.ToContext(context.Background())) + + expected, _ := NewCertManagerConfigFromConfigMap(certManagerConfig) + if diff := cmp.Diff(expected, config.CertManager); diff != "" { + t.Errorf("Unexpected CertManager config (-want, +got): %v", diff) + } +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(TestLogger(t)) + store.OnConfigChanged(ConfigMapFromTestFile(t, CertManagerConfigName)) + config := store.Load() + + config.CertManager.IssuerRef.Kind = "newKind" + newConfig := store.Load() + if newConfig.CertManager.IssuerRef.Kind == "newKind" { + t.Error("CertManager config is not immutable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml new file mode 120000 index 0000000000..e1a32211b0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/testdata/config-certmanager.yaml @@ -0,0 +1 @@ +../../../../../config/cert-manager/config.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..cb9636dbc8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/config/zz_generated.deepcopy.go @@ -0,0 +1,46 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + v1 "github.com/jetstack/cert-manager/pkg/apis/meta/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertManagerConfig) DeepCopyInto(out *CertManagerConfig) { + *out = *in + if in.IssuerRef != nil { + in, out := &in.IssuerRef, &out.IssuerRef + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertManagerConfig. +func (in *CertManagerConfig) DeepCopy() *CertManagerConfig { + if in == nil { + return nil + } + out := new(CertManagerConfig) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/controller.go new file mode 100644 index 0000000000..013f9ccda1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/controller.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/tracker" + "knative.dev/serving/pkg/apis/networking" + cmclient "knative.dev/serving/pkg/client/certmanager/injection/client" + cmchallengeinformer "knative.dev/serving/pkg/client/certmanager/injection/informers/acme/v1alpha2/challenge" + cmcertinformer "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/certificate" + clusterinformer "knative.dev/serving/pkg/client/certmanager/injection/informers/certmanager/v1alpha2/clusterissuer" + kcertinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/certificate/config" +) + +const ( + controllerAgentName = "certificate-controller" +) + +// NewController initializes the controller and is called by the generated code +// Registers eventhandlers to enqueue events. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + knCertificateInformer := kcertinformer.Get(ctx) + cmCertificateInformer := cmcertinformer.Get(ctx) + cmChallengeInformer := cmchallengeinformer.Get(ctx) + clusterIssuerInformer := clusterinformer.Get(ctx) + svcInformer := serviceinformer.Get(ctx) + + c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + knCertificateLister: knCertificateInformer.Lister(), + cmCertificateLister: cmCertificateInformer.Lister(), + cmChallengeLister: cmChallengeInformer.Lister(), + cmIssuerLister: clusterIssuerInformer.Lister(), + svcLister: svcInformer.Lister(), + // TODO(mattmoor): Move this to the base. 
+ certManagerClient: cmclient.Get(ctx), + } + + impl := controller.NewImpl(c, c.Logger, "Certificate") + + c.Logger.Info("Setting up event handlers") + classFilterFunc := reconciler.AnnotationFilterFunc(networking.CertificateClassAnnotationKey, network.CertManagerCertificateClassName, true) + certHandler := cache.FilteringResourceEventHandler{ + FilterFunc: classFilterFunc, + Handler: controller.HandleAll(impl.Enqueue), + } + knCertificateInformer.Informer().AddEventHandler(certHandler) + + cmCertificateInformer.Informer().AddEventHandler(controller.HandleAll(impl.EnqueueControllerOf)) + + c.tracker = tracker.New(impl.EnqueueKey, controller.GetTrackerLease(ctx)) + + svcInformer.Informer().AddEventHandler(controller.HandleAll( + controller.EnsureTypeMeta( + c.tracker.OnChanged, + corev1.SchemeGroupVersion.WithKind("Service"), + ), + )) + + c.Logger.Info("Setting up ConfigMap receivers") + resyncCertOnCertManagerconfigChange := configmap.TypeFilter(&config.CertManagerConfig{})(func(string, interface{}) { + impl.GlobalResync(knCertificateInformer.Informer()) + }) + configStore := config.NewStore(c.Logger.Named("config-store"), resyncCertOnCertManagerconfigChange) + configStore.WatchConfigs(cmw) + c.configStore = configStore + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate.go new file mode 100644 index 0000000000..e7a25cfd37 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler/certificate/config" +) + +// MakeCertManagerCertificate creates a Cert-Manager `Certificate` for requesting a SSL certificate. +func MakeCertManagerCertificate(cmConfig *config.CertManagerConfig, knCert *v1alpha1.Certificate) *cmv1alpha2.Certificate { + cert := &cmv1alpha2.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: knCert.Name, + Namespace: knCert.Namespace, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(knCert)}, + Annotations: knCert.GetAnnotations(), + Labels: knCert.GetLabels(), + }, + Spec: cmv1alpha2.CertificateSpec{ + SecretName: knCert.Spec.SecretName, + DNSNames: knCert.Spec.DNSNames, + IssuerRef: *cmConfig.IssuerRef, + }, + } + return cert +} + +// GetReadyCondition gets the ready condition of a Cert-Manager `Certificate`. 
+func GetReadyCondition(cmCert *cmv1alpha2.Certificate) *cmv1alpha2.CertificateCondition { + for _, cond := range cmCert.Status.Conditions { + if cond.Type == cmv1alpha2.CertificateConditionReady { + return &cond + } + } + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate_test.go new file mode 100644 index 0000000000..e90ff3d210 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/certificate/resources/cert_manager_certificate_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/reconciler/certificate/config" +) + +var cert = &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cert", + Namespace: "test-ns", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + Annotations: map[string]string{ + serving.CreatorAnnotation: "someone", + serving.UpdaterAnnotation: "someone", + }, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: []string{"host1.example.com", "host2.example.com"}, + SecretName: "secret0", + }, +} + +var cmConfig = &config.CertManagerConfig{ + IssuerRef: &cmmeta.ObjectReference{ + Kind: "ClusterIssuer", + Name: "Letsencrypt-issuer", + }, +} + +func TestMakeCertManagerCertificate(t *testing.T) { + want := &cmv1alpha2.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cert", + Namespace: "test-ns", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(cert)}, + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + Annotations: map[string]string{ + serving.CreatorAnnotation: "someone", + serving.UpdaterAnnotation: "someone", + }, + }, + Spec: cmv1alpha2.CertificateSpec{ + SecretName: "secret0", + DNSNames: []string{"host1.example.com", "host2.example.com"}, + IssuerRef: cmmeta.ObjectReference{ + Kind: "ClusterIssuer", + Name: "Letsencrypt-issuer", + }, + }, + } + got := MakeCertManagerCertificate(cmConfig, cert) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MakeCertManagerCertificate (-want, +got) = %s", diff) + } +} + +func TestGetReadyCondition(t *testing.T) { + tests := []struct { + name string + cmCertificate 
*cmv1alpha2.Certificate + want *cmv1alpha2.CertificateCondition + }{{ + name: "ready", + cmCertificate: makeTestCertificate(cmmeta.ConditionTrue, "ready", "ready"), + want: &cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionTrue, + Reason: "ready", + Message: "ready", + }}, { + name: "not ready", + cmCertificate: makeTestCertificate(cmmeta.ConditionFalse, "not ready", "not ready"), + want: &cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionFalse, + Reason: "not ready", + Message: "not ready", + }}, { + name: "unknow", + cmCertificate: makeTestCertificate(cmmeta.ConditionUnknown, "unknown", "unknown"), + want: &cmv1alpha2.CertificateCondition{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cmmeta.ConditionUnknown, + Reason: "unknown", + Message: "unknown", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := GetReadyCondition(test.cmCertificate) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("GetReadyCondition (-want, +got) = %s", diff) + } + }) + } +} + +func makeTestCertificate(cond cmmeta.ConditionStatus, reason, message string) *cmv1alpha2.Certificate { + cert := &cmv1alpha2.Certificate{ + Status: cmv1alpha2.CertificateStatus{ + Conditions: []cmv1alpha2.CertificateCondition{{ + Type: cmv1alpha2.CertificateConditionReady, + Status: cond, + Reason: reason, + Message: message, + }}, + }, + } + return cert +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go new file mode 100644 index 0000000000..5306bf5b49 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go @@ -0,0 +1,364 @@ +/* +Copyright 2018 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/configuration/resources" +) + +// Reconciler implements controller.Reconciler for Configuration resources. +type Reconciler struct { + *reconciler.Base + + // listers index properties about resources + configurationLister listers.ConfigurationLister + revisionLister listers.RevisionLister +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Configuration +// resource with the current status of the resource. 
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + // Get the Configuration resource with this namespace/name. + original, err := c.configurationLister.Configurations(namespace).Get(name) + if errors.IsNotFound(err) { + // The resource no longer exists, in which case we stop processing. + logger.Info("Configuration in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informer's copy. + config := original.DeepCopy() + + // Reconcile this copy of the configuration and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := c.reconcile(ctx, config) + if equality.Semantic.DeepEqual(original.Status, config.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = c.updateStatus(original, config); err != nil { + logger.Warnw("Failed to update configuration status", zap.Error(err)) + c.Recorder.Eventf(config, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status: %v", err) + return err + } + if reconcileErr != nil { + c.Recorder.Event(config, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + return reconcileErr + } + // TODO(mattmoor): Remove this after 0.7 cuts. + // If the spec has changed, then assume we need an upgrade and issue a patch to trigger + // the webhook to upgrade via defaulting. Status updates do not trigger this due to the + // use of the /status resource. 
+ if !equality.Semantic.DeepEqual(original.Spec, config.Spec) { + configurations := v1alpha1.SchemeGroupVersion.WithResource("configurations") + if err := c.MarkNeedsUpgrade(configurations, config.Namespace, config.Name); err != nil { + return err + } + } + return nil +} + +func (c *Reconciler) reconcile(ctx context.Context, config *v1alpha1.Configuration) error { + logger := logging.FromContext(ctx) + if config.GetDeletionTimestamp() != nil { + return nil + } + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. + config.SetDefaults(v1.WithUpgradeViaDefaulting(ctx)) + config.Status.InitializeConditions() + + if err := config.ConvertUp(ctx, &v1beta1.Configuration{}); err != nil { + if ce, ok := err.(*v1alpha1.CannotConvertError); ok { + config.Status.MarkResourceNotConvertible(ce) + } + return err + } + + // Bump observed generation to denote that we have processed this + // generation regardless of success or failure. + config.Status.ObservedGeneration = config.Generation + + // First, fetch the revision that should exist for the current generation. + lcr, err := c.latestCreatedRevision(config) + if errors.IsNotFound(err) { + lcr, err = c.createRevision(ctx, config) + if err != nil { + c.Recorder.Eventf(config, corev1.EventTypeWarning, "CreationFailed", "Failed to create Revision: %v", err) + + // Mark the Configuration as not-Ready since creating + // its latest revision failed. + config.Status.MarkRevisionCreationFailed(err.Error()) + + return fmt.Errorf("failed to create Revision: %w", err) + } + } else if errors.IsAlreadyExists(err) { + // If we get an already-exists error from latestCreatedRevision it means + // that the Revision name already exists for another Configuration or at + // the wrong generation of this configuration. 
+ config.Status.MarkRevisionCreationFailed(err.Error()) + return nil + } else if err != nil { + return fmt.Errorf("failed to get Revision: %w", err) + } + + revName := lcr.Name + + // Second, set this to be the latest revision that we have created. + config.Status.SetLatestCreatedRevisionName(revName) + + // Last, determine whether we should set LatestReadyRevisionName to our + // LatestCreatedRevision based on its readiness. + rc := lcr.Status.GetCondition(v1alpha1.RevisionConditionReady) + switch { + case rc == nil || rc.Status == corev1.ConditionUnknown: + logger.Infof("Revision %q of configuration is not ready", revName) + + case rc.Status == corev1.ConditionTrue: + logger.Infof("Revision %q of configuration is ready", revName) + if config.Status.LatestReadyRevisionName == "" { + // Surface an event for the first revision becoming ready. + c.Recorder.Event(config, corev1.EventTypeNormal, "ConfigurationReady", + "Configuration becomes ready") + } + + case rc.Status == corev1.ConditionFalse: + logger.Infof("Revision %q of configuration has failed", revName) + // TODO(mattmoor): Only emit the event the first time we see this. + config.Status.MarkLatestCreatedFailed(lcr.Name, rc.Message) + c.Recorder.Eventf(config, corev1.EventTypeWarning, "LatestCreatedFailed", + "Latest created revision %q has failed", lcr.Name) + + default: + return fmt.Errorf("unrecognized condition status: %v on revision %q", rc.Status, revName) + } + + if err = c.findAndSetLatestReadyRevision(config); err != nil { + return fmt.Errorf("failed to find and set latest ready revision: %w", err) + } + return nil +} + +// findAndSetLatestReadyRevision finds the last ready revision and sets LatestReadyRevisionName to it. 
+func (c *Reconciler) findAndSetLatestReadyRevision(config *v1alpha1.Configuration) error { + sortedRevisions, err := c.getSortedCreatedRevisions(config) + if err != nil { + return err + } + for _, rev := range sortedRevisions { + if rev.Status.IsReady() { + old, new := config.Status.LatestReadyRevisionName, rev.Name + config.Status.SetLatestReadyRevisionName(rev.Name) + if old != new { + c.Recorder.Eventf(config, corev1.EventTypeNormal, "LatestReadyUpdate", + "LatestReadyRevisionName updated to %q", rev.Name) + } + return nil + } + } + return nil +} + +// getSortedCreatedRevisions returns the list of created revisions sorted in descending +// generation order between the generation of the latest ready revision and config's generation (both inclusive). +func (c *Reconciler) getSortedCreatedRevisions(config *v1alpha1.Configuration) ([]*v1alpha1.Revision, error) { + lister := c.revisionLister.Revisions(config.Namespace) + configSelector := labels.SelectorFromSet(map[string]string{ + serving.ConfigurationLabelKey: config.Name, + }) + if config.Status.LatestReadyRevisionName != "" { + lrr, err := lister.Get(config.Status.LatestReadyRevisionName) + if err != nil { + return nil, err + } + start := lrr.Generation + var generations []string + for i := start; i <= int64(config.Generation); i++ { + generations = append(generations, strconv.FormatInt(i, 10)) + } + + // Add an "In" filter so that the configurations we get back from List have generation + // in range (config's latest ready generation, config's generation] + generationKey := serving.ConfigurationGenerationLabelKey + inReq, err := labels.NewRequirement(generationKey, + selection.In, + generations, + ) + if err != nil { + return nil, err + } + configSelector = configSelector.Add(*inReq) + } + + list, err := lister.List(configSelector) + if err != nil { + return nil, err + } + // Return a sorted list with Generation in descending order + if len(list) > 1 { + sort.Slice(list, func(i, j int) bool { + // BYO name 
always be the first + if config.Spec.Template.Name == list[i].Name { + return true + } + if config.Spec.Template.Name == list[j].Name { + return false + } + intI, errI := strconv.Atoi(list[i].Labels[serving.ConfigurationGenerationLabelKey]) + intJ, errJ := strconv.Atoi(list[j].Labels[serving.ConfigurationGenerationLabelKey]) + if errI != nil || errJ != nil { + return true + } + return intI > intJ + }) + } + return list, nil +} + +// CheckNameAvailability checks that if the named Revision specified by the Configuration +// is available (not found), exists (but matches), or exists with conflict (doesn't match). +func CheckNameAvailability(config *v1alpha1.Configuration, lister listers.RevisionLister) (*v1alpha1.Revision, error) { + // If config.Spec.GetTemplate().Name is set, then we can directly look up + // the revision by name. + name := config.Spec.GetTemplate().Name + if name == "" { + return nil, nil + } + errConflict := errors.NewAlreadyExists(v1alpha1.Resource("revisions"), name) + + rev, err := lister.Revisions(config.Namespace).Get(name) + if errors.IsNotFound(err) { + // Does not exist, we must be good! + // note: for the name to change the generation must change. + return nil, err + } else if err != nil { + return nil, err + } else if !metav1.IsControlledBy(rev, config) { + // If the revision isn't controller by this configuration, then + // do not use it. + return nil, errConflict + } + + // Check the generation on this revision. + generationKey := serving.ConfigurationGenerationLabelKey + expectedValue := resources.RevisionLabelValueForKey(generationKey, config) + if rev.Labels != nil && rev.Labels[generationKey] == expectedValue { + return rev, nil + } + // We only require spec equality because the rest is immutable and the user may have + // annotated or labeled the Revision (beyond what the Configuration might have). 
+ if !equality.Semantic.DeepEqual(config.Spec.GetTemplate().Spec, rev.Spec) { + return nil, errConflict + } + return rev, nil +} + +func (c *Reconciler) latestCreatedRevision(config *v1alpha1.Configuration) (*v1alpha1.Revision, error) { + if rev, err := CheckNameAvailability(config, c.revisionLister); rev != nil || err != nil { + return rev, err + } + + lister := c.revisionLister.Revisions(config.Namespace) + generationKey := serving.ConfigurationGenerationLabelKey + + list, err := lister.List(labels.SelectorFromSet(map[string]string{ + generationKey: resources.RevisionLabelValueForKey(generationKey, config), + serving.ConfigurationLabelKey: config.Name, + })) + + if err == nil && len(list) > 0 { + return list[0], nil + } + + return nil, errors.NewNotFound(v1alpha1.Resource("revisions"), fmt.Sprintf("revision for %s", config.Name)) +} + +func (c *Reconciler) createRevision(ctx context.Context, config *v1alpha1.Configuration) (*v1alpha1.Revision, error) { + logger := logging.FromContext(ctx) + + rev := resources.MakeRevision(config) + created, err := c.ServingClientSet.ServingV1alpha1().Revisions(config.Namespace).Create(rev) + if err != nil { + return nil, err + } + c.Recorder.Eventf(config, corev1.EventTypeNormal, "Created", "Created Revision %q", created.Name) + logger.Infof("Created Revision: %#v", created) + + return created, nil +} + +func (c *Reconciler) updateStatus(existing *v1alpha1.Configuration, desired *v1alpha1.Configuration) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.ServingV1alpha1().Configurations(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. 
+ if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = c.ServingClientSet.ServingV1alpha1().Configurations(desired.Namespace).UpdateStatus(existing) + return err + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration_test.go new file mode 100644 index 0000000000..6698e99189 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration_test.go @@ -0,0 +1,493 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "context" + "testing" + "time" + + // Inject the fake informers we need. + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/configuration/resources" + + . "knative.dev/pkg/reconciler/testing" + . 
"knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +var revisionSpec = v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + }, +} + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + now := time.Now() + + table := TableTest{{ + Name: "bad workqueue key", + Key: "too/many/parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "nop deletion reconcile", + // Test that with a DeletionTimestamp we do nothing. + Objects: []runtime.Object{ + cfg("foo", "delete-pending", 1234, WithConfigDeletionTimestamp), + }, + Key: "foo/delete-pending", + }, { + Name: "create revision matching generation", + Objects: []runtime.Object{ + cfg("no-revisions-yet", "foo", 1234), + }, + WantCreates: []runtime.Object{ + rev("no-revisions-yet", "foo", 1234), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("no-revisions-yet", "foo", 1234, + // The following properties are set when we first reconcile a + // Configuration and a Revision is created. 
+ WithLatestCreated("no-revisions-yet-00001"), WithObservedGen), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Revision %q", "no-revisions-yet-00001"), + }, + Key: "foo/no-revisions-yet", + }, { + Name: "create revision byo name", + Objects: []runtime.Object{ + cfg("byo-name-create", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-create-foo" + }), + }, + WantCreates: []runtime.Object{ + rev("byo-name-create", "foo", 1234, func(rev *v1alpha1.Revision) { + rev.Name = "byo-name-create-foo" + rev.GenerateName = "" + }), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("byo-name-create", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-create-foo" + }, + // The following properties are set when we first reconcile a + // Configuration and a Revision is created. + WithLatestCreated("byo-name-create-foo"), WithObservedGen), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Revision %q", "byo-name-create-foo"), + }, + Key: "foo/byo-name-create", + }, { + Name: "create revision byo name (exists)", + Objects: []runtime.Object{ + cfg("byo-name-exists", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-exists-foo" + }, + // The following properties are set when we first reconcile a + // Configuration and a Revision is created. + WithLatestCreated("byo-name-exists-foo"), WithObservedGen), + rev("byo-name-exists", "foo", 1234, WithCreationTimestamp(now), func(rev *v1alpha1.Revision) { + rev.Name = "byo-name-exists-foo" + rev.GenerateName = "" + }), + }, + Key: "foo/byo-name-exists", + }, { + Name: "create revision byo name (exists, wrong generation, right spec)", + // This example shows what we might see with a `git revert` in GitOps. 
+ Objects: []runtime.Object{ + cfg("byo-name-git-revert", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-git-revert-foo" + }), + rev("byo-name-git-revert", "foo", 1200, WithCreationTimestamp(now), func(rev *v1alpha1.Revision) { + rev.Name = "byo-name-git-revert-foo" + rev.GenerateName = "" + }), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("byo-name-git-revert", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-git-revert-foo" + }, WithLatestCreated("byo-name-git-revert-foo"), WithObservedGen), + }}, + Key: "foo/byo-name-git-revert", + }, { + Name: "create revision byo name (exists @ wrong generation w/ wrong spec)", + Objects: []runtime.Object{ + cfg("byo-name-wrong-gen-wrong-spec", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-wrong-gen-wrong-spec-foo" + }), + rev("byo-name-wrong-gen-wrong-spec", "foo", 1200, func(rev *v1alpha1.Revision) { + rev.Name = "byo-name-wrong-gen-wrong-spec-foo" + rev.GenerateName = "" + rev.Spec.GetContainer().Env = append(rev.Spec.GetContainer().Env, corev1.EnvVar{ + Name: "FOO", + Value: "bar", + }) + }), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("byo-name-wrong-gen-wrong-spec", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-name-wrong-gen-wrong-spec-foo" + }, MarkRevisionCreationFailed(`revisions.serving.knative.dev "byo-name-wrong-gen-wrong-spec-foo" already exists`), WithObservedGen), + }}, + Key: "foo/byo-name-wrong-gen-wrong-spec", + }, { + Name: "create revision byo name (exists not owned)", + Objects: []runtime.Object{ + cfg("byo-rev-not-owned", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-rev-not-owned-foo" + }), + rev("byo-rev-not-owned", "foo", 1200, func(rev *v1alpha1.Revision) { + rev.Name = "byo-rev-not-owned-foo" + rev.GenerateName = "" + 
rev.OwnerReferences = nil + }), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("byo-rev-not-owned", "foo", 1234, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "byo-rev-not-owned-foo" + }, MarkRevisionCreationFailed(`revisions.serving.knative.dev "byo-rev-not-owned-foo" already exists`), WithObservedGen), + }}, + Key: "foo/byo-rev-not-owned", + }, { + Name: "webhook validation failure", + // If we attempt to create a Revision with a bad ContainerConcurrency set, we fail. + WantErr: true, + Objects: []runtime.Object{ + cfg("validation-failure", "foo", 1234, WithConfigContainerConcurrency(-1)), + }, + WantCreates: []runtime.Object{ + rev("validation-failure", "foo", 1234, WithRevContainerConcurrency(-1)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("validation-failure", "foo", 1234, WithConfigContainerConcurrency(-1), + // Expect Revision creation to fail with the following error. + MarkRevisionCreationFailed("expected 0 <= -1 <= 1000: spec.containerConcurrency"), WithObservedGen), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Revision: expected 0 <= -1 <= 1000: spec.containerConcurrency"), + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status: expected 0 <= -1 <= 1000: spec.template.spec.containerConcurrency"), + }, + Key: "foo/validation-failure", + }, { + Name: "reconcile revision matching generation (ready: unknown)", + Objects: []runtime.Object{ + cfg("matching-revision-not-done", "foo", 5432), + rev("matching-revision-not-done", "foo", 5432, + WithCreationTimestamp(now), + WithRevName("matching-revision-not-done-00001"), + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("matching-revision-not-done", "foo", 5432, + // If the Revision already exists, we still update these fields. + // This could happen if the prior status update failed for some reason. 
+ WithLatestCreated("matching-revision-not-done-00001"), WithObservedGen), + }}, + Key: "foo/matching-revision-not-done", + }, { + Name: "reconcile revision matching generation (ready: true)", + Objects: []runtime.Object{ + cfg("matching-revision-done", "foo", 5555, WithLatestCreated("matching-revision-done-00001"), WithObservedGen), + rev("matching-revision-done", "foo", 5555, + WithCreationTimestamp(now), MarkRevisionReady, WithRevName("matching-revision-done-00001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("matching-revision-done", "foo", 5555, WithObservedGen, + // When we see the LatestCreatedRevision become Ready, then we + // update the latest ready revision. + WithLatestCreated("matching-revision-done-00001"), + WithLatestReady("matching-revision-done-00001")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "ConfigurationReady", "Configuration becomes ready"), + Eventf(corev1.EventTypeNormal, "LatestReadyUpdate", "LatestReadyRevisionName updated to %q", + "matching-revision-done-00001"), + }, + Key: "foo/matching-revision-done", + }, { + Name: "reconcile revision matching generation (ready: true, idempotent)", + Objects: []runtime.Object{ + cfg("matching-revision-done-idempotent", "foo", 5566, + WithObservedGen, WithLatestCreated("matching-revision"), WithLatestReady("matching-revision")), + rev("matching-revision-done-idempotent", "foo", 5566, + WithCreationTimestamp(now), MarkRevisionReady, WithRevName("matching-revision")), + }, + Key: "foo/matching-revision-done-idempotent", + }, { + Name: "reconcile revision matching generation (ready: false)", + Objects: []runtime.Object{ + cfg("matching-revision-failed", "foo", 5555, WithLatestCreated("matching-revision"), WithObservedGen), + rev("matching-revision-failed", "foo", 5555, + WithCreationTimestamp(now), MarkContainerMissing, WithRevName("matching-revision")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: 
cfg("matching-revision-failed", "foo", 5555, + WithLatestCreated("matching-revision"), WithObservedGen, + // When the LatestCreatedRevision reports back a failure, + // then we surface that failure. + MarkLatestCreatedFailed("It's the end of the world as we know it")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "LatestCreatedFailed", "Latest created revision %q has failed", + "matching-revision"), + }, + Key: "foo/matching-revision-failed", + }, { + Name: "reconcile revision matching generation (ready: bad)", + Objects: []runtime.Object{ + cfg("bad-condition", "foo", 5555, WithLatestCreated("bad-condition"), WithObservedGen), + rev("bad-condition", "foo", 5555, + WithRevName("bad-condition"), + WithRevStatus(v1alpha1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.RevisionConditionReady, + Status: "Bad", + Severity: "Error", + }}, + }, + })), + }, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `unrecognized condition status: Bad on revision "bad-condition"`), + }, + Key: "foo/bad-condition", + }, { + Name: "failure creating revision", + // We induce a failure creating a revision + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "revisions"), + }, + Objects: []runtime.Object{ + cfg("create-revision-failure", "foo", 99998), + }, + WantCreates: []runtime.Object{ + rev("create-revision-failure", "foo", 99998), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("create-revision-failure", "foo", 99998, + // When we fail to create a Revision is should be surfaced in + // the Configuration status. 
+ MarkRevisionCreationFailed("inducing failure for create revisions"), WithObservedGen), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Revision: inducing failure for create revisions"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create Revision: inducing failure for create revisions"), + }, + Key: "foo/create-revision-failure", + }, { + Name: "failure updating configuration status", + // Induce a failure updating the status of the configuration. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "configurations"), + }, + Objects: []runtime.Object{ + cfg("update-config-failure", "foo", 1234), + }, + WantCreates: []runtime.Object{ + rev("update-config-failure", "foo", 1234), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("update-config-failure", "foo", 1234, + // These would be the status updates after a first + // reconcile, which we use to trigger the update + // where we've induced a failure. 
+ WithLatestCreated("update-config-failure-00001"), WithObservedGen), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Revision %q", "update-config-failure-00001"), + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status: inducing failure for update configurations"), + }, + Key: "foo/update-config-failure", + }, { + Name: "failed revision recovers", + Objects: []runtime.Object{ + cfg("revision-recovers", "foo", 1337, + WithLatestCreated("revision-recovers-00001"), + WithLatestReady("revision-recovers-00001"), + WithObservedGen, + MarkLatestCreatedFailed("Weebles wobble, but they don't fall down")), + rev("revision-recovers", "foo", 1337, + WithCreationTimestamp(now), + WithRevName("revision-recovers-00001"), + MarkRevisionReady, + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("revision-recovers", "foo", 1337, + WithLatestCreated("revision-recovers-00001"), + WithLatestReady("revision-recovers-00001"), + WithObservedGen, + // When a LatestReadyRevision recovers from failure, + // then we should go back to Ready. 
+ ), + }}, + Key: "foo/revision-recovers", + }, { + // The name is a bit misleading but essentially we are testing that + // querying the latest created revision includes the configuration name + // as part of the selector + Name: "two steady state configs with same generation should be a noop", + Objects: []runtime.Object{ + // double-trouble needs to be first for this test to fail + // when no fix is present + cfg("double-trouble", "foo", 1, + WithLatestCreated("double-trouble-00001"), + WithLatestReady("double-trouble-00001"), WithObservedGen), + cfg("first-trouble", "foo", 1, + WithLatestCreated("first-trouble-00001"), + WithLatestReady("first-trouble-00001"), WithObservedGen), + + rev("first-trouble", "foo", 1, + WithRevName("first-trouble-00001"), + WithCreationTimestamp(now), MarkRevisionReady), + rev("double-trouble", "foo", 1, + WithRevName("double-trouble-00001"), + WithCreationTimestamp(now), MarkRevisionReady), + }, + Key: "foo/double-trouble", + }, { + Name: "three revisions with the latest revision failed, the latest ready should be updated to the last ready revision", + Objects: []runtime.Object{ + cfg("threerevs", "foo", 3, + WithLatestCreated("threerevs-00002"), + WithLatestReady("threerevs-00001"), WithObservedGen, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "threerevs-00003" + }, + ), + rev("threerevs", "foo", 1, + WithRevName("threerevs-00001"), + WithCreationTimestamp(now), MarkRevisionReady), + rev("threerevs", "foo", 2, + WithRevName("threerevs-00002"), + WithCreationTimestamp(now), MarkRevisionReady), + rev("threerevs", "foo", 3, + WithRevName("threerevs-00003"), + WithCreationTimestamp(now), MarkInactive("", "")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("threerevs", "foo", 3, + WithLatestCreated("threerevs-00003"), + WithLatestReady("threerevs-00002"), + WithObservedGen, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "threerevs-00003" + }, + ), + }}, + 
WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "LatestReadyUpdate", "LatestReadyRevisionName updated to %q", "threerevs-00002"), + }, + Key: "foo/threerevs", + }, { + Name: "revision not ready, the latest ready should be updated, but the configuration should still be ready==Unknown", + Objects: []runtime.Object{ + cfg("revnotready", "foo", 3, + WithLatestCreated("revnotready-00002"), + WithLatestReady("revnotready-00001"), WithObservedGen, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "revnotready-00003" + }, + ), + rev("revnotready", "foo", 1, + WithRevName("revnotready-00001"), + WithCreationTimestamp(now), MarkRevisionReady), + rev("revnotready", "foo", 2, + WithRevName("revnotready-00002"), + WithCreationTimestamp(now), MarkRevisionReady), + rev("revnotready", "foo", 3, + WithRevName("revnotready-00003"), + WithCreationTimestamp(now)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: cfg("revnotready", "foo", 3, + // The config should NOT be ready, because LCR != LRR + WithLatestCreated("revnotready-00003"), + WithLatestReady("revnotready-00002"), + WithObservedGen, func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "revnotready-00003" + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "LatestReadyUpdate", "LatestReadyRevisionName updated to %q", "revnotready-00002"), + }, + Key: "foo/revnotready", + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + } + })) +} + +func cfg(name, namespace string, generation int64, co ...ConfigOption) *v1alpha1.Configuration { + c := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: generation, + }, + Spec: 
v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: *revisionSpec.DeepCopy(), + }, + }, + } + for _, opt := range co { + opt(c) + } + c.SetDefaults(context.Background()) + return c +} + +func rev(name, namespace string, generation int64, ro ...RevisionOption) *v1alpha1.Revision { + r := resources.MakeRevision(cfg(name, namespace, generation)) + r.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + for _, opt := range ro { + opt(r) + } + return r +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/controller.go new file mode 100644 index 0000000000..27ec982383 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/controller.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configuration + +import ( + "context" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + configurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + "knative.dev/serving/pkg/reconciler" +) + +const controllerAgentName = "configuration-controller" + +// NewController creates a new Configuration controller +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + + configurationInformer := configurationinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + + c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + configurationLister: configurationInformer.Lister(), + revisionLister: revisionInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, "Configurations") + + c.Logger.Info("Setting up event handlers") + configurationInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + revisionInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Configuration")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/queueing_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/queueing_test.go new file mode 100644 index 0000000000..609664a3e9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/queueing_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "context" + "testing" + "time" + + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/configmap" + "knative.dev/pkg/system" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakeconfigurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + "knative.dev/serving/pkg/gc" + + . "knative.dev/pkg/reconciler/testing" +) + +const ( + testNamespace = "test" +) + +func getTestConfiguration() *v1alpha1.Configuration { + cfg := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/configurations/test-config", + Name: "test-config", + Namespace: testNamespace, + }, + Spec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + ServiceAccountName: "test-account", + // corev1.Container has a lot of setting. We try to pass many + // of them here to verify that we pass through the settings to + // the derived Revisions. 
+ Containers: []corev1.Container{{ + Image: "gcr.io/repo/image", + Command: []string{"echo"}, + Args: []string{"hello", "world"}, + WorkingDir: "/tmp", + Env: []corev1.EnvVar{{ + Name: "EDITOR", + Value: "emacs", + }}, + LivenessProbe: &corev1.Probe{ + TimeoutSeconds: 42, + }, + ReadinessProbe: &corev1.Probe{ + TimeoutSeconds: 43, + }, + TerminationMessagePath: "/dev/null", + }}, + }, + }, + }, + }, + }, + } + cfg.SetDefaults(context.Background()) + return cfg +} + +func TestNewConfigurationCallsSyncHandler(t *testing.T) { + ctx, cancel, _ := SetupFakeContextWithCancel(t) + eg := errgroup.Group{} + defer func() { + cancel() + if err := eg.Wait(); err != nil { + t.Fatalf("Error running controller: %v", err) + } + }() + + configMapWatcher := configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: gc.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }) + + ctrl := NewController(ctx, configMapWatcher) + + servingClient := fakeservingclient.Get(ctx) + + h := NewHooks() + + // Check for revision created as a signal that syncHandler ran. 
+ h.OnCreate(&servingClient.Fake, "revisions", func(obj runtime.Object) HookResult { + rev := obj.(*v1alpha1.Revision) + t.Logf("Revision created: %q", rev.Name) + + return HookComplete + }) + + eg.Go(func() error { + return ctrl.Run(2, ctx.Done()) + }) + + config := getTestConfiguration() + configI := fakeconfigurationinformer.Get(ctx) + configI.Informer().GetIndexer().Add(config) + ctrl.Enqueue(config) + + if err := h.WaitForHooks(5 * time.Second); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/doc.go new file mode 100644 index 0000000000..eede0c5b00 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources holds simple functions for synthesizing child resources from +// a Configuration resource and any relevant Configuration controller configuration. 
+package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go new file mode 100644 index 0000000000..9056b83b5d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// MakeRevision creates a revision object from configuration. +func MakeRevision(config *v1alpha1.Configuration) *v1alpha1.Revision { + // Start from the ObjectMeta/Spec inlined in the Configuration resources. + rev := &v1alpha1.Revision{ + ObjectMeta: config.Spec.GetTemplate().ObjectMeta, + Spec: config.Spec.GetTemplate().Spec, + } + // Populate the Namespace and Name. + rev.Namespace = config.Namespace + + if rev.Name == "" { + rev.GenerateName = config.Name + "-" + } + + UpdateRevisionLabels(rev, config) + UpdateRevisionAnnotations(rev, config) + + // Populate OwnerReferences so that deletes cascade. + rev.OwnerReferences = append(rev.OwnerReferences, *kmeta.NewControllerRef(config)) + + return rev +} + +// UpdateRevisionLabels sets the revisions labels given a Configuration. 
+func UpdateRevisionLabels(rev *v1alpha1.Revision, config *v1alpha1.Configuration) { + if rev.Labels == nil { + rev.Labels = make(map[string]string) + } + + for _, key := range []string{ + serving.ConfigurationLabelKey, + serving.ServiceLabelKey, + serving.ConfigurationGenerationLabelKey, + } { + rev.Labels[key] = RevisionLabelValueForKey(key, config) + } +} + +// UpdateRevisionAnnotations sets the revisions annotations given a Configuration's updater annotation. +func UpdateRevisionAnnotations(rev *v1alpha1.Revision, config *v1alpha1.Configuration) { + if rev.Annotations == nil { + rev.Annotations = make(map[string]string) + } + + // Populate the CreatorAnnotation from configuration. + cans := config.GetAnnotations() + if c, ok := cans[serving.UpdaterAnnotation]; ok { + rev.Annotations[serving.CreatorAnnotation] = c + } +} + +// RevisionLabelValueForKey returns the label value for the given key. +func RevisionLabelValueForKey(key string, config *v1alpha1.Configuration) string { + switch key { + case serving.ConfigurationLabelKey: + return config.Name + case serving.ServiceLabelKey: + return config.Labels[serving.ServiceLabelKey] + case serving.ConfigurationGenerationLabelKey: + return fmt.Sprintf("%d", config.Generation) + } + + return "" +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision_test.go new file mode 100644 index 0000000000..8b94a0e44d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision_test.go @@ -0,0 +1,302 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestMakeRevisions(t *testing.T) { + tests := []struct { + name string + configuration *v1alpha1.Configuration + want *v1alpha1.Revision + }{{ + name: "no build", + configuration: &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "no", + Name: "build", + Generation: 10, + }, + Spec: v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + want: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "no", + GenerateName: "build-", + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "build", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "build", + serving.ConfigurationGenerationLabelKey: "10", + serving.ServiceLabelKey: "", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, { + name: "with labels", + configuration: &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "with", + Name: "labels", + Generation: 100, + }, + 
Spec: v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + want: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "with", + GenerateName: "labels-", + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "labels", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "labels", + serving.ConfigurationGenerationLabelKey: "100", + serving.ServiceLabelKey: "", + "foo": "bar", + "baz": "blah", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, { + name: "with annotations", + configuration: &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "with", + Name: "annotations", + Generation: 100, + }, + Spec: v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + want: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "with", + GenerateName: "annotations-", + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "annotations", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "annotations", + serving.ConfigurationGenerationLabelKey: "100", + serving.ServiceLabelKey: "", + }, + Annotations: 
map[string]string{ + "foo": "bar", + "baz": "blah", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, { + name: "with creator annotation from config", + configuration: &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "anno", + Name: "config", + Annotations: map[string]string{ + "serving.knative.dev/creator": "admin", + "serving.knative.dev/lastModifier": "someone", + }, + Generation: 10, + }, + Spec: v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, + }, + want: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "anno", + GenerateName: "config-", + Annotations: map[string]string{ + "serving.knative.dev/creator": "someone", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "config", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "config", + serving.ConfigurationGenerationLabelKey: "10", + serving.ServiceLabelKey: "", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }, { + name: "with creator annotation from config with other annotations", + configuration: &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "anno", + Name: "config", + Annotations: map[string]string{ + "serving.knative.dev/creator": "admin", + "serving.knative.dev/lastModifier": "someone", + }, + Generation: 10, + }, + Spec: v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + 
"foo": "bar", + "baz": "blah", + }, + }, + }, + }, + }, + want: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "anno", + GenerateName: "config-", + Annotations: map[string]string{ + "serving.knative.dev/creator": "someone", + "foo": "bar", + "baz": "blah", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "config", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + Labels: map[string]string{ + serving.ConfigurationLabelKey: "config", + serving.ConfigurationGenerationLabelKey: "10", + serving.ServiceLabelKey: "", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakeRevision(test.configuration) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeRevision (-want, +got) = %v", diff) + } + }) + + t.Run(test.name+"(template)", func(t *testing.T) { + // Test the Template variant. + test.configuration.Spec.Template = test.configuration.Spec.DeprecatedRevisionTemplate + test.configuration.Spec.DeprecatedRevisionTemplate = nil + got := MakeRevision(test.configuration) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeRevision (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/doc.go new file mode 100644 index 0000000000..9f59cd0138 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/doc.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package reconciler defines implementations of the Reconciler interface +// defined at github.com/knative/pkg/controller.Reconciler. These implement +// the basic workhorse functionality of controllers, while leaving the +// shared controller implementation to manage things like the workqueue. +// +// Despite defining a Reconciler, each of the packages here are expected to +// expose a controller constructor like: +// func NewController(...) *controller.Impl { ... } +// These constructors will: +// 1. Construct the Reconciler, +// 2. Construct a controller.Impl with that Reconciler, +// 3. Wire the assorted informers this Reconciler watches to call appropriate +// enqueue methods on the controller. +package reconciler diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/filter.go b/test/vendor/knative.dev/serving/pkg/reconciler/filter.go new file mode 100644 index 0000000000..1b922a08a9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/filter.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reconciler + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// AnnotationFilterFunc creates a FilterFunc only accepting objects with given annotation key and value +func AnnotationFilterFunc(key string, value string, allowUnset bool) func(interface{}) bool { + return func(obj interface{}) bool { + if mo, ok := obj.(metav1.Object); ok { + anno := mo.GetAnnotations() + annoVal, ok := anno[key] + if !ok { + return allowUnset + } + return annoVal == value + } + return false + } +} + +// LabelExistsFilterFunc creates a FilterFunc only accepting objects which have a given label. +func LabelExistsFilterFunc(label string) func(obj interface{}) bool { + return func(obj interface{}) bool { + if mo, ok := obj.(metav1.Object); ok { + labels := mo.GetLabels() + _, ok := labels[label] + return ok + } + return false + } +} + +// LabelFilterFunc creates a FilterFunc only accepting objects where a label is set to a specific value. +func LabelFilterFunc(label string, value string, allowUnset bool) func(interface{}) bool { + return func(obj interface{}) bool { + if mo, ok := obj.(metav1.Object); ok { + labels := mo.GetLabels() + val, ok := labels[label] + if !ok { + return allowUnset + } + return val == value + } + return false + } +} + +// NameFilterFunc creates a FilterFunc only accepting objects with the given name. +func NameFilterFunc(name string) func(interface{}) bool { + return func(obj interface{}) bool { + if mo, ok := obj.(metav1.Object); ok { + return mo.GetName() == name + } + return false + } +} + +// NamespaceFilterFunc creates a FilterFunc only accepting objects in the given namespace. +func NamespaceFilterFunc(namespace string) func(interface{}) bool { + return func(obj interface{}) bool { + if mo, ok := obj.(metav1.Object); ok { + return mo.GetNamespace() == namespace + } + return false + } +} + +// Not inverts the result of the predicate. 
+func Not(f func(interface{}) bool) func(interface{}) bool { + return func(obj interface{}) bool { + return !f(obj) + } +} + +// ChainFilterFuncs creates a FilterFunc which performs an AND of the passed FilterFuncs. +func ChainFilterFuncs(funcs ...func(interface{}) bool) func(interface{}) bool { + return func(obj interface{}) bool { + for _, f := range funcs { + if !f(obj) { + return false + } + } + return true + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/filter_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/filter_test.go new file mode 100644 index 0000000000..f3897e98af --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/filter_test.go @@ -0,0 +1,308 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reconciler + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +const ( + keyToFilter = "testKey" + valueToFilter = "testVal" + nameToFilter = "testName" + namespaceToFilter = "testSpace" +) + +func config(namespace, name string, annos, labels map[string]string) *v1alpha1.Configuration { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Annotations: annos, + Labels: labels, + }, + } +} + +func configWithLabels(labels map[string]string) *v1alpha1.Configuration { + return config(namespaceToFilter, nameToFilter, nil, labels) +} + +func configWithAnnotations(annos map[string]string) *v1alpha1.Configuration { + return config(namespaceToFilter, nameToFilter, annos, nil) +} + +func configWithName(name string) *v1alpha1.Configuration { + return config(namespaceToFilter, name, nil, nil) +} + +func configWithNamespace(namespace string) *v1alpha1.Configuration { + return config(namespace, nameToFilter, nil, nil) +} + +type params struct { + name string + allowUnset bool + in interface{} + want bool +} + +func TestAnnotationFilter(t *testing.T) { + tests := []params{{ + name: "non kubernetes object", + in: struct{}{}, + want: false, + }, { + name: "empty annotations", + in: configWithAnnotations(nil), + want: false, + }, { + name: "empty annotations, allow unset", + allowUnset: true, + in: configWithAnnotations(nil), + want: true, + }, { + name: "other annotations", + in: configWithAnnotations(map[string]string{"anotherKey": "anotherValue"}), + want: false, + }, { + name: "other annotations, allow unset", + allowUnset: true, + in: configWithAnnotations(map[string]string{"anotherKey": "anotherValue"}), + want: true, + }, { + name: "matching key, value mismatch", + in: configWithAnnotations(map[string]string{keyToFilter: "testVal2"}), + want: false, + }, { + name: "matching key, value mismatch, allow unset", + allowUnset: true, + 
in: configWithAnnotations(map[string]string{keyToFilter: "testVal2"}), + want: false, + }, { + name: "match", + in: configWithAnnotations(map[string]string{keyToFilter: valueToFilter}), + want: true, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter := AnnotationFilterFunc(keyToFilter, valueToFilter, test.allowUnset) + got := filter(test.in) + if got != test.want { + t.Errorf("AnnotationFilterFunc() = %v, want %v", got, test.want) + } + }) + } +} + +func TestLabelExistsFilterFunc(t *testing.T) { + ti := []params{{ + name: "label exists", + in: configWithLabels(map[string]string{keyToFilter: valueToFilter}), + want: true, + }, { + name: "empty labels", + in: configWithLabels(map[string]string{}), + want: false, + }, { + name: "non-empty map, the required label doesn't exist", + in: configWithLabels(map[string]string{"randomLabel": ""}), + want: false, + }, { + name: "non kubernetes object", + in: struct{}{}, + want: false, + }} + + for _, test := range ti { + t.Run(test.name, func(t *testing.T) { + filter := LabelExistsFilterFunc(keyToFilter) + got := filter(test.in) + if got != test.want { + t.Errorf("LabelExistsFilterFunc() = %v, want %v", got, test.want) + } + }) + } +} + +func TestLabelFilterFunc(t *testing.T) { + ti := []params{{ + name: "label matches no unset", + in: configWithLabels(map[string]string{keyToFilter: valueToFilter}), + allowUnset: false, + want: true, + }, { + name: "label matches with unset", + in: configWithLabels(map[string]string{keyToFilter: valueToFilter}), + allowUnset: true, + want: true, + }, { + name: "label mismatch no unset", + in: configWithLabels(map[string]string{keyToFilter: "otherval"}), + allowUnset: false, + want: false, + }, { + name: "label mismatch with unset", + in: configWithLabels(map[string]string{keyToFilter: "otherval"}), + allowUnset: true, + want: false, + }, { + name: "label missing no unset", + in: configWithLabels(map[string]string{}), + allowUnset: false, + want: false, + }, 
{ + name: "label missing with unset", + in: configWithLabels(map[string]string{}), + allowUnset: true, + want: true, + }, { + name: "nil labels no unset", + in: configWithLabels(nil), + allowUnset: false, + want: false, + }, { + name: "nil labels with unset", + in: configWithLabels(nil), + allowUnset: true, + want: true, + }, { + name: "non kubernetes object", + in: struct{}{}, + want: false, + }} + + for _, test := range ti { + t.Run(test.name, func(t *testing.T) { + filter := LabelFilterFunc(keyToFilter, valueToFilter, test.allowUnset) + got := filter(test.in) + if got != test.want { + t.Errorf("LabelFilterFunc() = %v, want %v", got, test.want) + } + }) + } +} + +func TestNameFilterFunc(t *testing.T) { + ti := []params{{ + name: "name match", + in: configWithName(nameToFilter), + want: true, + }, { + name: "name mismatch", + in: configWithName("bogus"), + want: false, + }, { + name: "non kubernetes object", + in: struct{}{}, + want: false, + }} + + for _, test := range ti { + t.Run(test.name, func(t *testing.T) { + filter := NameFilterFunc(nameToFilter) + got := filter(test.in) + if got != test.want { + t.Errorf("NameFilterFunc() = %v, want %v", got, test.want) + } + }) + } +} + +func TestNamespaceFilterFunc(t *testing.T) { + ti := []params{{ + name: "namespace match", + in: configWithNamespace(namespaceToFilter), + want: true, + }, { + name: "namespace mismatch", + in: configWithNamespace("bogus"), + want: false, + }, { + name: "non kubernetes object", + in: struct{}{}, + want: false, + }} + + for _, test := range ti { + t.Run(test.name, func(t *testing.T) { + filter := NamespaceFilterFunc(namespaceToFilter) + got := filter(test.in) + if got != test.want { + t.Errorf("NamespaceFilterFunc() = %v, want %v", got, test.want) + } + }) + } +} + +func TestChainFilterFuncs(t *testing.T) { + tc := []struct { + name string + chain []bool + want bool + }{{ + name: "single true", + chain: []bool{true}, + want: true, + }, { + name: "single false", + chain: []bool{false}, + 
want: false, + }, { + name: "second false", + chain: []bool{true, false}, + want: false, + }, { + name: "multi true", + chain: []bool{true, true}, + want: true, + }} + + for _, test := range tc { + t.Run(test.name, func(t *testing.T) { + filters := make([]func(interface{}) bool, len(test.chain)) + for i, chainVal := range test.chain { + filters[i] = func(interface{}) bool { + return chainVal + } + } + filter := ChainFilterFuncs(filters...) + got := filter(nil) + if got != test.want { + t.Errorf("ChainFilterFuncs() = %v, want %v", got, test.want) + } + }) + } +} + +func TestNotFilter(t *testing.T) { + odd := func(o interface{}) bool { + // Return true if odd. + return (o.(int))&1 == 1 + } + if got, want := Not(odd)(1), false; got != want { + t.Errorf("Odd input = %v, want: %v", got, want) + } + if got, want := Not(odd)(2), true; got != want { + t.Errorf("Odd input = %v, want: %v", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/doc.go new file mode 100644 index 0000000000..e44ba8167b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Configuration controller depends. 
+package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store.go new file mode 100644 index 0000000000..0d5f0b6df8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/gc" +) + +type cfgKey struct{} + +// +k8s:deepcopy-gen=false +type Config struct { + RevisionGC *gc.Config +} + +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +func (s *Store) Load() *Config { + return &Config{ + RevisionGC: s.UntypedLoad(gc.ConfigName).(*gc.Config).DeepCopy(), + } +} + +func NewStore(ctx context.Context, onAfterStore ...func(name string, value interface{})) *Store { + return &Store{ + UntypedStore: configmap.NewUntypedStore( + "configuration", + logging.FromContext(ctx), + configmap.Constructors{ + gc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx), + }, + onAfterStore..., + ), + } +} diff --git 
a/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store_test.go new file mode 100644 index 0000000000..3b25b3d3d6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/store_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/serving/pkg/gc" + + . 
"knative.dev/pkg/configmap/testing" +) + +func TestStoreLoadWithContext(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + store := NewStore(ctx) + + gcConfig := ConfigMapFromTestFile(t, "config-gc") + + store.OnConfigChanged(gcConfig) + + config := FromContext(store.ToContext(context.Background())) + + t.Run("revision-gc", func(t *testing.T) { + expected, _ := gc.NewConfigFromConfigMapFunc(ctx)(gcConfig) + if diff := cmp.Diff(expected, config.RevisionGC); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %v", diff) + } + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/testdata/config-gc.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/testdata/config-gc.yaml new file mode 120000 index 0000000000..71c3f7d74d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/testdata/config-gc.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/gc.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e1176acb84 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/config/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/controller.go new file mode 100644 index 0000000000..a127bdf35e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/controller.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gc + +import ( + "context" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + configurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + gcconfig "knative.dev/serving/pkg/gc" + pkgreconciler "knative.dev/serving/pkg/reconciler" + configns "knative.dev/serving/pkg/reconciler/gc/config" +) + +const ( + controllerAgentName = "revision-gc-controller" +) + +// NewController creates a new Garbage Collection controller +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + + configurationInformer := configurationinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + + c := &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + configurationLister: configurationInformer.Lister(), + revisionLister: revisionInformer.Lister(), + } + impl := 
controller.NewImpl(c, c.Logger, "Garbage Collection") + + c.Logger.Info("Setting up event handlers") + + // Since the gc controller came from the configuration controller, having event handlers + // on both configuration and revision matches the existing behaviors of the configuration + // controller. This is to minimize risk heading into v1. + // TODO (taragu): probably one or both of these event handlers are not needed + configurationInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + revisionInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Configuration")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + c.Logger.Info("Setting up ConfigMap receivers with resync func") + configsToResync := []interface{}{ + &gcconfig.Config{}, + } + resync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + // Triggers syncs on all revisions when configuration changes. + impl.GlobalResync(revisionInformer.Informer()) + }) + + c.Logger.Info("Setting up ConfigMap receivers") + configStore := configns.NewStore(logging.WithLogger(ctx, c.Logger.Named("config-store")), resync) + configStore.WatchConfigs(c.ConfigMapWatcher) + c.configStore = configStore + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go new file mode 100644 index 0000000000..6b71dc853a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go @@ -0,0 +1,151 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gc + +import ( + "context" + "sort" + "time" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + pkgreconciler "knative.dev/serving/pkg/reconciler" + configns "knative.dev/serving/pkg/reconciler/gc/config" +) + +// reconciler implements controller.Reconciler for Garbage Collection resources. +type reconciler struct { + *pkgreconciler.Base + + // listers index properties about resources + configurationLister listers.ConfigurationLister + revisionLister listers.RevisionLister + + configStore pkgreconciler.ConfigStore +} + +// Check that our reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Garbage Collection +// resource with the current status of the resource. 
+func (c *reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.configStore.ToContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + // Get the Configuration resource with this namespace/name. + config, err := c.configurationLister.Configurations(namespace).Get(name) + if errors.IsNotFound(err) { + // The resource no longer exists, in which case we stop processing. + logger.Errorf("Configuration %q in work queue no longer exists", key) + return nil + } else if err != nil { + return err + } + + reconcileErr := c.reconcile(ctx, config) + if reconcileErr != nil { + c.Recorder.Event(config, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + } + return reconcileErr +} + +func (c *reconciler) reconcile(ctx context.Context, config *v1alpha1.Configuration) error { + cfg := configns.FromContext(ctx).RevisionGC + logger := logging.FromContext(ctx) + + selector := labels.Set{serving.ConfigurationLabelKey: config.Name}.AsSelector() + revs, err := c.revisionLister.Revisions(config.Namespace).List(selector) + if err != nil { + return err + } + + gcSkipOffset := cfg.StaleRevisionMinimumGenerations + + if gcSkipOffset >= int64(len(revs)) { + return nil + } + + // Sort by creation timestamp descending + sort.Slice(revs, func(i, j int) bool { + return revs[j].CreationTimestamp.Before(&revs[i].CreationTimestamp) + }) + + for _, rev := range revs[gcSkipOffset:] { + if isRevisionStale(ctx, rev, config) { + err := c.ServingClientSet.ServingV1alpha1().Revisions(rev.Namespace).Delete(rev.Name, &metav1.DeleteOptions{}) + if err != nil { + logger.With(zap.Error(err)).Errorf("Failed to delete stale revision %q", rev.Name) + continue + } + } + } + return nil +} + +func isRevisionStale(ctx context.Context, rev *v1alpha1.Revision, config *v1alpha1.Configuration) bool { + if config.Status.LatestReadyRevisionName 
== rev.Name { + return false + } + + cfg := configns.FromContext(ctx).RevisionGC + logger := logging.FromContext(ctx) + + curTime := time.Now() + if rev.ObjectMeta.CreationTimestamp.Add(cfg.StaleRevisionCreateDelay).After(curTime) { + // Revision was created sooner than staleRevisionCreateDelay. Ignore it. + return false + } + + lastPin, err := rev.GetLastPinned() + if err != nil { + if err.(v1alpha1.LastPinnedParseError).Type != v1alpha1.AnnotationParseErrorTypeMissing { + logger.Errorw("Failed to determine revision last pinned", zap.Error(err)) + } else { + // Revision was never pinned and its RevisionConditionReady is not true after staleRevisionCreateDelay. + // It usually happens when ksvc was deployed with wrong configuration. + rc := rev.Status.GetCondition(v1beta1.RevisionConditionReady) + if rc == nil || rc.Status != corev1.ConditionTrue { + return true + } + } + return false + } + + ret := lastPin.Add(cfg.StaleRevisionTimeout).Before(curTime) + if ret { + logger.Infof("Detected stale revision %v with creation time %v and lastPinned time %v.", rev.ObjectMeta.Name, rev.ObjectMeta.CreationTimestamp, lastPin) + } + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc_test.go new file mode 100644 index 0000000000..773ff9afcf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/gc/gc_test.go @@ -0,0 +1,359 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gc + +import ( + "context" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + clientgotesting "k8s.io/client-go/testing" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + . "knative.dev/pkg/reconciler/testing" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + gcconfig "knative.dev/serving/pkg/gc" + pkgreconciler "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/configuration/resources" + "knative.dev/serving/pkg/reconciler/gc/config" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . 
"knative.dev/serving/pkg/testing/v1alpha1" +) + +var revisionSpec = v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + }, +} + +func TestGCReconcile(t *testing.T) { + now := time.Now() + tenMinutesAgo := now.Add(-10 * time.Minute) + + old := now.Add(-11 * time.Minute) + older := now.Add(-12 * time.Minute) + oldest := now.Add(-13 * time.Minute) + + table := TableTest{{ + Name: "delete oldest, keep two", + Objects: []runtime.Object{ + cfg("keep-two", "foo", 5556, + WithLatestCreated("5556"), + WithLatestReady("5556"), + WithObservedGen), + rev("keep-two", "foo", 5554, MarkRevisionReady, + WithRevName("5554"), + WithCreationTimestamp(oldest), + WithLastPinned(tenMinutesAgo)), + rev("keep-two", "foo", 5555, MarkRevisionReady, + WithRevName("5555"), + WithCreationTimestamp(older), + WithLastPinned(tenMinutesAgo)), + rev("keep-two", "foo", 5556, MarkRevisionReady, + WithRevName("5556"), + WithCreationTimestamp(old), + WithLastPinned(tenMinutesAgo)), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + Verb: "delete", + Resource: schema.GroupVersionResource{ + Group: "serving.knative.dev", + Version: "v1alpha1", + Resource: "revisions", + }, + }, + Name: "5554", + }}, + Key: "foo/keep-two", + }, { + Name: "keep oldest when no lastPinned", + Objects: []runtime.Object{ + cfg("keep-no-last-pinned", "foo", 5556, + WithLatestCreated("5556"), + WithLatestReady("5556"), + WithObservedGen), + // No lastPinned so we will keep this. 
+ rev("keep-no-last-pinned", "foo", 5554, MarkRevisionReady, + WithRevName("5554"), + WithCreationTimestamp(oldest)), + rev("keep-no-last-pinned", "foo", 5555, MarkRevisionReady, + WithRevName("5555"), + WithCreationTimestamp(older), + WithLastPinned(tenMinutesAgo)), + rev("keep-no-last-pinned", "foo", 5556, MarkRevisionReady, + WithRevName("5556"), + WithCreationTimestamp(old), + WithLastPinned(tenMinutesAgo)), + }, + Key: "foo/keep-no-last-pinned", + }, { + Name: "keep recent lastPinned", + Objects: []runtime.Object{ + cfg("keep-recent-last-pinned", "foo", 5556, + WithLatestCreated("5556"), + WithLatestReady("5556"), + WithObservedGen), + rev("keep-recent-last-pinned", "foo", 5554, MarkRevisionReady, + WithRevName("5554"), + WithCreationTimestamp(oldest), + // This is an indication that things are still routing here. + WithLastPinned(now)), + rev("keep-recent-last-pinned", "foo", 5555, MarkRevisionReady, + WithRevName("5555"), + WithCreationTimestamp(older), + WithLastPinned(tenMinutesAgo)), + rev("keep-recent-last-pinned", "foo", 5556, MarkRevisionReady, + WithRevName("5556"), + WithCreationTimestamp(old), + WithLastPinned(tenMinutesAgo)), + }, + Key: "foo/keep-recent-last-pinned", + }, { + Name: "keep LatestReadyRevision", + Objects: []runtime.Object{ + // Create a revision where the LatestReady is 5554, but LatestCreated is 5556. + // We should keep LatestReady even if it is old. 
+ cfg("keep-two", "foo", 5556, + WithLatestReady("5554"), + // This comes after 'WithLatestReady' so the + // Configuration's 'Ready' Status is 'Unknown' + WithLatestCreated("5556"), + WithObservedGen), + rev("keep-two", "foo", 5554, MarkRevisionReady, + WithRevName("5554"), + WithCreationTimestamp(oldest), + WithLastPinned(tenMinutesAgo)), + rev("keep-two", "foo", 5555, // Not Ready + WithRevName("5555"), + WithCreationTimestamp(older), + WithLastPinned(tenMinutesAgo)), + rev("keep-two", "foo", 5556, // Not Ready + WithRevName("5556"), + WithCreationTimestamp(old), + WithLastPinned(tenMinutesAgo)), + }, + Key: "foo/keep-two", + }, { + Name: "keep stale revision because of minimum generations", + Objects: []runtime.Object{ + cfg("keep-all", "foo", 5554, + // Don't set the latest ready revision here + // since those by default are always retained + WithLatestCreated("keep-all"), + WithObservedGen), + rev("keep-all", "foo", 5554, + WithRevName("keep-all"), + WithCreationTimestamp(oldest), + WithLastPinned(tenMinutesAgo)), + }, + Key: "foo/keep-all", + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + configStore: &testConfigStore{ + config: &config.Config{ + RevisionGC: &gcconfig.Config{ + StaleRevisionCreateDelay: 5 * time.Minute, + StaleRevisionTimeout: 5 * time.Minute, + StaleRevisionMinimumGenerations: 2, + }, + }, + }, + } + })) +} + +func TestIsRevisionStale(t *testing.T) { + curTime := time.Now() + staleTime := curTime.Add(-10 * time.Minute) + + tests := []struct { + name string + rev *v1alpha1.Revision + latestRev string + want bool + }{{ + name: "fresh revision that was never pinned", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: 
metav1.NewTime(curTime), + }, + }, + want: false, + }, { + name: "stale revision that was never pinned w/ Ready status", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: metav1.NewTime(staleTime), + }, + Status: v1alpha1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.RevisionConditionReady, + Status: "True", + }}, + }, + }, + }, + want: false, + }, { + name: "stale revision that was never pinned w/o Ready status", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: metav1.NewTime(staleTime), + }, + Status: v1alpha1.RevisionStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.RevisionConditionReady, + Status: "Unknown", + }}, + }, + }, + }, + want: true, + }, { + name: "stale revision that was previously pinned", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: metav1.NewTime(staleTime), + Annotations: map[string]string{ + "serving.knative.dev/lastPinned": fmt.Sprintf("%d", staleTime.Unix()), + }, + }, + }, + want: true, + }, { + name: "fresh revision that was previously pinned", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: metav1.NewTime(staleTime), + Annotations: map[string]string{ + "serving.knative.dev/lastPinned": fmt.Sprintf("%d", curTime.Unix()), + }, + }, + }, + want: false, + }, { + name: "stale latest ready revision", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myrev", + CreationTimestamp: metav1.NewTime(staleTime), + Annotations: map[string]string{ + "serving.knative.dev/lastPinned": fmt.Sprintf("%d", staleTime.Unix()), + }, + }, + }, + latestRev: "myrev", + want: false, + }} + + cfgStore := testConfigStore{ + config: &config.Config{ + RevisionGC: &gcconfig.Config{ + StaleRevisionCreateDelay: 5 * time.Minute, + StaleRevisionTimeout: 5 * time.Minute, + 
StaleRevisionMinimumGenerations: 2, + }, + }, + } + ctx := cfgStore.ToContext(context.Background()) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := &v1alpha1.Configuration{ + Status: v1alpha1.ConfigurationStatus{ + ConfigurationStatusFields: v1alpha1.ConfigurationStatusFields{ + LatestReadyRevisionName: test.latestRev, + }, + }, + } + + got := isRevisionStale(ctx, test.rev, cfg) + + if got != test.want { + t.Errorf("IsRevisionStale want %v got %v", test.want, got) + } + }) + } +} + +func cfg(name, namespace string, generation int64, co ...ConfigOption) *v1alpha1.Configuration { + c := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: generation, + }, + Spec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: *revisionSpec.DeepCopy(), + }, + }, + } + for _, opt := range co { + opt(c) + } + c.SetDefaults(context.Background()) + return c +} + +func rev(name, namespace string, generation int64, ro ...RevisionOption) *v1alpha1.Revision { + r := resources.MakeRevision(cfg(name, namespace, generation)) + r.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + for _, opt := range ro { + opt(r) + } + return r +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ pkgreconciler.ConfigStore = (*testConfigStore)(nil) diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/OWNERS b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/doc.go new file mode 100644 index 0000000000..be678e9f9b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Ingress controller depends. +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio.go new file mode 100644 index 0000000000..2e85ed8a4a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/network" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" +) + +const ( + // IstioConfigName is the name of the configmap containing all + // customizations for istio related features. + IstioConfigName = "config-istio" + + // gatewayKeyPrefix is the prefix of all keys to configure Istio gateways for public Ingresses. + gatewayKeyPrefix = "gateway." + + // localGatewayKeyPrefix is the prefix of all keys to configure Istio gateways for public & private Ingresses. + localGatewayKeyPrefix = "local-gateway." +) + +func defaultGateways() []Gateway { + return []Gateway{{ + Namespace: system.Namespace(), + Name: networking.KnativeIngressGateway, + ServiceURL: fmt.Sprintf("istio-ingressgateway.istio-system.svc.%s", + network.GetClusterDomainName()), + }} +} + +func defaultLocalGateways() []Gateway { + return []Gateway{{ + Namespace: system.Namespace(), + Name: networking.ClusterLocalGateway, + ServiceURL: fmt.Sprintf(networking.ClusterLocalGateway+".istio-system.svc.%s", + network.GetClusterDomainName()), + }} +} + +// Gateway specifies the name of the Gateway and the K8s Service backing it. +type Gateway struct { + Namespace string + Name string + ServiceURL string +} + +// QualifiedName returns gateway name in '{namespace}/{name}' format. +func (g Gateway) QualifiedName() string { + return g.Namespace + "/" + g.Name +} + +// Istio contains istio related configuration defined in the +// istio config map. +type Istio struct { + // IngressGateway specifies the gateway urls for public Ingress. + IngressGateways []Gateway + + // LocalGateway specifies the gateway urls for public & private Ingress. 
+ LocalGateways []Gateway +} + +func parseGateways(configMap *corev1.ConfigMap, prefix string) ([]Gateway, error) { + urls := map[string]string{} + gatewayNames := []string{} + for k, v := range configMap.Data { + if !strings.HasPrefix(k, prefix) || k == prefix { + continue + } + gatewayName, serviceURL := k[len(prefix):], v + if errs := validation.IsDNS1123Subdomain(serviceURL); len(errs) > 0 { + return nil, fmt.Errorf("invalid gateway format: %v", errs) + } + gatewayNames = append(gatewayNames, gatewayName) + urls[gatewayName] = serviceURL + } + sort.Strings(gatewayNames) + gateways := make([]Gateway, len(gatewayNames)) + for i, gatewayName := range gatewayNames { + var namespace, name string + parts := strings.SplitN(gatewayName, ".", 2) + if len(parts) == 1 { + namespace = system.Namespace() + name = parts[0] + } else { + namespace = parts[0] + name = parts[1] + } + gateways[i] = Gateway{ + Namespace: namespace, + Name: name, + ServiceURL: urls[gatewayName], + } + } + return gateways, nil +} + +// NewIstioFromConfigMap creates an Istio config from the supplied ConfigMap +func NewIstioFromConfigMap(configMap *corev1.ConfigMap) (*Istio, error) { + gateways, err := parseGateways(configMap, gatewayKeyPrefix) + if err != nil { + return nil, err + } + if len(gateways) == 0 { + gateways = defaultGateways() + } + localGateways, err := parseGateways(configMap, localGatewayKeyPrefix) + if err != nil { + return nil, err + } + if len(localGateways) == 0 { + localGateways = defaultLocalGateways() + } + localGateways = removeMeshGateway(localGateways) + return &Istio{ + IngressGateways: gateways, + LocalGateways: localGateways, + }, nil +} + +func removeMeshGateway(gateways []Gateway) []Gateway { + gws := []Gateway{} + for _, g := range gateways { + if g.Name != "mesh" { + gws = append(gws, g) + } + } + return gws +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio_test.go 
b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio_test.go new file mode 100644 index 0000000000..2a7199c93c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/istio_test.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/system" + + . 
"knative.dev/pkg/configmap/testing" + _ "knative.dev/pkg/system/testing" +) + +func TestIstio(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, IstioConfigName) + + if _, err := NewIstioFromConfigMap(cm); err != nil { + t.Errorf("NewIstioFromConfigMap(actual) = %v", err) + } + + if _, err := NewIstioFromConfigMap(example); err != nil { + t.Errorf("NewIstioFromConfigMap(example) = %v", err) + } +} + +func TestQualifiedName(t *testing.T) { + g := Gateway{ + Namespace: "foo", + Name: "bar", + } + expected := "foo/bar" + saw := g.QualifiedName() + if saw != expected { + t.Errorf("Expected %q, saw %q", expected, saw) + } +} + +func TestGatewayConfiguration(t *testing.T) { + gatewayConfigTests := []struct { + name string + wantErr bool + wantIstio interface{} + config *corev1.ConfigMap + }{{ + name: "gateway configuration with no network input", + wantIstio: &Istio{ + IngressGateways: defaultGateways(), + LocalGateways: defaultLocalGateways(), + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + }, + }, { + name: "gateway configuration with invalid url", + wantErr: true, + wantIstio: (*Istio)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "gateway.invalid": "_invalid", + }, + }, + }, { + name: "gateway configuration with valid url", + wantErr: false, + wantIstio: &Istio{ + IngressGateways: []Gateway{{ + Namespace: "knative-testing", + Name: "knative-ingress-freeway", + ServiceURL: "istio-ingressfreeway.istio-system.svc.cluster.local", + }}, + LocalGateways: defaultLocalGateways(), + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "gateway.knative-ingress-freeway": "istio-ingressfreeway.istio-system.svc.cluster.local", + }, + }, + }, { + name: "gateway configuration 
in custom namespace with valid url", + wantErr: false, + wantIstio: &Istio{ + IngressGateways: []Gateway{{ + Namespace: "custom-namespace", + Name: "custom-gateway", + ServiceURL: "istio-ingressfreeway.istio-system.svc.cluster.local", + }}, + LocalGateways: defaultLocalGateways(), + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "gateway.custom-namespace.custom-gateway": "istio-ingressfreeway.istio-system.svc.cluster.local", + }, + }, + }, { + name: "gateway configuration in custom namespace with invalid url", + wantErr: true, + wantIstio: (*Istio)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "gateway.custom-namespace.invalid": "_invalid", + }, + }, + }, { + name: "local gateway configuration with valid url", + wantErr: false, + wantIstio: &Istio{ + IngressGateways: defaultGateways(), + LocalGateways: []Gateway{{ + Namespace: "knative-testing", + Name: "knative-ingress-backroad", + ServiceURL: "istio-ingressbackroad.istio-system.svc.cluster.local", + }}, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "local-gateway.knative-ingress-backroad": "istio-ingressbackroad.istio-system.svc.cluster.local", + }, + }, + }, { + name: "local gateway configuration with invalid url", + wantErr: true, + wantIstio: (*Istio)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "local-gateway.invalid": "_invalid", + }, + }, + }, { + name: "local gateway configuration in custom namespace with valid url", + wantErr: false, + wantIstio: &Istio{ + IngressGateways: defaultGateways(), + LocalGateways: []Gateway{{ + Namespace: "custom-namespace", + Name: 
"custom-local-gateway", + ServiceURL: "istio-ingressbackroad.istio-system.svc.cluster.local", + }}, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "local-gateway.custom-namespace.custom-local-gateway": "istio-ingressbackroad.istio-system.svc.cluster.local", + }, + }, + }, { + name: "local gateway configuration in custom namespace with invalid url", + wantErr: true, + wantIstio: (*Istio)(nil), + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "local-gateway.custom-namespace.invalid": "_invalid", + }, + }, + }, { + name: "local gateway configuration with mesh", + wantErr: false, + wantIstio: &Istio{ + IngressGateways: defaultGateways(), + LocalGateways: []Gateway{}, + }, + config: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: IstioConfigName, + }, + Data: map[string]string{ + "local-gateway.mesh": "mesh", + }, + }, + }} + + for _, tt := range gatewayConfigTests { + t.Run(tt.name, func(t *testing.T) { + actualIstio, err := NewIstioFromConfigMap(tt.config) + if (err != nil) != tt.wantErr { + t.Fatalf("Test: %q; NewIstioFromConfigMap() error = %v, WantErr %v", tt.name, err, tt.wantErr) + } + + if diff := cmp.Diff(actualIstio, tt.wantIstio); diff != "" { + t.Fatalf("Want %v, but got %v", tt.wantIstio, actualIstio) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store.go new file mode 100644 index 0000000000..3b004cdb4d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/serving/pkg/network" +) + +type cfgKey struct{} + +// Config of Istio. +// +k8s:deepcopy-gen=false +type Config struct { + Istio *Istio + Network *network.Config +} + +// FromContext fetch config from context. +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +// ToContext adds config to given context. +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is configmap.UntypedStore based config store. +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configmap.UntypedStore based config store. +// +// logger must be non-nil implementation of configmap.Logger (commonly used +// loggers conform) +// +// onAfterStore is a variadic list of callbacks to run +// after the ConfigMap has been processed and stored. +// +// See also: configmap.NewUntypedStore(). +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "ingress", + logger, + configmap.Constructors{ + IstioConfigName: NewIstioFromConfigMap, + network.ConfigName: network.NewConfigFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +// ToContext adds Store contents to given context. +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +// Load fetches config from Store. 
+func (s *Store) Load() *Config { + return &Config{ + Istio: s.UntypedLoad(IstioConfigName).(*Istio).DeepCopy(), + Network: s.UntypedLoad(network.ConfigName).(*network.Config).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store_test.go new file mode 100644 index 0000000000..877c363248 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/store_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + logtesting "knative.dev/pkg/logging/testing" + + . 
"knative.dev/pkg/configmap/testing" + "knative.dev/serving/pkg/network" +) + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + istioConfig := ConfigMapFromTestFile(t, IstioConfigName) + networkConfig := ConfigMapFromTestFile(t, network.ConfigName) + store.OnConfigChanged(istioConfig) + store.OnConfigChanged(networkConfig) + config := FromContext(store.ToContext(context.Background())) + + expectedIstio, _ := NewIstioFromConfigMap(istioConfig) + if diff := cmp.Diff(expectedIstio, config.Istio); diff != "" { + t.Errorf("Unexpected istio config (-want, +got): %v", diff) + } + + expectNetworkConfig, _ := network.NewConfigFromConfigMap(networkConfig) + if diff := cmp.Diff(expectNetworkConfig, config.Network); diff != "" { + t.Errorf("Unexpected TLS mode (-want, +got): %s", diff) + } +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + store.OnConfigChanged(ConfigMapFromTestFile(t, IstioConfigName)) + store.OnConfigChanged(ConfigMapFromTestFile(t, network.ConfigName)) + + config := store.Load() + + config.Istio.IngressGateways = []Gateway{{Name: "mutated", ServiceURL: "mutated"}} + config.Network.HTTPProtocol = network.HTTPRedirected + + newConfig := store.Load() + + if newConfig.Istio.IngressGateways[0].Name == "mutated" { + t.Error("Istio config is not immutable") + } + if newConfig.Network.HTTPProtocol == network.HTTPRedirected { + t.Error("Network config is not immuable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml new file mode 120000 index 0000000000..7f36983365 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-istio.yaml @@ -0,0 +1 @@ +../../../../../config/istio-ingress/config.yaml \ No newline at end of file diff --git 
a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml new file mode 120000 index 0000000000..56cb332a04 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/testdata/config-network.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/network.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..db64b9144d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/config/zz_generated.deepcopy.go @@ -0,0 +1,63 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. 
+func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Istio) DeepCopyInto(out *Istio) { + *out = *in + if in.IngressGateways != nil { + in, out := &in.IngressGateways, &out.IngressGateways + *out = make([]Gateway, len(*in)) + copy(*out, *in) + } + if in.LocalGateways != nil { + in, out := &in.LocalGateways, &out.LocalGateways + *out = make([]Gateway, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Istio. +func (in *Istio) DeepCopy() *Istio { + if in == nil { + return nil + } + out := new(Istio) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/controller.go new file mode 100644 index 0000000000..69f870da02 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/controller.go @@ -0,0 +1,132 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "context" + + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + podinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod" + secretinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/secret" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/tracker" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + ingressinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress" + gatewayinformer "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway" + virtualserviceinformer "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/network/status" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/ingress/config" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +const ( + controllerAgentName = "ingress-controller" +) + +// NewController works as a constructor for Ingress Controller +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + virtualServiceInformer := virtualserviceinformer.Get(ctx) + gatewayInformer := gatewayinformer.Get(ctx) + secretInformer := secretinformer.Get(ctx) + ingressInformer := ingressinformer.Get(ctx) + + c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + virtualServiceLister: virtualServiceInformer.Lister(), + gatewayLister: gatewayInformer.Lister(), + secretLister: secretInformer.Lister(), + ingressLister: ingressInformer.Lister(), + finalizer: ingressFinalizer, + } + impl := controller.NewImpl(c, c.Logger, "Ingresses") + + c.Logger.Info("Setting up Ingress event handlers") + myFilterFunc 
:= reconciler.AnnotationFilterFunc(networking.IngressClassAnnotationKey, network.IstioIngressClassName, true) + ingressHandler := cache.FilteringResourceEventHandler{ + FilterFunc: myFilterFunc, + Handler: controller.HandleAll(impl.Enqueue), + } + ingressInformer.Informer().AddEventHandler(ingressHandler) + + virtualServiceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: myFilterFunc, + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + c.Logger.Info("Setting up ConfigMap receivers") + configsToResync := []interface{}{ + &config.Istio{}, + &network.Config{}, + } + resyncIngressesOnConfigChange := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + impl.FilteredGlobalResync(myFilterFunc, ingressInformer.Informer()) + }) + configStore := config.NewStore(c.Logger.Named("config-store"), resyncIngressesOnConfigChange) + configStore.WatchConfigs(cmw) + c.configStore = configStore + + c.Logger.Info("Setting up statusManager") + endpointsInformer := endpointsinformer.Get(ctx) + serviceInformer := serviceinformer.Get(ctx) + podInformer := podinformer.Get(ctx) + resyncOnIngressReady := func(ing *v1alpha1.Ingress) { + impl.EnqueueKey(types.NamespacedName{Namespace: ing.GetNamespace(), Name: ing.GetName()}) + } + statusProber := status.NewProber( + c.Logger.Named("status-manager"), + NewProbeTargetLister( + c.Logger.Named("probe-lister"), + gatewayInformer.Lister(), + endpointsInformer.Lister(), + serviceInformer.Lister()), + resyncOnIngressReady) + c.statusManager = statusProber + statusProber.Start(ctx.Done()) + + ingressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + // Cancel probing when a VirtualService is deleted + DeleteFunc: statusProber.CancelIngressProbing, + }) + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + // Cancel probing when a Pod is deleted + DeleteFunc: statusProber.CancelPodProbing, + }) + + c.Logger.Info("Setting up secret informer 
event handler") + tracker := tracker.New(impl.EnqueueKey, controller.GetTrackerLease(ctx)) + c.tracker = tracker + + secretInformer.Informer().AddEventHandler(controller.HandleAll( + controller.EnsureTypeMeta( + tracker.OnChanged, + corev1.SchemeGroupVersion.WithKind("Secret"), + ), + )) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/doc.go new file mode 100644 index 0000000000..c4597b76db --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +Package ingress implements a kubernetes controller which tracks Ingress resource +and reconcile VirtualService as its child resource. + +*/ +package ingress diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress.go new file mode 100644 index 0000000000..dab6601726 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress.go @@ -0,0 +1,490 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + "knative.dev/pkg/logging" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + + "knative.dev/pkg/controller" + "knative.dev/pkg/tracker" + istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" + + "go.uber.org/zap" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/network/status" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/ingress/config" + "knative.dev/serving/pkg/reconciler/ingress/resources" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + istioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" + coreaccessor "knative.dev/serving/pkg/reconciler/accessor/core" + istioaccessor "knative.dev/serving/pkg/reconciler/accessor/istio" +) + +const ( + virtualServiceNotReconciled = "ReconcileVirtualServiceFailed" + notReconciledReason = 
"ReconcileIngressFailed" + notReconciledMessage = "Ingress reconciliation failed" +) + +// ingressfinalizer is the name that we put into the resource finalizer list, e.g. +// metadata: +// finalizers: +// - ingresses.networking.internal.knative.dev +var ( + ingressResource = v1alpha1.Resource("ingresses") + ingressFinalizer = ingressResource.String() +) + +// Reconciler implements the control loop for the Ingress resources. +type Reconciler struct { + *reconciler.Base + + virtualServiceLister istiolisters.VirtualServiceLister + gatewayLister istiolisters.GatewayLister + secretLister corev1listers.SecretLister + ingressLister listers.IngressLister + + configStore reconciler.ConfigStore + tracker tracker.Interface + finalizer string + + statusManager status.Manager +} + +var ( + _ controller.Reconciler = (*Reconciler)(nil) + _ coreaccessor.SecretAccessor = (*Reconciler)(nil) + _ istioaccessor.VirtualServiceAccessor = (*Reconciler)(nil) +) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Ingress resource +// with the current status of the resource. +func (r *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = r.configStore.ToContext(ctx) + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + ns, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorf("invalid resource key: %s", key) + return nil + } + + // Get the Ingress resource with this namespace and name. + original, err := r.ingressLister.Ingresses(ns).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. 
+ logger.Info("Ingress in work queue no longer exists") + return nil + } else if err != nil { + return err + } + // Don't modify the informers copy + ingress := original.DeepCopy() + + // Reconcile this copy of the Ingress and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := r.reconcileIngress(ctx, ingress) + if reconcileErr != nil { + r.Recorder.Event(ingress, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + ingress.Status.MarkIngressNotReady(notReconciledReason, notReconciledMessage) + } + if equality.Semantic.DeepEqual(original.Status, ingress.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else { + if err = r.updateStatus(original, ingress); err != nil { + logger.Warnw("Failed to update Ingress status", zap.Error(err)) + r.Recorder.Eventf(ingress, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for Ingress %q: %v", ingress.GetName(), err) + return err + } + + logger.Infof("Updated status for Ingress %q", ingress.GetName()) + r.Recorder.Eventf(ingress, corev1.EventTypeNormal, "Updated", + "Updated status for Ingress %q", ingress.GetName()) + } + return reconcileErr +} + +func (r *Reconciler) reconcileIngress(ctx context.Context, ing *v1alpha1.Ingress) error { + logger := logging.FromContext(ctx) + if ing.GetDeletionTimestamp() != nil { + return r.reconcileDeletion(ctx, ing) + } + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. 
+ ing.SetDefaults(ctx) + + ing.Status.InitializeConditions() + logger.Infof("Reconciling ingress: %#v", ing) + + gatewayNames := qualifiedGatewayNamesFromContext(ctx) + vses, err := resources.MakeVirtualServices(ing, gatewayNames) + if err != nil { + return err + } + + // First, create the VirtualServices. + logger.Infof("Creating/Updating VirtualServices") + ing.Status.ObservedGeneration = ing.GetGeneration() + if err := r.reconcileVirtualServices(ctx, ing, vses); err != nil { + ing.Status.MarkLoadBalancerFailed(virtualServiceNotReconciled, err.Error()) + return err + } + + if r.shouldReconcileTLS(ing) { + // Add the finalizer before adding `Servers` into Gateway so that we can be sure + // the `Servers` get cleaned up from Gateway. + if err := r.ensureFinalizer(ing); err != nil { + return err + } + + originSecrets, err := resources.GetSecrets(ing, r.secretLister) + if err != nil { + return err + } + targetSecrets, err := resources.MakeSecrets(ctx, originSecrets, ing) + if err != nil { + return err + } + if err := r.reconcileCertSecrets(ctx, ing, targetSecrets); err != nil { + return err + } + + for _, gw := range config.FromContext(ctx).Istio.IngressGateways { + ns, err := resources.ServiceNamespaceFromURL(gw.ServiceURL) + if err != nil { + return err + } + desired, err := resources.MakeTLSServers(ing, ns, originSecrets) + if err != nil { + return err + } + if err := r.reconcileGateway(ctx, ing, gw, desired); err != nil { + return err + } + } + } + + // Update status + ing.Status.MarkNetworkConfigured() + + ready, err := r.statusManager.IsReady(ctx, ing) + if err != nil { + return fmt.Errorf("failed to probe Ingress %s/%s: %w", ing.GetNamespace(), ing.GetName(), err) + } + if ready { + lbs := getLBStatus(gatewayServiceURLFromContext(ctx, ing)) + publicLbs := getLBStatus(publicGatewayServiceURLFromContext(ctx)) + privateLbs := getLBStatus(privateGatewayServiceURLFromContext(ctx)) + ing.Status.MarkLoadBalancerReady(lbs, publicLbs, privateLbs) + } else { + 
ing.Status.MarkLoadBalancerNotReady() + } + + // TODO(zhiminx): Mark Route status to indicate that Gateway is configured. + logger.Info("Ingress successfully synced") + return nil +} + +func (r *Reconciler) reconcileCertSecrets(ctx context.Context, ing *v1alpha1.Ingress, desiredSecrets []*corev1.Secret) error { + for _, certSecret := range desiredSecrets { + // We track the origin and desired secrets so that desired secrets could be synced accordingly when the origin TLS certificate + // secret is refreshed. + r.tracker.Track(resources.SecretRef(certSecret.Namespace, certSecret.Name), ing) + r.tracker.Track(resources.SecretRef( + certSecret.Labels[networking.OriginSecretNamespaceLabelKey], + certSecret.Labels[networking.OriginSecretNameLabelKey]), ing) + if _, err := coreaccessor.ReconcileSecret(ctx, ing, certSecret, r); err != nil { + if kaccessor.IsNotOwned(err) { + ing.Status.MarkResourceNotOwned("Secret", certSecret.Name) + } + return err + } + } + return nil +} + +func (r *Reconciler) reconcileVirtualServices(ctx context.Context, ing *v1alpha1.Ingress, + desired []*v1alpha3.VirtualService) error { + // First, create all needed VirtualServices. + kept := sets.NewString() + for _, d := range desired { + if d.GetAnnotations()[networking.IngressClassAnnotationKey] != network.IstioIngressClassName { + // We do not create resources that do not have istio ingress class annotation. + // As a result, obsoleted resources will be cleaned up. + continue + } + if _, err := istioaccessor.ReconcileVirtualService(ctx, ing, d, r); err != nil { + if kaccessor.IsNotOwned(err) { + ing.Status.MarkResourceNotOwned("VirtualService", d.Name) + } + return err + } + kept.Insert(d.Name) + } + + // Now, remove the extra ones. + // TODO(https://github.com/knative/serving/issues/6363): Switch to use networking.IngressLabelKey instead. 
+ vses, err := r.virtualServiceLister.VirtualServices(resources.VirtualServiceNamespace(ing)).List( + labels.Set(map[string]string{ + serving.RouteLabelKey: ing.GetLabels()[serving.RouteLabelKey], + serving.RouteNamespaceLabelKey: ing.GetLabels()[serving.RouteNamespaceLabelKey]}).AsSelector()) + if err != nil { + return fmt.Errorf("failed to get VirtualServices: %w", err) + } + for _, vs := range vses { + n, ns := vs.Name, vs.Namespace + if kept.Has(n) { + continue + } + if !metav1.IsControlledBy(vs, ing) { + // We shouldn't remove resources not controlled by us. + continue + } + if err = r.IstioClientSet.NetworkingV1alpha3().VirtualServices(ns).Delete(n, &metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("failed to delete VirtualService: %w", err) + } + } + return nil +} + +func (r *Reconciler) reconcileDeletion(ctx context.Context, ing *v1alpha1.Ingress) error { + logger := logging.FromContext(ctx) + + // If our finalizer is first, delete the `Servers` from Gateway for this Ingress, + // and remove the finalizer. + if len(ing.GetFinalizers()) == 0 || ing.GetFinalizers()[0] != r.finalizer { + return nil + } + istiocfg := config.FromContext(ctx).Istio + logger.Infof("Cleaning up Gateway Servers for Ingress %s", ing.GetName()) + for _, gws := range [][]config.Gateway{istiocfg.IngressGateways, istiocfg.LocalGateways} { + for _, gw := range gws { + if err := r.reconcileGateway(ctx, ing, gw, []*istiov1alpha3.Server{}); err != nil { + return err + } + } + } + + // Update the Ingress to remove the finalizer. + logger.Info("Removing finalizer") + ing.SetFinalizers(ing.GetFinalizers()[1:]) + _, err := r.ServingClientSet.NetworkingV1alpha1().Ingresses(ing.GetNamespace()).Update(ing) + return err +} + +// Update the Status of the Ingress. Caller is responsible for checking +// for semantic differences before calling. 
+func (r *Reconciler) updateStatus(existing *v1alpha1.Ingress, desired *v1alpha1.Ingress) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = r.ServingClientSet.NetworkingV1alpha1().Ingresses(desired.GetNamespace()).Get(desired.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = r.ServingClientSet.NetworkingV1alpha1().Ingresses(existing.GetNamespace()).UpdateStatus(existing) + return err + }) +} + +func (r *Reconciler) ensureFinalizer(ing *v1alpha1.Ingress) error { + finalizers := sets.NewString(ing.GetFinalizers()...) + if finalizers.Has(r.finalizer) { + return nil + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": append(ing.GetFinalizers(), r.finalizer), + "resourceVersion": ing.GetResourceVersion(), + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + _, err = r.ServingClientSet.NetworkingV1alpha1().Ingresses(ing.GetNamespace()).Patch(ing.GetName(), types.MergePatchType, patch) + return err +} + +func (r *Reconciler) reconcileGateway(ctx context.Context, ing *v1alpha1.Ingress, gw config.Gateway, desired []*istiov1alpha3.Server) error { + // TODO(zhiminx): Need to handle the scenario when deleting Ingress. In this scenario, + // the Gateway servers of the Ingress need also be removed from Gateway. + gateway, err := r.gatewayLister.Gateways(gw.Namespace).Get(gw.Name) + if err != nil { + // Unlike VirtualService, a default gateway needs to be existent. + // It should be installed when installing Knative. 
+ return fmt.Errorf("failed to get Gateway: %w", err) + } + + existing := resources.GetServers(gateway, ing) + existingHTTPServer := resources.GetHTTPServer(gateway) + if existingHTTPServer != nil { + existing = append(existing, existingHTTPServer) + } + + desiredHTTPServer := resources.MakeHTTPServer(config.FromContext(ctx).Network.HTTPProtocol, []string{"*"}) + if desiredHTTPServer != nil { + desired = append(desired, desiredHTTPServer) + } + + if equality.Semantic.DeepEqual(existing, desired) { + return nil + } + + copy := gateway.DeepCopy() + copy = resources.UpdateGateway(copy, desired, existing) + if _, err := r.IstioClientSet.NetworkingV1alpha3().Gateways(copy.Namespace).Update(copy); err != nil { + return fmt.Errorf("failed to update Gateway: %w", err) + } + r.Recorder.Eventf(ing, corev1.EventTypeNormal, "Updated", "Updated Gateway %s/%s", gateway.Namespace, gateway.Name) + return nil +} + +// GetKubeClient returns the client to access k8s resources. +func (r *Reconciler) GetKubeClient() kubernetes.Interface { + return r.KubeClientSet +} + +// GetSecretLister returns the lister for Secret. +func (r *Reconciler) GetSecretLister() corev1listers.SecretLister { + return r.secretLister +} + +// GetIstioClient returns the client to access Istio resources. +func (r *Reconciler) GetIstioClient() istioclientset.Interface { + return r.IstioClientSet +} + +// GetVirtualServiceLister returns the lister for VirtualService. 
+func (r *Reconciler) GetVirtualServiceLister() istiolisters.VirtualServiceLister { + return r.virtualServiceLister +} + +// qualifiedGatewayNamesFromContext get gateway names from context +func qualifiedGatewayNamesFromContext(ctx context.Context) map[v1alpha1.IngressVisibility]sets.String { + publicGateways := sets.NewString() + for _, gw := range config.FromContext(ctx).Istio.IngressGateways { + publicGateways.Insert(gw.QualifiedName()) + } + + privateGateways := sets.NewString() + for _, gw := range config.FromContext(ctx).Istio.LocalGateways { + privateGateways.Insert(gw.QualifiedName()) + } + + return map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: publicGateways, + v1alpha1.IngressVisibilityClusterLocal: privateGateways, + } +} + +// gatewayServiceURLFromContext return an address of a load-balancer +// that the given Ingress is exposed to, or empty string if +// none. +func gatewayServiceURLFromContext(ctx context.Context, ing *v1alpha1.Ingress) string { + if ing.IsPublic() { + return publicGatewayServiceURLFromContext(ctx) + } + + return privateGatewayServiceURLFromContext(ctx) +} + +func publicGatewayServiceURLFromContext(ctx context.Context) string { + cfg := config.FromContext(ctx).Istio + if len(cfg.IngressGateways) > 0 { + return cfg.IngressGateways[0].ServiceURL + } + + return "" +} + +func privateGatewayServiceURLFromContext(ctx context.Context) string { + cfg := config.FromContext(ctx).Istio + if len(cfg.LocalGateways) > 0 { + return cfg.LocalGateways[0].ServiceURL + } + + return "" +} + +// getLBStatus get LB Status +func getLBStatus(gatewayServiceURL string) []v1alpha1.LoadBalancerIngressStatus { + // The Ingress isn't load-balanced by any particular + // Service, but through a Service mesh. 
+ if gatewayServiceURL == "" { + return []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + } + } + return []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: gatewayServiceURL}, + } +} + +func (r *Reconciler) shouldReconcileTLS(ia *v1alpha1.Ingress) bool { + // We should keep reconciling the Ingress whose TLS has been reconciled before + // to make sure deleting IngressTLS will clean up the TLS server in the Gateway. + return (ia.IsPublic() && len(ia.Spec.TLS) > 0) || r.wasTLSReconciled(ia) +} + +func (r *Reconciler) wasTLSReconciled(ia *v1alpha1.Ingress) bool { + return len(ia.GetFinalizers()) != 0 && ia.GetFinalizers()[0] == r.finalizer +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress_test.go new file mode 100644 index 0000000000..40d6e21274 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/ingress_test.go @@ -0,0 +1,1435 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "context" + "fmt" + "testing" + "time" + + // Inject our fakes + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/secret/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + _ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake" + fakeistioclient "knative.dev/serving/pkg/client/istio/injection/client/fake" + _ "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/gateway/fake" + _ "knative.dev/serving/pkg/client/istio/injection/informers/networking/v1alpha3/virtualservice/fake" + "knative.dev/serving/pkg/network/ingress" + + proto "github.com/gogo/protobuf/proto" + "github.com/google/go-cmp/cmp" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + clientgotesting "k8s.io/client-go/testing" + "knative.dev/pkg/kmeta" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + apiconfig "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/ingress/config" + 
"knative.dev/serving/pkg/reconciler/ingress/resources" + presources "knative.dev/serving/pkg/resources" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" +) + +const ( + originDomainInternal = "origin.istio-system.svc.cluster.local" + newDomainInternal = "custom.istio-system.svc.cluster.local" + targetSecretName = "reconciling-ingress-uid" +) + +var ( + originGateways = map[string]string{ + "gateway.knative-test-gateway": originDomainInternal, + } + newGateways = map[string]string{ + "gateway." + networking.KnativeIngressGateway: newDomainInternal, + "gateway.knative-test-gateway": originDomainInternal, + } + ingressGateway = map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString(networking.KnativeIngressGateway), + } + gateways = map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString("knative-test-gateway", networking.KnativeIngressGateway), + } + defaultMaxRevisionTimeout = time.Duration(apiconfig.DefaultMaxRevisionTimeoutSeconds) * time.Second +) + +var ( + ingressRules = []v1alpha1.IngressRule{{ + Hosts: []string{ + "domain.com", + "test-route.test-ns.svc.cluster.local", + "test-route.test-ns.svc", + "test-route.test-ns", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "test-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + }}, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }}, + }, + }, + }} + + ingressTLS = []v1alpha1.IngressTLS{{ + Hosts: []string{"host-tls.example.com"}, + SecretName: "secret0", + SecretNamespace: "istio-system", + }} + + // The gateway server according to 
ingressTLS. + ingressTLSServer = &istiov1alpha3.Server{ + Hosts: []string{"host-tls.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/reconciling-ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: "tls.crt", + PrivateKey: "tls.key", + CredentialName: "secret0", + }, + } + + ingressHTTPRedirectServer = &istiov1alpha3.Server{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http-server", + Number: 80, + Protocol: "HTTP", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + HttpsRedirect: true, + }, + } + + // The gateway server irrelevant to ingressTLS. + irrelevantServer = &istiov1alpha3.Server{ + Hosts: []string{"test.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: "tls.crt", + PrivateKey: "tls.key", + CredentialName: "other-secret", + }, + } + + deletionTime = metav1.NewTime(time.Unix(1e9, 0)) +) + +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + Key: "too/many/parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "skip ingress not matching class key", + Objects: []runtime.Object{ + addAnnotations(ing("no-virtualservice-yet", 1234), + map[string]string{networking.IngressClassAnnotationKey: "fake-controller"}), + }, + }, { + Name: "create VirtualService matching Ingress", + + Objects: []runtime.Object{ + ing("no-virtualservice-yet", 1234), + }, + WantCreates: []runtime.Object{ + resources.MakeMeshVirtualService(insertProbe(ing("no-virtualservice-yet", 1234)), gateways), + resources.MakeIngressVirtualService(insertProbe(ing("no-virtualservice-yet", 1234)), + makeGatewayMap([]string{"knative-testing/knative-test-gateway", "knative-testing/" + networking.KnativeIngressGateway}, nil)), + }, + WantStatusUpdates: 
[]clientgotesting.UpdateActionImpl{{ + Object: ingressWithStatus("no-virtualservice-yet", 1234, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "no-virtualservice-yet-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "no-virtualservice-yet"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "no-virtualservice-yet"), + }, + Key: "test-ns/no-virtualservice-yet", + }, { + Name: "observed generation is updated when error is encountered in reconciling, and ingress ready status is unknown", + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "virtualservices"), + }, + Objects: []runtime.Object{ + ingressWithStatus("reconcile-failed", 1234, + v1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + }, { + Type: 
v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + ), + &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-failed", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "reconcile-failed", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconcile-failed", 1234))}, + }, + Spec: istiov1alpha3.VirtualService{}, + }, + }, + WantCreates: []runtime.Object{ + resources.MakeMeshVirtualService(insertProbe(ing("reconcile-failed", 1234)), gateways), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: resources.MakeIngressVirtualService(insertProbe(ing("reconcile-failed", 1234)), + makeGatewayMap([]string{"knative-testing/knative-test-gateway", "knative-testing/" + networking.KnativeIngressGateway}, nil)), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithStatus("reconcile-failed", 1234, + v1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Reason: virtualServiceNotReconciled, + Severity: apis.ConditionSeverityError, + Message: "failed to update VirtualService: inducing failure for update virtualservices", + Status: corev1.ConditionFalse, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionFalse, + Severity: apis.ConditionSeverityError, + Reason: notReconciledReason, + Message: notReconciledMessage, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconcile-failed-mesh"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to update VirtualService: inducing failure 
for update virtualservices"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconcile-failed"), + }, + Key: "test-ns/reconcile-failed", + }, { + Name: "reconcile VirtualService to match desired one", + Objects: []runtime.Object{ + ing("reconcile-virtualservice", 1234), + &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-virtualservice", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "reconcile-virtualservice", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconcile-virtualservice", 1234))}, + }, + Spec: istiov1alpha3.VirtualService{}, + }, + &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-virtualservice-extra", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "reconcile-virtualservice", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconcile-virtualservice", 1234))}, + }, + Spec: istiov1alpha3.VirtualService{}, + }, + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: resources.MakeIngressVirtualService(insertProbe(ing("reconcile-virtualservice", 1234)), + makeGatewayMap([]string{"knative-testing/knative-test-gateway", "knative-testing/" + networking.KnativeIngressGateway}, nil)), + }}, + WantCreates: []runtime.Object{ + resources.MakeMeshVirtualService(insertProbe(ing("reconcile-virtualservice", 1234)), gateways), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "test-ns", + Verb: "delete", + }, + Name: "reconcile-virtualservice-extra", + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithStatus("reconcile-virtualservice", 1234, + v1alpha1.IngressStatus{ + LoadBalancer: 
&v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconcile-virtualservice-mesh"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated VirtualService %s/%s", + "test-ns", "reconcile-virtualservice"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconcile-virtualservice"), + }, + Key: "test-ns/reconcile-virtualservice", + }, { + Name: "clean up VirtualServices when ingress class annotation is not istio", + Objects: []runtime.Object{ + &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-virtualservice", + Namespace: "test-ns", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + Annotations: map[string]string{networking.IngressClassAnnotationKey: "some-other-ingress"}, + ResourceVersion: "v1", + }, + Spec: v1alpha1.IngressSpec{ + DeprecatedGeneration: 1234, + Rules: ingressRules, + // Deprecated, needed because of DeepCopy behavior + Visibility: 
v1alpha1.IngressVisibilityExternalIP, + }, + }, + + &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-virtualservice", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "reconcile-virtualservice", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + Annotations: map[string]string{networking.IngressClassAnnotationKey: network.IstioIngressClassName}, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconcile-virtualservice", 1234))}, + }, + Spec: istiov1alpha3.VirtualService{}, + }, + &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-virtualservice-extra", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "reconcile-virtualservice", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + Annotations: map[string]string{networking.IngressClassAnnotationKey: network.IstioIngressClassName}, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconcile-virtualservice", 1234))}, + }, + Spec: istiov1alpha3.VirtualService{}, + }, + }, + WantDeletes: []clientgotesting.DeleteActionImpl{ + { + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "test-ns", + Verb: "delete", + }, + Name: "reconcile-virtualservice", + }, + { + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "test-ns", + Verb: "delete", + }, + Name: "reconcile-virtualservice-extra", + }, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: addAnnotations(ingressWithStatus("reconcile-virtualservice", 1234, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: 
pkgnet.GetServiceHostname("test-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), map[string]string{networking.IngressClassAnnotationKey: "some-other-ingress"}), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconcile-virtualservice"), + }, + Key: "test-ns/reconcile-virtualservice", + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + virtualServiceLister: listers.GetVirtualServiceLister(), + gatewayLister: listers.GetGatewayLister(), + finalizer: ingressFinalizer, + configStore: &testConfigStore{ + config: ReconcilerTestConfig(), + }, + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) { + return true, nil + }, + }, + ingressLister: listers.GetIngressLister(), + } + })) +} + +func TestReconcile_EnableAutoTLS(t *testing.T) { + table := TableTest{{ + Name: "update Gateway to match newly created Ingress", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + ingressWithTLS("reconciling-ingress", 1234, ingressTLS), + // No Gateway servers match the given TLS of Ingress. 
+ gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + originSecret("istio-system", "secret0"), + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + + resources.MakeMeshVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLS)), ingressGateway), + resources.MakeIngressVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLS)), + makeGatewayMap([]string{"knative-testing/" + networking.KnativeIngressGateway}, nil)), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + // ingressTLSServer needs to be added into Gateway. + Object: gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{ingressTLSServer, irrelevantServer}), + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddFinalizerAction("reconciling-ingress", ingressFinalizer), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithTLSAndStatus("reconciling-ingress", 1234, + ingressTLS, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: 
corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Gateway %s/%s", system.Namespace(), networking.KnativeIngressGateway), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconciling-ingress"), + }, + Key: "test-ns/reconciling-ingress", + }, { + Name: "No preinstalled Gateways", + Objects: []runtime.Object{ + ingressWithTLS("reconciling-ingress", 1234, ingressTLS), + originSecret("istio-system", "secret0"), + }, + WantCreates: []runtime.Object{ + resources.MakeMeshVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLS)), ingressGateway), + resources.MakeIngressVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLS)), + makeGatewayMap([]string{"knative-testing/" + networking.KnativeIngressGateway}, nil)), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddFinalizerAction("reconciling-ingress", ingressFinalizer), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithTLSAndStatus("reconciling-ingress", 1234, + ingressTLS, + v1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionUnknown, + Severity: apis.ConditionSeverityError, + Reason: notReconciledReason, + 
Message: notReconciledMessage, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress"), + Eventf(corev1.EventTypeWarning, "InternalError", `failed to get Gateway: gateway.networking.istio.io "%s" not found`, networking.KnativeIngressGateway), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconciling-ingress"), + }, + // Error should be returned when there is no preinstalled gateways. + WantErr: true, + Key: "test-ns/reconciling-ingress", + }, { + Name: "delete Ingress", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + ingressWithFinalizers("reconciling-ingress", 1234, ingressTLS, []string{ingressFinalizer}, &deletionTime), + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer, ingressTLSServer}), + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer, ingressTLSServer}), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + }, { + // Finalizer should be removed. 
+ Object: ingressWithFinalizers("reconciling-ingress", 1234, ingressTLS, []string{}, &deletionTime), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Gateway %s/%s", system.Namespace(), networking.KnativeIngressGateway), + }, + Key: "test-ns/reconciling-ingress", + }, { + Name: "delete IngressTLS", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + ingressWithFinalizers("reconciling-ingress", 1234, []v1alpha1.IngressTLS{}, []string{ingressFinalizer}, nil), + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer, ingressTLSServer}), + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer, ingressTLSServer}), + + resources.MakeMeshVirtualService(insertProbe(ingressWithFinalizers("reconciling-ingress", 1234, []v1alpha1.IngressTLS{}, []string{ingressFinalizer}, nil)), ingressGateway), + resources.MakeIngressVirtualService(insertProbe(ingressWithFinalizers("reconciling-ingress", 1234, []v1alpha1.IngressTLS{}, []string{ingressFinalizer}, nil)), + makeGatewayMap([]string{"knative-testing/" + networking.KnativeIngressGateway}, nil)), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + // IngressTLS related TLS servers should be removed from Gateway. 
+ Object: gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithFinalizersAndStatus("reconciling-ingress", 1234, + []string{ingressFinalizer}, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Gateway %s/%s", system.Namespace(), networking.KnativeIngressGateway), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconciling-ingress"), + }, + Key: "test-ns/reconciling-ingress", + }, { + Name: "TLS Secret is not in the namespace of Istio gateway service", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + ingressWithTLS("reconciling-ingress", 1234, 
ingressTLSWithSecretNamespace("knative-serving")), + // No Gateway servers match the given TLS of Ingress. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + // The namespace (`knative-serving`) of the origin secret is different + // from the namespace (`istio-system`) of Istio gateway service. + originSecret("knative-serving", "secret0"), + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + + resources.MakeMeshVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving"))), ingressGateway), + resources.MakeIngressVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving"))), + makeGatewayMap([]string{"knative-testing/" + networking.KnativeIngressGateway}, nil)), + + // The secret copy under istio-system. + secret("istio-system", targetSecretName, map[string]string{ + networking.OriginSecretNameLabelKey: "secret0", + networking.OriginSecretNamespaceLabelKey: "knative-serving", + }), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + // ingressTLSServer with the name of the secret copy needs to be added into Gateway. 
+ Object: gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{withCredentialName(deepCopy(ingressTLSServer), targetSecretName), irrelevantServer}), + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddFinalizerAction("reconciling-ingress", ingressFinalizer), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithTLSAndStatus("reconciling-ingress", 1234, + ingressTLSWithSecretNamespace("knative-serving"), + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress"), + Eventf(corev1.EventTypeNormal, "Created", "Created Secret %s/%s", "istio-system", targetSecretName), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Gateway %s/%s", system.Namespace(), networking.KnativeIngressGateway), + Eventf(corev1.EventTypeNormal, "Updated", "Updated 
status for Ingress %q", "reconciling-ingress"), + }, + Key: "test-ns/reconciling-ingress", + }, { + Name: "Reconcile Target secret", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving")), + + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{withCredentialName(deepCopy(ingressTLSServer), targetSecretName), irrelevantServer}), + // The origin secret. + originSecret("knative-serving", "secret0"), + + // The target secret that has the Data different from the origin secret. The Data should be reconciled. + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: targetSecretName, + Namespace: "istio-system", + Labels: map[string]string{ + networking.OriginSecretNameLabelKey: "secret0", + networking.OriginSecretNamespaceLabelKey: "knative-serving", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving")), + )}, + }, + Data: map[string][]byte{ + "wrong_data": []byte("wrongdata"), + }, + }, + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. 
+ gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{withCredentialName(deepCopy(ingressTLSServer), targetSecretName), irrelevantServer}), + resources.MakeMeshVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving"))), ingressGateway), + resources.MakeIngressVirtualService(insertProbe(ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving"))), + makeGatewayMap([]string{"knative-testing/" + networking.KnativeIngressGateway}, nil)), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: targetSecretName, + Namespace: "istio-system", + Labels: map[string]string{ + networking.OriginSecretNameLabelKey: "secret0", + networking.OriginSecretNamespaceLabelKey: "knative-serving", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + ingressWithTLS("reconciling-ingress", 1234, ingressTLSWithSecretNamespace("knative-serving")), + )}, + }, + // The data is expected to be updated to the right one. 
+ Data: map[string][]byte{ + "test-secret": []byte("abcd"), + }, + }, + }}, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddFinalizerAction("reconciling-ingress", ingressFinalizer), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithTLSAndStatus("reconciling-ingress", 1234, + ingressTLSWithSecretNamespace("knative-serving"), + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Secret %s/%s", "istio-system", targetSecretName), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconciling-ingress"), + }, + Key: "test-ns/reconciling-ingress", + }, { + Name: "Reconcile with autoTLS but cluster local visibilty, mesh only", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + 
ingressWithTLSClusterLocal("reconciling-ingress", 1234, ingressTLS), + // No Gateway servers match the given TLS of Ingress. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + originSecret("istio-system", "secret0"), + }, + WantCreates: []runtime.Object{ + // The creation of gateways are triggered when setting up the test. + gateway(networking.KnativeIngressGateway, system.Namespace(), []*istiov1alpha3.Server{irrelevantServer}), + resources.MakeMeshVirtualService(insertProbe(ingressWithTLSClusterLocal("reconciling-ingress", 1234, ingressTLS)), ingressGateway), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: ingressWithTLSAndStatusClusterLocal("reconciling-ingress", 1234, + ingressTLS, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + PublicLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + }, + PrivateLoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {MeshOnly: true}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + Severity: apis.ConditionSeverityError, + }}, + }, + }, + ), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created VirtualService %q", "reconciling-ingress-mesh"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated status for Ingress %q", "reconciling-ingress"), + }, + Key: "test-ns/reconciling-ingress", + }} + 
table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + + // As we use a customized resource name for Gateway CRD (i.e. `gateways`), not the one + // originally generated by kubernetes code generator (i.e. `gatewaies`), we have to + // explicitly create gateways when setting up the test per suggestion + // https://github.com/knative/serving/blob/a6852fc3b6cdce72b99c5d578dd64f2e03dabb8b/vendor/k8s.io/client-go/testing/fixture.go#L292 + gateways := getGatewaysFromObjects(listers.GetIstioObjects()) + for _, gateway := range gateways { + fakeistioclient.Get(ctx).NetworkingV1alpha3().Gateways(gateway.Namespace).Create(gateway) + } + + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + virtualServiceLister: listers.GetVirtualServiceLister(), + gatewayLister: listers.GetGatewayLister(), + secretLister: listers.GetSecretLister(), + tracker: &NullTracker{}, + finalizer: ingressFinalizer, + // Enable reconciling gateway. 
+ configStore: &testConfigStore{ + config: &config.Config{ + Istio: &config.Istio{ + IngressGateways: []config.Gateway{{ + Namespace: system.Namespace(), + Name: networking.KnativeIngressGateway, + ServiceURL: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system"), + }}, + }, + Network: &network.Config{ + HTTPProtocol: network.HTTPDisabled, + }, + }, + }, + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) { + return true, nil + }, + }, + ingressLister: listers.GetIngressLister(), + } + })) +} + +func getGatewaysFromObjects(objects []runtime.Object) []*v1alpha3.Gateway { + gateways := []*v1alpha3.Gateway{} + for _, object := range objects { + if gateway, ok := object.(*v1alpha3.Gateway); ok { + gateways = append(gateways, gateway) + } + } + return gateways +} + +func gateway(name, namespace string, servers []*istiov1alpha3.Server) *v1alpha3.Gateway { + return &v1alpha3.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: istiov1alpha3.Gateway{ + Servers: servers, + }, + } +} + +func originSecret(namespace, name string) *corev1.Secret { + tmp := secret(namespace, name, map[string]string{}) + tmp.UID = "uid" + return tmp +} + +func secret(namespace, name string, labels map[string]string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing("reconciling-ingress", 1234))}, + }, + Data: map[string][]byte{ + "test-secret": []byte("abcd"), + }, + } +} + +func withCredentialName(tlsServer *istiov1alpha3.Server, credentialName string) *istiov1alpha3.Server { + tlsServer.Tls.CredentialName = credentialName + return tlsServer +} + +// Open-coded deepCopy since istio.io/api's Server doesn't provide one currently +func deepCopy(server *istiov1alpha3.Server) *istiov1alpha3.Server { + return 
proto.Clone(server).(*istiov1alpha3.Server) +} + +func ingressTLSWithSecretNamespace(namespace string) []v1alpha1.IngressTLS { + result := []v1alpha1.IngressTLS{} + for _, tls := range ingressTLS { + tls.SecretNamespace = namespace + result = append(result, tls) + } + return result +} + +func patchAddFinalizerAction(ingressName, finalizer string) clientgotesting.PatchActionImpl { + action := clientgotesting.PatchActionImpl{ + Name: ingressName, + } + patch := fmt.Sprintf(`{"metadata":{"finalizers":[%q],"resourceVersion":"v1"}}`, finalizer) + action.Patch = []byte(patch) + return action +} + +func addAnnotations(ing *v1alpha1.Ingress, annos map[string]string) *v1alpha1.Ingress { + // UnionMaps(a, b) where value from b wins. Use annos for second arg. + ing.ObjectMeta.Annotations = presources.UnionMaps(ing.ObjectMeta.Annotations, annos) + return ing +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ reconciler.ConfigStore = (*testConfigStore)(nil) + +func ReconcilerTestConfig() *config.Config { + return &config.Config{ + Istio: &config.Istio{ + IngressGateways: []config.Gateway{{ + Namespace: system.Namespace(), + Name: "knative-test-gateway", + ServiceURL: pkgnet.GetServiceHostname("test-ingressgateway", "istio-system"), + }, { + Namespace: system.Namespace(), + Name: networking.KnativeIngressGateway, + ServiceURL: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system"), + }}, + }, + Network: &network.Config{ + AutoTLS: false, + }, + } +} + +func ingressWithStatus(name string, generation int64, status v1alpha1.IngressStatus) *v1alpha1.Ingress { + return &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "test-ns", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + Annotations: 
map[string]string{networking.IngressClassAnnotationKey: network.IstioIngressClassName}, + ResourceVersion: "v1", + }, + Spec: v1alpha1.IngressSpec{ + DeprecatedGeneration: generation, + Rules: ingressRules, + // Deprecated, needed because of DeepCopy behavior + Visibility: v1alpha1.IngressVisibilityExternalIP, + }, + Status: status, + } +} + +func ing(name string, generation int64) *v1alpha1.Ingress { + return ingressWithStatus(name, generation, v1alpha1.IngressStatus{}) +} + +func ingressWithFinalizers(name string, generation int64, tls []v1alpha1.IngressTLS, finalizers []string, deletionTime *metav1.Time) *v1alpha1.Ingress { + ingress := ingressWithTLS(name, generation, tls) + ingress.ObjectMeta.Finalizers = finalizers + if deletionTime != nil { + ingress.ObjectMeta.DeletionTimestamp = deletionTime + } + return ingress +} + +func ingressWithFinalizersAndStatus(name string, generation int64, finalizers []string, status v1alpha1.IngressStatus) *v1alpha1.Ingress { + ingress := ingressWithFinalizers(name, generation, []v1alpha1.IngressTLS{}, finalizers, nil) + ingress.Status = status + return ingress +} + +func ingressWithTLS(name string, generation int64, tls []v1alpha1.IngressTLS) *v1alpha1.Ingress { + return ingressWithTLSAndStatus(name, generation, tls, v1alpha1.IngressStatus{}) +} + +func ingressWithTLSClusterLocal(name string, generation int64, tls []v1alpha1.IngressTLS) *v1alpha1.Ingress { + ci := ingressWithTLSAndStatus(name, generation, tls, v1alpha1.IngressStatus{}).DeepCopy() + ci.Spec.Visibility = v1alpha1.IngressVisibilityClusterLocal + + rules := ci.Spec.Rules + for i, rule := range rules { + rCopy := rule.DeepCopy() + rCopy.Visibility = v1alpha1.IngressVisibilityClusterLocal + rules[i] = *rCopy + } + + ci.Spec.Rules = rules + + return ci +} + +func ingressWithTLSAndStatus(name string, generation int64, tls []v1alpha1.IngressTLS, status v1alpha1.IngressStatus) *v1alpha1.Ingress { + ci := ingressWithStatus(name, generation, status) + ci.Spec.TLS = tls + 
return ci +} + +func ingressWithTLSAndStatusClusterLocal(name string, generation int64, tls []v1alpha1.IngressTLS, status v1alpha1.IngressStatus) *v1alpha1.Ingress { + ci := ingressWithTLSClusterLocal(name, generation, tls) + ci.Status = status + return ci +} + +func newTestSetup(t *testing.T, configs ...*corev1.ConfigMap) ( + context.Context, + context.CancelFunc, + []controller.Informer, + *controller.Impl, + *configmap.ManualWatcher) { + + ctx, cancel, informers := SetupFakeContextWithCancel(t) + configMapWatcher := &configmap.ManualWatcher{Namespace: system.Namespace()} + controller := NewController(ctx, configMapWatcher) + + controller.Reconciler.(*Reconciler).statusManager = &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) { + return true, nil + }, + } + + cms := append([]*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.IstioConfigName, + Namespace: system.Namespace(), + }, + Data: originGateways, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Disabled", + }, + }}, configs...) 
+ + for _, cfg := range cms { + configMapWatcher.OnChange(cfg) + } + + return ctx, cancel, informers, controller, configMapWatcher +} + +func TestGlobalResyncOnUpdateGatewayConfigMap(t *testing.T) { + ctx, cancel, informers, ctrl, watcher := newTestSetup(t) + + grp := errgroup.Group{} + + servingClient := fakeservingclient.Get(ctx) + + h := NewHooks() + + // Check for Ingress created as a signal that syncHandler ran + h.OnUpdate(&servingClient.Fake, "ingresses", func(obj runtime.Object) HookResult { + ci := obj.(*v1alpha1.Ingress) + t.Logf("ingress updated: %q", ci.Name) + + gateways := ci.Status.LoadBalancer.Ingress + if len(gateways) != 1 { + t.Logf("Unexpected gateways: %v", gateways) + return HookIncomplete + } + expectedDomainInternal := newDomainInternal + if gateways[0].DomainInternal != expectedDomainInternal { + t.Logf("Expected gateway %q but got %q", expectedDomainInternal, gateways[0].DomainInternal) + return HookIncomplete + } + + return HookComplete + }) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) 
+ if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("failed to start ingress manager: %v", err) + } + + grp.Go(func() error { return ctrl.Run(1, ctx.Done()) }) + + ingress := ingressWithStatus("config-update", 1234, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: ""}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + ) + ingressClient := servingClient.NetworkingV1alpha1().Ingresses("test-ns") + + // Create a ingress. + ingressClient.Create(ingress) + + // Test changes in gateway config map. Ingress should get updated appropriately. + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.IstioConfigName, + Namespace: system.Namespace(), + }, + Data: newGateways, + } + watcher.OnChange(&domainConfig) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Error(err) + } +} + +func insertProbe(ing *v1alpha1.Ingress) *v1alpha1.Ingress { + ing = ing.DeepCopy() + ingress.InsertProbe(ing) + return ing +} + +func TestGlobalResyncOnUpdateNetwork(t *testing.T) { + ctx, cancel, informers, ctrl, watcher := newTestSetup(t) + + grp := errgroup.Group{} + + istioClient := fakeistioclient.Get(ctx) + + h := NewHooks() + + // Check for Gateway created as a signal that syncHandler ran + h.OnUpdate(&istioClient.Fake, "gateways", func(obj runtime.Object) HookResult { + updatedGateway := obj.(*v1alpha3.Gateway) + // The expected gateway should include the Istio TLS server. 
+ expectedGateway := gateway("knative-test-gateway", system.Namespace(), []*istiov1alpha3.Server{ingressTLSServer}) + expectedGateway.Spec.Servers = append(expectedGateway.Spec.Servers, ingressHTTPRedirectServer) + expectedGateway.Spec.Servers = resources.SortServers(expectedGateway.Spec.Servers) + + if diff := cmp.Diff(updatedGateway, expectedGateway); diff != "" { + t.Logf("Unexpected Gateway (-want, +got): %v", diff) + return HookIncomplete + } + + return HookComplete + }) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("Failed to start ingress manager: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("Failed to start watcher: %v", err) + } + + grp.Go(func() error { return ctrl.Run(1, ctx.Done()) }) + + ingress := ingressWithTLSAndStatus("reconciling-ingress", 1234, + ingressTLS, + v1alpha1.IngressStatus{ + LoadBalancer: &v1alpha1.LoadBalancerStatus{ + Ingress: []v1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: originDomainInternal}, + }, + }, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: v1alpha1.IngressConditionLoadBalancerReady, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionNetworkConfigured, + Status: corev1.ConditionTrue, + }, { + Type: v1alpha1.IngressConditionReady, + Status: corev1.ConditionTrue, + }}, + }, + }, + ) + + ingressClient := fakeservingclient.Get(ctx).NetworkingV1alpha1().Ingresses("test-ns") + + // Create a ingress. + ingressClient.Create(ingress) + + gatewayClient := istioClient.NetworkingV1alpha3().Gateways(system.Namespace()) + // Create a Gateway + gatewayClient.Create(gateway("knative-test-gateway", system.Namespace(), []*istiov1alpha3.Server{})) + + // Create origin secret. "ns" namespace is the namespace of ingress gateway service. 
+ secretClient := fakekubeclient.Get(ctx).CoreV1().Secrets("istio-system") + secretClient.Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret0", + Namespace: "istio-system", + UID: "123", + }, + }) + + // Test changes in autoTLS of config-network ConfigMap. Ingress should get updated appropriately. + networkConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Enabled", + "httpProtocol": "Redirected", + }, + } + watcher.OnChange(&networkConfig) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Error(err) + } +} + +func makeGatewayMap(publicGateways []string, privateGateways []string) map[v1alpha1.IngressVisibility]sets.String { + return map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString(publicGateways...), + v1alpha1.IngressVisibilityClusterLocal: sets.NewString(privateGateways...), + } +} + +type fakeStatusManager struct { + FakeIsReady func(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) +} + +func (m *fakeStatusManager) IsReady(ctx context.Context, ing *v1alpha1.Ingress) (bool, error) { + return m.FakeIsReady(ctx, ing) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister.go new file mode 100644 index 0000000000..49bfe78e6d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister.go @@ -0,0 +1,175 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "context" + "fmt" + "net/url" + "sort" + "strconv" + + "go.uber.org/zap" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/network/ingress" + "knative.dev/serving/pkg/network/status" +) + +func NewProbeTargetLister( + logger *zap.SugaredLogger, + gatewayLister istiolisters.GatewayLister, + endpointsLister corev1listers.EndpointsLister, + serviceLister corev1listers.ServiceLister) status.ProbeTargetLister { + return &gatewayPodTargetLister{ + logger: logger, + gatewayLister: gatewayLister, + endpointsLister: endpointsLister, + serviceLister: serviceLister, + } +} + +type gatewayPodTargetLister struct { + logger *zap.SugaredLogger + + gatewayLister istiolisters.GatewayLister + endpointsLister corev1listers.EndpointsLister + serviceLister corev1listers.ServiceLister +} + +func (l *gatewayPodTargetLister) ListProbeTargets(ctx context.Context, ing *v1alpha1.Ingress) ([]status.ProbeTarget, error) { + results := []status.ProbeTarget{} + gatewayHosts := ingress.HostsPerVisibility(ing, qualifiedGatewayNamesFromContext(ctx)) + gatewayNames := []string{} + for gatewayName := range gatewayHosts { + gatewayNames = append(gatewayNames, gatewayName) + } + // Sort the gateway names for a consistent 
ordering. + sort.Strings(gatewayNames) + for _, gatewayName := range gatewayNames { + gateway, err := l.getGateway(gatewayName) + if err != nil { + return nil, fmt.Errorf("failed to get Gateway %q: %w", gatewayName, err) + } + targets, err := l.listGatewayTargets(gateway) + if err != nil { + return nil, fmt.Errorf("failed to list the probing URLs of Gateway %q: %w", gatewayName, err) + } + if len(targets) == 0 { + continue + } + for _, target := range targets { + qualifiedTarget := status.ProbeTarget{ + PodIPs: target.PodIPs, + PodPort: target.PodPort, + Port: target.Port, + URLs: make([]*url.URL, len(gatewayHosts[gatewayName])), + } + // Use sorted hosts list for consistent ordering. + for i, host := range gatewayHosts[gatewayName].List() { + newURL := *target.URLs[0] + newURL.Host = host + ":" + target.Port + qualifiedTarget.URLs[i] = &newURL + } + results = append(results, qualifiedTarget) + } + } + return results, nil +} + +func (l *gatewayPodTargetLister) getGateway(name string) (*v1alpha3.Gateway, error) { + namespace, name, err := cache.SplitMetaNamespaceKey(name) + if err != nil { + return nil, fmt.Errorf("failed to parse Gateway name %q: %w", name, err) + } + if namespace == "" { + return nil, fmt.Errorf("unexpected unqualified Gateway name %q", name) + } + return l.gatewayLister.Gateways(namespace).Get(name) +} + +// listGatewayPodsURLs returns a probe targets for a given Gateway. 
+func (l *gatewayPodTargetLister) listGatewayTargets(gateway *v1alpha3.Gateway) ([]status.ProbeTarget, error) { + selector := labels.SelectorFromSet(gateway.Spec.Selector) + + services, err := l.serviceLister.List(selector) + if err != nil { + return nil, fmt.Errorf("failed to list Services: %w", err) + } + if len(services) == 0 { + l.logger.Infof("Skipping Gateway %s/%s because it has no corresponding Service", gateway.Namespace, gateway.Name) + return nil, nil + } + service := services[0] + + endpoints, err := l.endpointsLister.Endpoints(service.Namespace).Get(service.Name) + if err != nil { + return nil, fmt.Errorf("failed to get Endpoints: %w", err) + } + + targets := []status.ProbeTarget{} + for _, server := range gateway.Spec.Servers { + tURL := &url.URL{} + switch server.Port.Protocol { + case "HTTP", "HTTP2": + if server.Tls != nil && server.Tls.HttpsRedirect { + // ignoring HTTPS redirects. + continue + } + tURL.Scheme = "http" + case "HTTPS": + tURL.Scheme = "https" + default: + l.logger.Infof("Skipping Server %q because protocol %q is not supported", server.Port.Name, server.Port.Protocol) + continue + } + + portName, err := network.NameForPortNumber(service, int32(server.Port.Number)) + if err != nil { + l.logger.Infof("Skipping Server %q because Service %s/%s doesn't contain a port %d", server.Port.Name, service.Namespace, service.Name, server.Port.Number) + continue + } + for _, sub := range endpoints.Subsets { + // The translation from server.Port.Number -> portName -> portNumber is intentional. + // We can't simply translate from the Service.Spec because Service.Spec.Target.Port + // could be either a name or a number. In the Endpoints spec, all ports are provided + // as numbers. 
+ portNumber, err := network.PortNumberForName(sub, portName) + if err != nil { + l.logger.Infof("Skipping Subset %v because it doesn't contain a port name %q", sub.Addresses, portName) + continue + } + target := status.ProbeTarget{ + PodIPs: sets.NewString(), + PodPort: strconv.Itoa(int(portNumber)), + Port: strconv.Itoa(int(server.Port.Number)), + URLs: []*url.URL{tURL}, + } + for _, addr := range sub.Addresses { + target.PodIPs.Insert(addr.IP) + } + targets = append(targets, target) + } + } + return targets, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister_test.go new file mode 100644 index 0000000000..7ee238b6a5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/lister_test.go @@ -0,0 +1,1294 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "context" + "errors" + "log" + "net/url" + "sort" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + istiov1alpha3 "istio.io/api/networking/v1alpha3" + v1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network/status" + "knative.dev/serving/pkg/reconciler/ingress/config" + + "go.uber.org/zap/zaptest" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + corev1listers "k8s.io/client-go/listers/core/v1" + istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" +) + +func TestListProbeTargets(t *testing.T) { + tests := []struct { + name string + ingress *v1alpha1.Ingress + ingressGateways []config.Gateway + localGateways []config.Gateway + gatewayLister istiolisters.GatewayLister + endpointsLister corev1listers.EndpointsLister + serviceLister corev1listers.ServiceLister + errMessage string + results []status.ProbeTarget + }{{ + name: "unqualified gateway", + ingressGateways: []config.Gateway{{ + Name: "gateway", + }}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + errMessage: "unexpected unqualified Gateway name", + }, { + name: "gateway error", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{fails: true}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + errMessage: "failed 
to get Gateway", + }, { + name: "service error", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{fails: true}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + errMessage: "failed to get Services", + }, { + name: "endpoints error", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{fails: true}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + errMessage: "failed to get Endpoints", + 
}, { + name: "service port not found", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }}, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + }, { + name: "no endpoints", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "http", + Port: 80, + }}, + }, + }}, + }, + endpointsLister: 
&fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{}, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + }, { + name: "no services", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{}, + endpointsLister: &fakeEndpointsLister{}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + }, { + name: "unsupported protocol", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Number: 80, + Protocol: "Mongo", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, 
+ Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "http", + Port: 80, + }}, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{}, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + }, { + name: "one gateway", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 8080, + Protocol: "HTTP", + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8081, + }, { + Name: "real", + Port: 8080, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8081, + }, { + Name: "real", + Port: 8080, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + 
Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{{ + PodIPs: sets.NewString("1.1.1.1"), + PodPort: "8080", + Port: "8080", + URLs: []*url.URL{{Scheme: "http", Host: "foo.bar.com:8080"}}, + }}, + }, { + name: "Different port between endpoint and gateway service", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8081, + }, { + Name: "real", + Port: 8080, // Different port number between Endpoint and Gateway Service. 
+ }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8081, + }, { + Name: "real", + Port: 80, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{{ + PodIPs: sets.NewString("1.1.1.1"), + PodPort: "8080", + Port: "80", + URLs: []*url.URL{{Scheme: "http", Host: "foo.bar.com:80"}}, + }}, + }, { + name: "one gateway, https redirect", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + HttpsRedirect: true, + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }}, + }, + serviceLister: 
&fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{}, + }, { + name: "unsupported protocols", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "GRPC", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: 
v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{}, + }, { + name: "subsets with no ports", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "unknown", + Port: 9999, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{}, + }, { + name: "two gateways", + ingressGateways: []config.Gateway{{ + Name: "gateway", + Namespace: "default", + }, { + Name: "gateway-two", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway-two", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 90, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "gateway-two", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway-two", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 90, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "2.2.2.2", + }, { + IP: "2.2.2.3", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway-two", + Labels: map[string]string{ + "gwt": "gateway-two", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 90, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }}, + }, + }, + results: []status.ProbeTarget{{ + PodIPs: sets.NewString("1.1.1.1"), + PodPort: "80", + Port: "80", + URLs: []*url.URL{{Scheme: "http", Host: "foo.bar.com:80"}}, + }, { + PodIPs: sets.NewString("2.2.2.2", "2.2.2.3"), + PodPort: "90", + Port: "90", + URLs: []*url.URL{{Scheme: "http", Host: "foo.bar.com:90"}}, + }}, + }, { + name: "local gateways", + localGateways: []config.Gateway{{ + Name: "local-gateway", + Namespace: "default", + }}, + gatewayLister: &fakeGatewayLister{ + gateways: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + }}, + Selector: map[string]string{ + "gwt": "istio", + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "local-gateway", + }, + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "http", + Number: 80, + Protocol: "HTTP", + }, + }}, + Selector: map[string]string{ + "gwt": "local-gateway", + }, + }, + }}, + }, + endpointsLister: &fakeEndpointsLister{ + endpointses: []*v1.Endpoints{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + }, + Subsets: []v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "1.1.1.1", + }}, + }}, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "local-gateway", + }, + Subsets: 
[]v1.EndpointSubset{{ + Ports: []v1.EndpointPort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + Addresses: []v1.EndpointAddress{{ + IP: "2.2.2.2", + }, { + IP: "2.2.2.3", + }}, + }}, + }}, + }, + serviceLister: &fakeServiceLister{ + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "gateway", + Labels: map[string]string{ + "gwt": "istio", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "local-gateway", + Labels: map[string]string{ + "gwt": "local-gateway", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "bogus", + Port: 8080, + }, { + Name: "real", + Port: 80, + }}, + }, + }}, + }, + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "whatever", + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "foo.bar.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityClusterLocal, + }}, + }, + }, + results: []status.ProbeTarget{{ + PodIPs: sets.NewString("2.2.2.2", "2.2.2.3"), + PodPort: "80", + Port: "80", + URLs: []*url.URL{ + {Scheme: "http", Host: "foo.bar:80"}, + {Scheme: "http", Host: "foo.bar.svc:80"}, + {Scheme: "http", Host: "foo.bar.svc.cluster.local:80"}}, + }}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lister := gatewayPodTargetLister{ + logger: zaptest.NewLogger(t).Sugar(), + gatewayLister: test.gatewayLister, + endpointsLister: test.endpointsLister, + serviceLister: test.serviceLister, + } + ctx := config.ToContext(context.Background(), &config.Config{ + Istio: &config.Istio{ + IngressGateways: test.ingressGateways, + LocalGateways: test.localGateways, + }, + }) + results, err := lister.ListProbeTargets(ctx, test.ingress) + if err == nil { + if test.errMessage != "" { + 
t.Fatalf("expected error message %q, saw no error", test.errMessage) + } + } else if !strings.Contains(err.Error(), test.errMessage) { + t.Errorf("expected error message %q but saw %v", test.errMessage, err) + } + if len(test.results)+len(results) > 0 { // consider nil map == empty map + // Sort by port number + sort.Slice(results, func(i, j int) bool { + return results[i].Port < results[j].Port + }) + if diff := cmp.Diff(test.results, results); diff != "" { + t.Errorf("Unexpected probe targets (-want +got): %s", diff) + } + } + }) + } +} + +type fakeGatewayLister struct { + gateways []*v1alpha3.Gateway + fails bool +} + +func (l *fakeGatewayLister) Gateways(namespace string) istiolisters.GatewayNamespaceLister { + if l.fails { + return &fakeGatewayNamespaceLister{fails: true} + } + + var matches []*v1alpha3.Gateway + for _, gateway := range l.gateways { + if gateway.Namespace == namespace { + matches = append(matches, gateway) + } + } + return &fakeGatewayNamespaceLister{ + gateways: matches, + } +} + +func (l *fakeGatewayLister) List(selector labels.Selector) ([]*v1alpha3.Gateway, error) { + log.Panic("not implemented") + return nil, nil +} + +type fakeGatewayNamespaceLister struct { + gateways []*v1alpha3.Gateway + fails bool +} + +func (l *fakeGatewayNamespaceLister) List(selector labels.Selector) ([]*v1alpha3.Gateway, error) { + log.Panic("not implemented") + return nil, nil +} + +func (l *fakeGatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) { + if l.fails { + return nil, errors.New("failed to get Gateway") + } + + for _, gateway := range l.gateways { + if gateway.Name == name { + return gateway, nil + } + } + return nil, errors.New("not found") +} + +type fakeEndpointsLister struct { + // Golum, golum. 
+ endpointses []*v1.Endpoints + fails bool +} + +func (l *fakeEndpointsLister) List(selector labels.Selector) ([]*v1.Endpoints, error) { + log.Panic("not implemented") + return nil, nil +} + +func (l *fakeEndpointsLister) Endpoints(namespace string) corev1listers.EndpointsNamespaceLister { + return l +} + +func (l *fakeEndpointsLister) Get(name string) (*v1.Endpoints, error) { + if l.fails { + return nil, errors.New("failed to get Endpoints") + } + for _, ep := range l.endpointses { + if ep.Name == name { + return ep, nil + } + } + return nil, errors.New("not found") +} + +type fakeServiceLister struct { + services []*v1.Service + fails bool +} + +func (l *fakeServiceLister) List(selector labels.Selector) ([]*v1.Service, error) { + if l.fails { + return nil, errors.New("failed to get Services") + } + results := []*v1.Service{} + for _, svc := range l.services { + if selector.Matches(labels.Set(svc.Labels)) { + results = append(results, svc) + } + } + return results, nil +} + +func (l *fakeServiceLister) Services(namespace string) corev1listers.ServiceNamespaceLister { + log.Panic("not implemented") + return nil +} + +func (l *fakeServiceLister) GetPodServices(pod *v1.Pod) ([]*v1.Service, error) { + log.Panic("not implemented") + return nil, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/doc.go new file mode 100644 index 0000000000..b45e92f377 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources holds simple functions for synthesizing child resources from +// an Ingress resource and any relevant Ingress controller configuration. +package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway.go new file mode 100644 index 0000000000..22ceb7f102 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway.go @@ -0,0 +1,290 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "context" + "fmt" + "hash/adler32" + "sort" + "strings" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + corev1listers "k8s.io/client-go/listers/core/v1" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/ingress/config" +) + +var httpServerPortName = "http-server" + +// Istio Gateway requires to have at least one server. This placeholderServer is used when +// all of the real servers are deleted. +var placeholderServer = istiov1alpha3.Server{ + Hosts: []string{"place-holder.place-holder"}, + Port: &istiov1alpha3.Port{ + Name: "place-holder", + Number: 9999, + Protocol: "HTTP", + }, +} + +// GetServers gets the `Servers` from `Gateway` that belongs to the given Ingress. +func GetServers(gateway *v1alpha3.Gateway, ing *v1alpha1.Ingress) []*istiov1alpha3.Server { + servers := []*istiov1alpha3.Server{} + for i := range gateway.Spec.Servers { + if belongsToIngress(gateway.Spec.Servers[i], ing) { + servers = append(servers, gateway.Spec.Servers[i]) + } + } + return SortServers(servers) +} + +// GetHTTPServer gets the HTTP `Server` from `Gateway`. +func GetHTTPServer(gateway *v1alpha3.Gateway) *istiov1alpha3.Server { + for _, server := range gateway.Spec.Servers { + if server.Port.Name == httpServerPortName { + return server + } + } + return nil +} + +func belongsToIngress(server *istiov1alpha3.Server, ing *v1alpha1.Ingress) bool { + // The format of the portName should be "/:". + // For example, default/routetest:0. 
+ portNameSplits := strings.Split(server.Port.Name, ":") + if len(portNameSplits) != 2 { + return false + } + return portNameSplits[0] == ing.GetNamespace()+"/"+ing.GetName() +} + +// SortServers sorts `Server` according to its port name. +func SortServers(servers []*istiov1alpha3.Server) []*istiov1alpha3.Server { + sort.Slice(servers, func(i, j int) bool { + return strings.Compare(servers[i].Port.Name, servers[j].Port.Name) < 0 + }) + return servers +} + +// MakeIngressGateways creates Gateways for a given Ingress. +func MakeIngressGateways(ctx context.Context, ing *v1alpha1.Ingress, originSecrets map[string]*corev1.Secret, svcLister corev1listers.ServiceLister) ([]*v1alpha3.Gateway, error) { + gatewayServices, err := getGatewayServices(ctx, svcLister) + if err != nil { + return nil, err + } + gateways := make([]*v1alpha3.Gateway, len(gatewayServices)) + for i, gatewayService := range gatewayServices { + gateway, err := makeIngressGateway(ctx, ing, originSecrets, gatewayService.Spec.Selector, gatewayService) + if err != nil { + return nil, err + } + gateways[i] = gateway + } + return gateways, nil +} + +func makeIngressGateway(ctx context.Context, ing *v1alpha1.Ingress, originSecrets map[string]*corev1.Secret, selector map[string]string, gatewayService *corev1.Service) (*v1alpha3.Gateway, error) { + ns := ing.GetNamespace() + if len(ns) == 0 { + ns = system.Namespace() + } + servers, err := MakeTLSServers(ing, gatewayService.Namespace, originSecrets) + if err != nil { + return nil, err + } + hosts := sets.String{} + for _, rule := range ing.Spec.Rules { + hosts.Insert(rule.Hosts...) 
+ } + servers = append(servers, MakeHTTPServer(config.FromContext(ctx).Network.HTTPProtocol, hosts.List())) + return &v1alpha3.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: GatewayName(ing, gatewayService), + Namespace: ns, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing)}, + Labels: map[string]string{ + // We need this label to find out all of Gateways of a given Ingress. + networking.IngressLabelKey: ing.GetName(), + }, + }, + Spec: istiov1alpha3.Gateway{ + Selector: selector, + Servers: servers, + }, + }, nil +} + +func getGatewayServices(ctx context.Context, svcLister corev1listers.ServiceLister) ([]*corev1.Service, error) { + ingressSvcMetas, err := getIngressGatewaySvcNameNamespaces(ctx) + if err != nil { + return nil, err + } + services := make([]*corev1.Service, len(ingressSvcMetas)) + for i, ingressSvcMeta := range ingressSvcMetas { + svc, err := svcLister.Services(ingressSvcMeta.Namespace).Get(ingressSvcMeta.Name) + if err != nil { + return nil, err + } + services[i] = svc + } + return services, nil +} + +// GatewayName create a name for the Gateway that is built based on the given Ingress and bonds to the +// given ingress gateway service. +func GatewayName(accessor kmeta.Accessor, gatewaySvc *corev1.Service) string { + gatewayServiceKey := fmt.Sprintf("%s/%s", gatewaySvc.Namespace, gatewaySvc.Name) + return fmt.Sprintf("%s-%d", accessor.GetName(), adler32.Checksum([]byte(gatewayServiceKey))) +} + +// MakeTLSServers creates the expected Gateway TLS `Servers` based on the given Ingress. +func MakeTLSServers(ing *v1alpha1.Ingress, gatewayServiceNamespace string, originSecrets map[string]*corev1.Secret) ([]*istiov1alpha3.Server, error) { + servers := make([]*istiov1alpha3.Server, len(ing.Spec.TLS)) + // TODO(zhiminx): for the hosts that does not included in the IngressTLS but listed in the IngressRule, + // do we consider them as hosts for HTTP? 
+ for i, tls := range ing.Spec.TLS { + credentialName := tls.SecretName + // If the origin secret is not in the target namespace, then it should have been + // copied into the target namespace. So we use the name of the copy. + if tls.SecretNamespace != gatewayServiceNamespace { + originSecret, ok := originSecrets[secretKey(tls)] + if !ok { + return nil, fmt.Errorf("unable to get the original secret %s/%s", tls.SecretNamespace, tls.SecretName) + } + credentialName = targetSecret(originSecret, ing) + } + + port := ing.GetNamespace() + "/" + ing.GetName() + + servers[i] = &istiov1alpha3.Server{ + Hosts: tls.Hosts, + Port: &istiov1alpha3.Port{ + Name: fmt.Sprintf("%s:%d", port, i), + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: credentialName, + }, + } + } + return SortServers(servers), nil +} + +// MakeHTTPServer creates a HTTP Gateway `Server` based on the HTTPProtocol +// configureation. +func MakeHTTPServer(httpProtocol network.HTTPProtocol, hosts []string) *istiov1alpha3.Server { + if httpProtocol == network.HTTPDisabled { + return nil + } + server := &istiov1alpha3.Server{ + Hosts: hosts, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + } + if httpProtocol == network.HTTPRedirected { + server.Tls = &istiov1alpha3.Server_TLSOptions{ + HttpsRedirect: true, + } + } + return server +} + +// ServiceNamespaceFromURL extracts the namespace part from the service URL. +// TODO(nghia): Remove this by parsing at config parsing time. +func ServiceNamespaceFromURL(svc string) (string, error) { + parts := strings.SplitN(svc, ".", 3) + if len(parts) != 3 { + return "", fmt.Errorf("unexpected service URL form: %s", svc) + } + return parts[1], nil +} + +// TODO(nghia): Remove this by parsing at config parsing time. 
+func getIngressGatewaySvcNameNamespaces(ctx context.Context) ([]metav1.ObjectMeta, error) { + cfg := config.FromContext(ctx).Istio + nameNamespaces := make([]metav1.ObjectMeta, len(cfg.IngressGateways)) + for i, ingressgateway := range cfg.IngressGateways { + parts := strings.SplitN(ingressgateway.ServiceURL, ".", 3) + if len(parts) != 3 { + return nil, fmt.Errorf("unexpected service URL form: %s", ingressgateway.ServiceURL) + } + nameNamespaces[i] = metav1.ObjectMeta{ + Name: parts[0], + Namespace: parts[1], + } + } + return nameNamespaces, nil +} + +// UpdateGateway replaces the existing servers with the wanted servers. +func UpdateGateway(gateway *v1alpha3.Gateway, want []*istiov1alpha3.Server, existing []*istiov1alpha3.Server) *v1alpha3.Gateway { + existingServers := sets.String{} + for i := range existing { + existingServers.Insert(existing[i].Port.Name) + } + + servers := []*istiov1alpha3.Server{} + for _, server := range gateway.Spec.Servers { + // We remove + // 1) the existing servers + // 2) the default HTTP server and HTTPS server in the gateway because they are only used for the scenario of not reconciling gateway. + // 3) the placeholder servers. + if existingServers.Has(server.Port.Name) || isDefaultServer(server) || isPlaceHolderServer(server) { + continue + } + servers = append(servers, server) + } + servers = append(servers, want...) + + // Istio Gateway requires to have at least one server. So if the final gateway does not have any server, + // we add "placeholder" server back. 
+ if len(servers) == 0 { + servers = append(servers, &placeholderServer) + } + + SortServers(servers) + gateway.Spec.Servers = servers + return gateway +} + +func isDefaultServer(server *istiov1alpha3.Server) bool { + if server.Port.Name == "https" { + return len(server.Hosts) > 0 && server.Hosts[0] == "*" + } + return server.Port.Name == "http" +} + +func isPlaceHolderServer(server *istiov1alpha3.Server) bool { + return equality.Semantic.DeepEqual(server, &placeholderServer) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway_test.go new file mode 100644 index 0000000000..e0a9814338 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/gateway_test.go @@ -0,0 +1,736 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "context" + "fmt" + "hash/adler32" + "testing" + + "github.com/google/go-cmp/cmp" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakeserviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + "knative.dev/pkg/kmeta" + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/ingress/config" +) + +var secret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret0", + Namespace: system.Namespace(), + }, + Data: map[string][]byte{ + "test": []byte("test"), + }, +} + +var originSecrets = map[string]*corev1.Secret{ + fmt.Sprintf("%s/secret0", system.Namespace()): &secret, +} + +var selector = map[string]string{ + "istio": "ingressgateway", +} + +var gateway = v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: servers, + }, +} + +var servers = []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, +}, { + Hosts: []string{"host2.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/non-ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, +}} + +var httpServer = istiov1alpha3.Server{ + Hosts: 
[]string{"*"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, +} + +var gatewayWithPlaceholderServer = v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{&placeholderServer}, + }, +} + +var gatewayWithDefaultWildcardTLSServer = v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + }}, + }, + }, +} + +var gatewayWithModifiedWildcardTLSServer = v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{&modifiedDefaultTLSServer}, + }, +} + +var modifiedDefaultTLSServer = istiov1alpha3.Server{ + Hosts: []string{"added.by.user.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, +} + +var ingressSpec = v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{"host1.example.com"}, + }}, + TLS: []v1alpha1.IngressTLS{{ + Hosts: []string{"host1.example.com"}, + SecretName: "secret0", + SecretNamespace: system.Namespace(), + }}, +} + +var ingressResource = v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress", + Namespace: "test-ns", + }, + Spec: ingressSpec, +} + +func TestGetServers(t *testing.T) { + servers := GetServers(&gateway, &ingressResource) + expected := []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: 
corev1.TLSPrivateKeyKey, + }, + }} + + if diff := cmp.Diff(expected, servers); diff != "" { + t.Errorf("Unexpected servers (-want +got): %v", diff) + } +} + +func TestGetHTTPServer(t *testing.T) { + newGateway := gateway + newGateway.Spec.Servers = append(newGateway.Spec.Servers, &httpServer) + server := GetHTTPServer(&newGateway) + expected := &istiov1alpha3.Server{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + } + if diff := cmp.Diff(expected, server); diff != "" { + t.Errorf("Unexpected server (-want +got): %v", diff) + } +} + +func TestMakeTLSServers(t *testing.T) { + cases := []struct { + name string + ci *v1alpha1.Ingress + gatewayServiceNamespace string + originSecrets map[string]*corev1.Secret + expected []*istiov1alpha3.Server + wantErr bool + }{{ + name: "secret namespace is the different from the gateway service namespace", + ci: &ingressResource, + // gateway service namespace is "istio-system", while the secret namespace is system.Namespace()("knative-testing"). + gatewayServiceNamespace: "istio-system", + originSecrets: originSecrets, + expected: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: targetSecret(&secret, &ingressResource), + }, + }}, + }, { + name: "secret namespace is the same as the gateway service namespace", + ci: &ingressResource, + // gateway service namespace and the secret namespace are both in system.Namespace(). 
+ gatewayServiceNamespace: system.Namespace(), + originSecrets: originSecrets, + expected: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: "secret0", + }, + }}, + }, { + name: "port name is created with ingress namespace-name", + ci: &ingressResource, + gatewayServiceNamespace: system.Namespace(), + originSecrets: originSecrets, + expected: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + // port name is created with / + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: "secret0", + }, + }}, + }, { + name: "error to make servers because of incorrect originSecrets", + ci: &ingressResource, + gatewayServiceNamespace: "istio-system", + originSecrets: map[string]*corev1.Secret{}, + wantErr: true, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + servers, err := MakeTLSServers(c.ci, c.gatewayServiceNamespace, c.originSecrets) + if (err != nil) != c.wantErr { + t.Fatalf("Test: %s; MakeServers error = %v, WantErr %v", c.name, err, c.wantErr) + } + if diff := cmp.Diff(c.expected, servers); diff != "" { + t.Errorf("Unexpected servers (-want, +got): %v", diff) + } + }) + } +} + +func TestMakeHTTPServer(t *testing.T) { + cases := []struct { + name string + httpProtocol network.HTTPProtocol + expected *istiov1alpha3.Server + }{{ + name: "nil HTTP Server", + httpProtocol: network.HTTPDisabled, + expected: nil, + }, { + name: "HTTP server", + httpProtocol: network.HTTPEnabled, + expected: &istiov1alpha3.Server{ + 
Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + }, + }, { + name: "Redirect HTTP server", + httpProtocol: network.HTTPRedirected, + expected: &istiov1alpha3.Server{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + HttpsRedirect: true, + }, + }, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got := MakeHTTPServer(c.httpProtocol, []string{"*"}) + if diff := cmp.Diff(c.expected, got); diff != "" { + t.Errorf("Unexpected HTTP Server (-want, +got): %v", diff) + } + }) + } +} + +func TestUpdateGateway(t *testing.T) { + cases := []struct { + name string + existingServers []*istiov1alpha3.Server + newServers []*istiov1alpha3.Server + original v1alpha3.Gateway + expected v1alpha3.Gateway + }{{ + name: "Update Gateway servers.", + existingServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + newServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host-new.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + original: gateway, + expected: v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + // The host name was updated to the one in "newServers". 
+ Hosts: []string{"host-new.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }, { + Hosts: []string{"host2.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/non-ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + }, + }, + }, { + name: "Delete servers from Gateway", + existingServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + newServers: []*istiov1alpha3.Server{}, + original: gateway, + expected: v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + // Only one server is left. The other one is deleted. + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"host2.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/non-ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + }, + }, + }, { + name: "Delete servers from Gateway and no real servers are left", + + // All of the servers in the original gateway will be deleted. 
+ existingServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }, { + Hosts: []string{"host2.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/non-ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + newServers: []*istiov1alpha3.Server{}, + original: gateway, + expected: gatewayWithPlaceholderServer, + }, { + name: "Add servers to the gateway with only placeholder server", + existingServers: []*istiov1alpha3.Server{}, + newServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + original: gatewayWithPlaceholderServer, + // The placeholder server should be deleted. + expected: v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + }, + }, + }, { + name: "Delete wildcard servers from gateway", + existingServers: []*istiov1alpha3.Server{}, + newServers: servers, + original: gatewayWithDefaultWildcardTLSServer, + // The wildcard server should be deleted. 
+ expected: gateway, + }, { + name: "Do not delete modified wildcard servers from gateway", + existingServers: []*istiov1alpha3.Server{}, + newServers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "clusteringress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }}, + original: gatewayWithModifiedWildcardTLSServer, + expected: v1alpha3.Gateway{ + Spec: istiov1alpha3.Gateway{ + Servers: []*istiov1alpha3.Server{ + { + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "clusteringress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + }, + }, + &modifiedDefaultTLSServer, + }, + }, + }, + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + g := UpdateGateway(&c.original, c.newServers, c.existingServers) + if diff := cmp.Diff(&c.expected, g); diff != "" { + t.Errorf("Unexpected gateway (-want, +got): %v", diff) + } + }) + } +} + +func TestMakeIngressGateways(t *testing.T) { + cases := []struct { + name string + ia *v1alpha1.Ingress + originSecrets map[string]*corev1.Secret + gatewayService *corev1.Service + want []*v1alpha3.Gateway + wantErr bool + }{{ + name: "happy path: secret namespace is the different from the gateway service namespace", + ia: &ingressResource, + originSecrets: originSecrets, + gatewayService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio-ingressgateway", + Namespace: "istio-system", + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + }, + }, + want: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ingress-%d", 
adler32.Checksum([]byte("istio-system/istio-ingressgateway"))), + Namespace: "test-ns", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(&ingressResource)}, + Labels: map[string]string{ + networking.IngressLabelKey: "ingress", + }, + }, + Spec: istiov1alpha3.Gateway{ + Selector: selector, + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: targetSecret(&secret, &ingressResource), + }, + }, { + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + }}, + }, + }}, + }, { + name: "happy path: secret namespace is the same as the gateway service namespace", + ia: &ingressResource, + originSecrets: originSecrets, + // The namespace of gateway service is the same as the secrets. 
+ gatewayService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio-ingressgateway", + Namespace: system.Namespace(), + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + }, + }, + want: []*v1alpha3.Gateway{{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ingress-%d", adler32.Checksum([]byte(system.Namespace()+"/istio-ingressgateway"))), + Namespace: "test-ns", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(&ingressResource)}, + Labels: map[string]string{ + networking.IngressLabelKey: "ingress", + }, + }, + Spec: istiov1alpha3.Gateway{ + Selector: selector, + Servers: []*istiov1alpha3.Server{{ + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: "test-ns/ingress:0", + Number: 443, + Protocol: "HTTPS", + }, + Tls: &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + ServerCertificate: corev1.TLSCertKey, + PrivateKey: corev1.TLSPrivateKeyKey, + CredentialName: secret.Name, + }, + }, { + Hosts: []string{"host1.example.com"}, + Port: &istiov1alpha3.Port{ + Name: httpServerPortName, + Number: 80, + Protocol: "HTTP", + }, + }}, + }, + }}, + }, { + name: "error to make gateway because of incorrect originSecrets", + ia: &ingressResource, + originSecrets: map[string]*corev1.Secret{}, + gatewayService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio-ingressgateway", + Namespace: "istio-system", + }, + Spec: corev1.ServiceSpec{ + Selector: selector, + }, + }, + wantErr: true, + }} + + for _, c := range cases { + ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) + defer cancel() + svcLister := serviceLister(ctx, c.gatewayService) + ctx = config.ToContext(context.Background(), &config.Config{ + Istio: &config.Istio{ + IngressGateways: []config.Gateway{{ + Name: networking.KnativeIngressGateway, + ServiceURL: fmt.Sprintf("%s.%s.svc.cluster.local", c.gatewayService.Name, c.gatewayService.Namespace), + }}, + }, + Network: &network.Config{ + HTTPProtocol: 
network.HTTPEnabled, + }, + }) + t.Run(c.name, func(t *testing.T) { + got, err := MakeIngressGateways(ctx, c.ia, c.originSecrets, svcLister) + if (err != nil) != c.wantErr { + t.Fatalf("Test: %s; MakeIngressGateways error = %v, WantErr %v", c.name, err, c.wantErr) + } + if diff := cmp.Diff(c.want, got); diff != "" { + t.Errorf("Unexpected Gateways (-want, +got): %v", diff) + } + }) + } +} + +func serviceLister(ctx context.Context, svcs ...*corev1.Service) corev1listers.ServiceLister { + fake := fakekubeclient.Get(ctx) + informer := fakeserviceinformer.Get(ctx) + + for _, svc := range svcs { + fake.CoreV1().Services(svc.Namespace).Create(svc) + informer.Informer().GetIndexer().Add(svc) + } + + return informer.Lister() +} + +func TestGatewayName(t *testing.T) { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gateway", + Namespace: "istio-system", + }, + } + ingress := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress", + Namespace: "default", + }, + } + + want := fmt.Sprintf("ingress-%d", adler32.Checksum([]byte("istio-system/gateway"))) + got := GatewayName(ingress, svc) + if got != want { + t.Errorf("Unexpected gateway name. want %q, got %q", want, got) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/doc.go new file mode 100644 index 0000000000..879db4f663 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names holds simple functions for synthesizing resource names. +package names diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names.go new file mode 100644 index 0000000000..336270d40b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "knative.dev/pkg/kmeta" +) + +// IngressVirtualService returns the name of the VirtualService child +// resource for given Ingress that programs traffic for Ingress +// Gateways. +func IngressVirtualService(i kmeta.Accessor) string { + return kmeta.ChildName(i.GetName(), "") +} + +// MeshVirtualService returns the name of the VirtualService child +// resource for given Ingress that programs traffic for Service +// Mesh. 
+func MeshVirtualService(i kmeta.Accessor) string { + return kmeta.ChildName(i.GetName(), "-mesh") +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names_test.go new file mode 100644 index 0000000000..b7db84588b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/names/names_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +func TestNamer(t *testing.T) { + tests := []struct { + name string + ingress *v1alpha1.Ingress + f func(kmeta.Accessor) string + want string + }{{ + name: "IngressVirtualService", + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + f: IngressVirtualService, + want: "foo", + }, { + name: "IngressVirtualService", + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns1", + }, + }, + f: IngressVirtualService, + want: "foo", + }, { + name: "MeshVirtualService", + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + f: MeshVirtualService, + want: "foo-mesh", + }, { + name: "MeshVirtualService", + ingress: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "ns2", + }, + }, + f: MeshVirtualService, + want: "foo-mesh", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.f(test.ingress) + if got != test.want { + t.Errorf("%s() = %v, wanted %v", test.name, got, test.want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret.go new file mode 100644 index 0000000000..4f111241db --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// GetSecrets gets the all of the secrets referenced by the given Ingress, and +// returns a map whose key is the a secret namespace/name key and value is pointer of the secret. +func GetSecrets(ing *v1alpha1.Ingress, secretLister corev1listers.SecretLister) (map[string]*corev1.Secret, error) { + secrets := map[string]*corev1.Secret{} + for _, tls := range ing.Spec.TLS { + ref := secretKey(tls) + if _, ok := secrets[ref]; ok { + continue + } + secret, err := secretLister.Secrets(tls.SecretNamespace).Get(tls.SecretName) + if err != nil { + return nil, err + } + secrets[ref] = secret + } + return secrets, nil +} + +// MakeSecrets makes copies of the origin Secrets under the namespace of Istio gateway service. 
+func MakeSecrets(ctx context.Context, originSecrets map[string]*corev1.Secret, accessor kmeta.OwnerRefableAccessor) ([]*corev1.Secret, error) { + nameNamespaces, err := getIngressGatewaySvcNameNamespaces(ctx) + if err != nil { + return nil, err + } + secrets := []*corev1.Secret{} + for _, originSecret := range originSecrets { + for _, meta := range nameNamespaces { + if meta.Namespace == originSecret.Namespace { + // no need to copy secret when the target namespace is the same + // as the origin namespace + continue + } + secrets = append(secrets, makeSecret(originSecret, meta.Namespace, accessor)) + } + } + return secrets, nil +} + +func makeSecret(originSecret *corev1.Secret, targetNamespace string, accessor kmeta.OwnerRefableAccessor) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: targetSecret(originSecret, accessor), + Namespace: targetNamespace, + Labels: map[string]string{ + networking.OriginSecretNameLabelKey: originSecret.Name, + networking.OriginSecretNamespaceLabelKey: originSecret.Namespace, + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(accessor)}, + }, + Data: originSecret.Data, + Type: originSecret.Type, + } +} + +// targetSecret returns the name of the Secret that is copied from the origin Secret. +func targetSecret(originSecret *corev1.Secret, accessor kmeta.OwnerRefable) string { + return fmt.Sprintf("%s-%s", accessor.GetObjectMeta().GetName(), originSecret.UID) +} + +// SecretRef returns the ObjectReference of a secret given the namespace and name of the secret. +func SecretRef(namespace, name string) corev1.ObjectReference { + gvk := corev1.SchemeGroupVersion.WithKind("Secret") + apiVersion, kind := gvk.ToAPIVersionAndKind() + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: namespace, + Name: name, + } +} + +// Generates the k8s secret key with the given TLS. 
+func secretKey(tls v1alpha1.IngressTLS) string { + return fmt.Sprintf("%s/%s", tls.SecretNamespace, tls.SecretName) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret_test.go new file mode 100644 index 0000000000..f2e53ecb7d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/secret_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + "testing" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/system" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + fakek8s "k8s.io/client-go/kubernetes/fake" + . 
"knative.dev/pkg/logging/testing" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler/ingress/config" +) + +var testSecret = corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret0", + Namespace: "knative-serving", + }, + Data: map[string][]byte{ + "test": []byte("abcd"), + }, +} + +var ci = v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress", + Namespace: system.Namespace(), + }, + Spec: v1alpha1.IngressSpec{ + TLS: []v1alpha1.IngressTLS{{ + Hosts: []string{"example.com"}, + SecretName: "secret0", + SecretNamespace: "knative-serving", + }}, + }, +} + +func TestGetSecrets(t *testing.T) { + kubeClient := fakek8s.NewSimpleClientset() + secretClient := kubeinformers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Secrets() + createSecret := func(secret *corev1.Secret) { + kubeClient.CoreV1().Secrets(secret.Namespace).Create(secret) + secretClient.Informer().GetIndexer().Add(secret) + } + + cases := []struct { + name string + secret *corev1.Secret + ci *v1alpha1.Ingress + expected map[string]*corev1.Secret + wantErr bool + }{{ + name: "Get secrets successfully.", + secret: &testSecret, + ci: &ci, + expected: map[string]*corev1.Secret{ + "knative-serving/secret0": &testSecret, + }, + }, { + name: "Fail to get secrets", + secret: &corev1.Secret{}, + ci: &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + TLS: []v1alpha1.IngressTLS{{ + Hosts: []string{"example.com"}, + SecretName: "no-exist-secret", + SecretNamespace: "no-exist-namespace", + }}, + }, + }, + wantErr: true, + }} + for _, c := range cases { + createSecret(c.secret) + t.Run(c.name, func(t *testing.T) { + secrets, err := GetSecrets(c.ci, secretClient.Lister()) + if (err != nil) != c.wantErr { + t.Fatalf("Test: %s; GetSecrets error = %v, WantErr %v", c.name, err, c.wantErr) + } + if diff := cmp.Diff(c.expected, secrets); diff != "" { + t.Errorf("Unexpected secrets (-want, +got): %v", diff) + } + }) + 
} +} + +func TestMakeSecrets(t *testing.T) { + ctx := TestContextWithLogger(t) + ctx = config.ToContext(ctx, &config.Config{ + Istio: &config.Istio{ + IngressGateways: []config.Gateway{{ + Name: "test-gateway", + // The namespace of Istio gateway service is istio-system. + ServiceURL: "istio-ingressgateway.istio-system.svc.cluster.local", + }}, + }, + }) + + cases := []struct { + name string + originSecret *corev1.Secret + expected []*corev1.Secret + wantErr bool + }{{ + name: "target secret namespace (istio-system) is the same as the origin secret namespace (istio-system).", + originSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "istio-system", + UID: "1234", + }, + Data: map[string][]byte{ + "test-data": []byte("abcd"), + }}, + expected: []*corev1.Secret{}, + }, { + name: "target secret namespace (istio-system) is different from the origin secret namespace (knative-serving).", + originSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "knative-serving", + UID: "1234", + }, + Data: map[string][]byte{ + "test-data": []byte("abcd"), + }}, + expected: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + // Name is generated by TargetSecret function. + Name: "ingress-1234", + // Expected secret should be in istio-system which is + // the ns of Istio gateway service. 
+ Namespace: "istio-system", + Labels: map[string]string{ + networking.OriginSecretNameLabelKey: "test-secret", + networking.OriginSecretNamespaceLabelKey: "knative-serving", + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(&ci)}, + }, + Data: map[string][]byte{ + "test-data": []byte("abcd"), + }, + }}, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + originSecrets := map[string]*corev1.Secret{ + fmt.Sprintf("%s/%s", c.originSecret.Namespace, c.originSecret.Name): c.originSecret, + } + secrets, err := MakeSecrets(ctx, originSecrets, &ci) + if (err != nil) != c.wantErr { + t.Fatalf("Test: %q; MakeSecrets() error = %v, WantErr %v", c.name, err, c.wantErr) + } + if diff := cmp.Diff(c.expected, secrets); diff != "" { + t.Errorf("Unexpected secrets (-want, +got): %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service.go new file mode 100644 index 0000000000..64ee639bfa --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service.go @@ -0,0 +1,291 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/gogo/protobuf/types" + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/network" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/network/ingress" + "knative.dev/serving/pkg/reconciler/ingress/resources/names" + "knative.dev/serving/pkg/resources" +) + +var retriableConditions = strings.Join([]string{ + "5xx", + "connect-failure", + "refused-stream", + "cancelled", + "resource-exhausted", + "retriable-status-codes"}, ",") + +// VirtualServiceNamespace gives the namespace of the child +// VirtualServices for a given Ingress. +func VirtualServiceNamespace(ing *v1alpha1.Ingress) string { + if len(ing.GetNamespace()) == 0 { + return system.Namespace() + } + return ing.GetNamespace() +} + +// MakeIngressVirtualService creates Istio VirtualService as network +// programming for Istio Gateways other than 'mesh'. +func MakeIngressVirtualService(ing *v1alpha1.Ingress, gateways map[v1alpha1.IngressVisibility]sets.String) *v1alpha3.VirtualService { + vs := &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.IngressVirtualService(ing), + Namespace: VirtualServiceNamespace(ing), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing)}, + Annotations: ing.GetAnnotations(), + }, + Spec: *makeVirtualServiceSpec(ing, gateways, ingress.ExpandedHosts(getHosts(ing))), + } + + // Populate the Ingress labels. 
+ vs.Labels = resources.FilterMap(ing.GetLabels(), func(k string) bool { + return k != serving.RouteLabelKey && k != serving.RouteNamespaceLabelKey + }) + vs.Labels[networking.IngressLabelKey] = ing.Name + return vs +} + +// MakeMeshVirtualService creates a mesh Virtual Service +func MakeMeshVirtualService(ing *v1alpha1.Ingress, gateways map[v1alpha1.IngressVisibility]sets.String) *v1alpha3.VirtualService { + hosts := keepLocalHostnames(getHosts(ing)) + // If cluster local gateway is configured, we need to expand hosts because of + // https://github.com/knative/serving/issues/6488#issuecomment-573513768. + if len(gateways[v1alpha1.IngressVisibilityClusterLocal]) != 0 { + hosts = ingress.ExpandedHosts(hosts) + } + if len(hosts) == 0 { + return nil + } + vs := &v1alpha3.VirtualService{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.MeshVirtualService(ing), + Namespace: VirtualServiceNamespace(ing), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(ing)}, + Annotations: ing.GetAnnotations(), + }, + Spec: *makeVirtualServiceSpec(ing, map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString("mesh"), + v1alpha1.IngressVisibilityClusterLocal: sets.NewString("mesh"), + }, hosts), + } + // Populate the Ingress labels. 
+ vs.Labels = resources.FilterMap(ing.GetLabels(), func(k string) bool { + return k != serving.RouteLabelKey && k != serving.RouteNamespaceLabelKey + }) + vs.Labels[networking.IngressLabelKey] = ing.Name + return vs +} + +// MakeVirtualServices creates a mesh virtualservice and a virtual service for each gateway +func MakeVirtualServices(ing *v1alpha1.Ingress, gateways map[v1alpha1.IngressVisibility]sets.String) ([]*v1alpha3.VirtualService, error) { + // Insert probe header + ing = ing.DeepCopy() + if _, err := ingress.InsertProbe(ing); err != nil { + return nil, fmt.Errorf("failed to insert a probe into the Ingress: %w", err) + } + vss := []*v1alpha3.VirtualService{} + if meshVs := MakeMeshVirtualService(ing, gateways); meshVs != nil { + vss = append(vss, meshVs) + } + requiredGatewayCount := 0 + if len(getPublicIngressRules(ing)) > 0 { + requiredGatewayCount += gateways[v1alpha1.IngressVisibilityExternalIP].Len() + } + + if len(getClusterLocalIngressRules(ing)) > 0 { + requiredGatewayCount += gateways[v1alpha1.IngressVisibilityClusterLocal].Len() + } + + if requiredGatewayCount > 0 { + vss = append(vss, MakeIngressVirtualService(ing, gateways)) + } + + return vss, nil +} + +func makeVirtualServiceSpec(ing *v1alpha1.Ingress, gateways map[v1alpha1.IngressVisibility]sets.String, hosts sets.String) *istiov1alpha3.VirtualService { + gw := sets.String{}.Union(gateways[v1alpha1.IngressVisibilityClusterLocal]).Union(gateways[v1alpha1.IngressVisibilityExternalIP]) + spec := istiov1alpha3.VirtualService{ + Gateways: gw.List(), + Hosts: hosts.List(), + } + + for _, rule := range ing.Spec.Rules { + for _, p := range rule.HTTP.Paths { + hosts := hosts.Intersection(sets.NewString(rule.Hosts...)) + if hosts.Len() != 0 { + spec.Http = append(spec.Http, makeVirtualServiceRoute(hosts, &p, gateways, rule.Visibility)) + } + } + } + return &spec +} + +func makeVirtualServiceRoute(hosts sets.String, http *v1alpha1.HTTPIngressPath, gateways map[v1alpha1.IngressVisibility]sets.String, 
visibility v1alpha1.IngressVisibility) *istiov1alpha3.HTTPRoute { + matches := []*istiov1alpha3.HTTPMatchRequest{} + clusterDomainName := network.GetClusterDomainName() + for _, host := range hosts.List() { + g := gateways[visibility] + if strings.HasSuffix(host, clusterDomainName) && len(gateways[v1alpha1.IngressVisibilityClusterLocal]) > 0 { + // For local hostname, always use private gateway + g = gateways[v1alpha1.IngressVisibilityClusterLocal] + } + matches = append(matches, makeMatch(host, http.Path, g)) + } + weights := []*istiov1alpha3.HTTPRouteDestination{} + for _, split := range http.Splits { + + var h *istiov1alpha3.Headers + + if len(split.AppendHeaders) > 0 { + h = &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: split.AppendHeaders, + }, + } + } + + weights = append(weights, &istiov1alpha3.HTTPRouteDestination{ + Destination: &istiov1alpha3.Destination{ + Host: network.GetServiceHostname( + split.ServiceName, split.ServiceNamespace), + Port: &istiov1alpha3.PortSelector{ + Number: uint32(split.ServicePort.IntValue()), + }, + }, + Weight: int32(split.Percent), + Headers: h, + }) + } + + var h *istiov1alpha3.Headers + if len(http.AppendHeaders) > 0 { + h = &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: http.AppendHeaders, + }, + } + } + + // TODO(https://github.com/knative/serving/issues/6367): Allow customization of this. + retryOn := retriableConditions + perTryTimeout := types.DurationProto(http.Retries.PerTryTimeout.Duration) + + // retry config must be empty when retry is disabled by setting attempt to 0. Otherwise, istio galley validation check fails. 
+ if http.Retries.Attempts == 0 { + retryOn = "" + perTryTimeout = nil + } + + return &istiov1alpha3.HTTPRoute{ + Match: matches, + Route: weights, + Timeout: types.DurationProto(http.Timeout.Duration), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: retryOn, + Attempts: int32(http.Retries.Attempts), + PerTryTimeout: perTryTimeout, + }, + Headers: h, + WebsocketUpgrade: true, + } +} + +func keepLocalHostnames(hosts sets.String) sets.String { + localSvcSuffix := ".svc." + network.GetClusterDomainName() + retained := sets.NewString() + for _, h := range hosts.List() { + if strings.HasSuffix(h, localSvcSuffix) { + retained.Insert(h) + } + } + return retained +} + +func makeMatch(host string, pathRegExp string, gateways sets.String) *istiov1alpha3.HTTPMatchRequest { + match := &istiov1alpha3.HTTPMatchRequest{ + Gateways: gateways.List(), + Authority: &istiov1alpha3.StringMatch{ + // Do not use Regex as Istio 1.4 or later has 100 bytes limitation. + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: hostPrefix(host)}, + }, + } + // Empty pathRegExp is considered match all path. We only need to + // consider pathRegExp when it's non-empty. + if pathRegExp != "" { + match.Uri = &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Regex{Regex: pathRegExp}, + } + } + return match +} + +// hostPrefix returns an host to match either host or host:. +// For clusterLocalHost, it trims .svc. from the host to match short host. +func hostPrefix(host string) string { + localDomainSuffix := ".svc." + network.GetClusterDomainName() + if !strings.HasSuffix(host, localDomainSuffix) { + return host + } + return strings.TrimSuffix(host, localDomainSuffix) +} + +func getHosts(ing *v1alpha1.Ingress) sets.String { + hosts := sets.NewString() + for _, rule := range ing.Spec.Rules { + hosts.Insert(rule.Hosts...) 
+ } + return hosts +} + +func getClusterLocalIngressRules(i *v1alpha1.Ingress) []v1alpha1.IngressRule { + var result []v1alpha1.IngressRule + for _, rule := range i.Spec.Rules { + if rule.Visibility == v1alpha1.IngressVisibilityClusterLocal { + result = append(result, rule) + } + } + + return result +} + +func getPublicIngressRules(i *v1alpha1.Ingress) []v1alpha1.IngressRule { + var result []v1alpha1.IngressRule + for _, rule := range i.Spec.Rules { + if rule.Visibility == v1alpha1.IngressVisibilityExternalIP || rule.Visibility == "" { + result = append(result, rule) + } + } + + return result +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service_test.go new file mode 100644 index 0000000000..a6faa23158 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/ingress/resources/virtual_service_test.go @@ -0,0 +1,754 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + "time" + + "github.com/gogo/protobuf/types" + "github.com/google/go-cmp/cmp" + istiov1alpha3 "istio.io/api/networking/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + apiconfig "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" +) + +var ( + defaultMaxRevisionTimeout = time.Duration(apiconfig.DefaultMaxRevisionTimeoutSeconds) * time.Second + defaultGateways = makeGatewayMap([]string{"gateway"}, []string{"private-gateway"}) + defaultIngress = v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + }, + Spec: v1alpha1.IngressSpec{Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + } +) + +func TestMakeVirtualServices_CorrectMetadata(t *testing.T) { + for _, tc := range []struct { + name string + gateways map[v1alpha1.IngressVisibility]sets.String + ci *v1alpha1.Ingress + expected []metav1.ObjectMeta + }{{ + name: "mesh and ingress", + gateways: makeGatewayMap([]string{"gateway"}, []string{"private-gateway"}), + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + }, + expected: []metav1.ObjectMeta{{ + Name: 
"test-ingress-mesh", + Namespace: system.Namespace(), + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, { + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }}, + }, { + name: "ingress only", + gateways: makeGatewayMap([]string{"gateway"}, []string{"private-gateway"}), + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.example.com", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + }, + expected: []metav1.ObjectMeta{{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }}, + }, { + name: "mesh only", + gateways: nil, + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + }, + expected: []metav1.ObjectMeta{{ + Name: "test-ingress-mesh", + Namespace: system.Namespace(), + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: 
"test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }}, + }, { + name: "mesh only with namespace", + gateways: nil, + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "test-ns", + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + }, + expected: []metav1.ObjectMeta{{ + Name: "test-ingress-mesh", + Namespace: "test-ns", + Labels: map[string]string{ + networking.IngressLabelKey: "test-ingress", + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }}, + }} { + t.Run(tc.name, func(t *testing.T) { + vss, err := MakeVirtualServices(tc.ci, tc.gateways) + if err != nil { + t.Fatalf("MakeVirtualServices failed: %v", err) + } + if len(vss) != len(tc.expected) { + t.Fatalf("Expected %d VirtualService, saw %d", len(tc.expected), len(vss)) + } + for i := range tc.expected { + tc.expected[i].OwnerReferences = []metav1.OwnerReference{*kmeta.NewControllerRef(tc.ci)} + if diff := cmp.Diff(tc.expected[i], vss[i].ObjectMeta); diff != "" { + t.Errorf("Unexpected metadata (-want +got): %v", diff) + } + } + }) + } +} + +func TestMakeMeshVirtualServiceSpec_CorrectGateways(t *testing.T) { + ci := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{}, + }}}, + } + expected := []string{"mesh"} + gateways := 
MakeMeshVirtualService(ci, defaultGateways).Spec.Gateways + if diff := cmp.Diff(expected, gateways); diff != "" { + t.Errorf("Unexpected gateways (-want +got): %v", diff) + } +} + +func TestMakeMeshVirtualServiceSpecCorrectHosts(t *testing.T) { + for _, tc := range []struct { + name string + gateways map[v1alpha1.IngressVisibility]sets.String + expectedHosts sets.String + }{{ + name: "with cluster local gateway: expanding hosts", + gateways: map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityClusterLocal: sets.NewString("cluster-local"), + }, + expectedHosts: sets.NewString( + "test-route.test-ns.svc.cluster.local", + "test-route.test-ns.svc", + "test-route.test-ns", + ), + }, { + name: "with mesh: no exapnding hosts", + gateways: map[v1alpha1.IngressVisibility]sets.String{}, + expectedHosts: sets.NewString("test-route.test-ns.svc.cluster.local"), + }} { + t.Run(tc.name, func(t *testing.T) { + vs := MakeMeshVirtualService(&defaultIngress, tc.gateways) + vsHosts := sets.NewString(vs.Spec.Hosts...) 
+ if !vsHosts.Equal(tc.expectedHosts) { + t.Errorf("Unexpected hosts want %v; got %v", tc.expectedHosts, vsHosts) + } + }) + } + +} + +func TestMakeMeshVirtualServiceSpec_CorrectRetries(t *testing.T) { + for _, tc := range []struct { + name string + ci *v1alpha1.Ingress + expected *istiov1alpha3.HTTPRetry + }{{ + name: "default retries", + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + }}}, + }, + expected: &istiov1alpha3.HTTPRetry{ + RetryOn: retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, + }, { + name: "disabling retries", + ci: &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: 0, + }, + }}, + 
}, + }}}, + }, + expected: &istiov1alpha3.HTTPRetry{ + RetryOn: "", + Attempts: int32(0), + PerTryTimeout: nil, + }, + }} { + t.Run(tc.name, func(t *testing.T) { + for _, h := range MakeMeshVirtualService(tc.ci, defaultGateways).Spec.Http { + if diff := cmp.Diff(tc.expected, h.Retries); diff != "" { + t.Errorf("Unexpected retries (-want +got): %v", diff) + } + } + }) + } +} + +func TestMakeMeshVirtualServiceSpec_CorrectRoutes(t *testing.T) { + ci := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "^/pets/(.*?)?", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "v2-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "ugh": "blah", + }, + }}, + AppendHeaders: map[string]string{ + "foo": "bar", + }, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + }, { + Hosts: []string{ + "v1.domain.com", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "^/pets/(.*?)?", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "v1-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + }}, + AppendHeaders: map[string]string{ + "foo": "baz", + }, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + 
}}, + }, + } + expected := []*istiov1alpha3.HTTPRoute{{ + Match: []*istiov1alpha3.HTTPMatchRequest{{ + Uri: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Regex{Regex: "^/pets/(.*?)?"}, + }, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `test-route.test-ns`}, + }, + Gateways: []string{"mesh"}, + }}, + Route: []*istiov1alpha3.HTTPRouteDestination{{ + Destination: &istiov1alpha3.Destination{ + Host: "v2-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 80}, + }, + Weight: 100, + Headers: &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: map[string]string{ + "ugh": "blah", + }, + }, + }, + }}, + Headers: &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: map[string]string{ + "foo": "bar", + }, + }, + }, + Timeout: types.DurationProto(defaultMaxRevisionTimeout), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, + WebsocketUpgrade: true, + }} + + routes := MakeMeshVirtualService(ci, defaultGateways).Spec.Http + if diff := cmp.Diff(expected, routes); diff != "" { + t.Errorf("Unexpected routes (-want +got): %v", diff) + } +} + +func TestMakeIngressVirtualServiceSpec_CorrectGateways(t *testing.T) { + ci := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: "test-ns", + }, + }, + Spec: v1alpha1.IngressSpec{}, + } + expected := []string{"knative-testing/gateway-one", "knative-testing/gateway-two"} + gateways := MakeIngressVirtualService(ci, makeGatewayMap([]string{"knative-testing/gateway-one", "knative-testing/gateway-two"}, nil)).Spec.Gateways + if diff := cmp.Diff(expected, gateways); diff != "" { + 
t.Errorf("Unexpected gateways (-want +got): %v", diff) + } +} + +func TestMakeIngressVirtualServiceSpec_CorrectRoutes(t *testing.T) { + ci := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: system.Namespace(), + }, + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{ + "domain.com", + "test-route.test-ns.svc.cluster.local", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "^/pets/(.*?)?", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "v2-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "ugh": "blah", + }, + }}, + AppendHeaders: map[string]string{ + "foo": "bar", + }, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + Visibility: v1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "v1.domain.com", + }, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "^/pets/(.*?)?", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "v1-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + }}, + AppendHeaders: map[string]string{ + "foo": "baz", + }, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + }}, + }, + }}, + }, + } + + expected := []*istiov1alpha3.HTTPRoute{{ + Match: []*istiov1alpha3.HTTPMatchRequest{{ + Uri: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Regex{Regex: "^/pets/(.*?)?"}, + }, + Authority: 
&istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `domain.com`}, + }, + Gateways: []string{"gateway.public"}, + }, { + Uri: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Regex{Regex: "^/pets/(.*?)?"}, + }, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `test-route.test-ns`}, + }, + Gateways: []string{"gateway.private"}, + }}, + Route: []*istiov1alpha3.HTTPRouteDestination{{ + Destination: &istiov1alpha3.Destination{ + Host: "v2-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 80}, + }, + Weight: 100, + Headers: &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: map[string]string{ + "ugh": "blah", + }, + }, + }, + }}, + Headers: &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: map[string]string{ + "foo": "bar", + }, + }, + }, + Timeout: types.DurationProto(defaultMaxRevisionTimeout), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, + WebsocketUpgrade: true, + }, { + Match: []*istiov1alpha3.HTTPMatchRequest{{ + Uri: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Regex{Regex: "^/pets/(.*?)?"}, + }, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `v1.domain.com`}, + }, + Gateways: []string{}, + }}, + Route: []*istiov1alpha3.HTTPRouteDestination{{ + Destination: &istiov1alpha3.Destination{ + Host: "v1-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 80}, + }, + Weight: 100, + }}, + Headers: &istiov1alpha3.Headers{ + Request: &istiov1alpha3.Headers_HeaderOperations{ + Set: map[string]string{ + "foo": "baz", + }, + }, + }, + Timeout: types.DurationProto(defaultMaxRevisionTimeout), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: 
retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, + WebsocketUpgrade: true, + }} + + routes := MakeIngressVirtualService(ci, makeGatewayMap([]string{"gateway.public"}, []string{"gateway.private"})).Spec.Http + if diff := cmp.Diff(expected, routes); diff != "" { + t.Errorf("Unexpected routes (-want +got): %v", diff) + } +} + +// One active target. +func TestMakeVirtualServiceRoute_Vanilla(t *testing.T) { + ingressPath := &v1alpha1.HTTPIngressPath{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + + ServiceNamespace: "test-ns", + ServiceName: "revision-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + }}, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + } + route := makeVirtualServiceRoute(sets.NewString("a.com", "b.org"), ingressPath, makeGatewayMap([]string{"gateway-1"}, nil), v1alpha1.IngressVisibilityExternalIP) + expected := &istiov1alpha3.HTTPRoute{ + Match: []*istiov1alpha3.HTTPMatchRequest{{ + Gateways: []string{"gateway-1"}, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `a.com`}, + }, + }, { + Gateways: []string{"gateway-1"}, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `b.org`}, + }, + }}, + Route: []*istiov1alpha3.HTTPRouteDestination{{ + Destination: &istiov1alpha3.Destination{ + Host: "revision-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 80}, + }, + Weight: 100, + }}, + Timeout: types.DurationProto(defaultMaxRevisionTimeout), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, 
+ WebsocketUpgrade: true, + } + if diff := cmp.Diff(expected, route); diff != "" { + t.Errorf("Unexpected route (-want +got): %v", diff) + } +} + +// Two active targets. +func TestMakeVirtualServiceRoute_TwoTargets(t *testing.T) { + ingressPath := &v1alpha1.HTTPIngressPath{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "revision-service", + ServicePort: intstr.FromInt(80), + }, + Percent: 90, + }, { + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "new-revision-service", + ServicePort: intstr.FromInt(81), + }, + Percent: 10, + }}, + Timeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Retries: &v1alpha1.HTTPRetry{ + PerTryTimeout: &metav1.Duration{Duration: defaultMaxRevisionTimeout}, + Attempts: networking.DefaultRetryCount, + }, + } + route := makeVirtualServiceRoute(sets.NewString("test.org"), ingressPath, makeGatewayMap([]string{"knative-testing/gateway-1"}, nil), v1alpha1.IngressVisibilityExternalIP) + expected := &istiov1alpha3.HTTPRoute{ + Match: []*istiov1alpha3.HTTPMatchRequest{{ + Gateways: []string{"knative-testing/gateway-1"}, + Authority: &istiov1alpha3.StringMatch{ + MatchType: &istiov1alpha3.StringMatch_Prefix{Prefix: `test.org`}, + }, + }}, + Route: []*istiov1alpha3.HTTPRouteDestination{{ + Destination: &istiov1alpha3.Destination{ + Host: "revision-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 80}, + }, + Weight: 90, + }, { + Destination: &istiov1alpha3.Destination{ + Host: "new-revision-service.test-ns.svc.cluster.local", + Port: &istiov1alpha3.PortSelector{Number: 81}, + }, + Weight: 10, + }}, + Timeout: types.DurationProto(defaultMaxRevisionTimeout), + Retries: &istiov1alpha3.HTTPRetry{ + RetryOn: retriableConditions, + Attempts: int32(networking.DefaultRetryCount), + PerTryTimeout: types.DurationProto(defaultMaxRevisionTimeout), + }, + WebsocketUpgrade: true, + } + if diff := 
cmp.Diff(expected, route); diff != "" { + t.Errorf("Unexpected route (-want +got): %v", diff) + } +} + +func TestGetHosts_Duplicate(t *testing.T) { + ci := &v1alpha1.Ingress{ + Spec: v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{"test-route1", "test-route2"}, + }, { + Hosts: []string{"test-route1", "test-route3"}, + }}, + }, + } + hosts := getHosts(ci) + expected := sets.NewString("test-route1", "test-route2", "test-route3") + if diff := cmp.Diff(expected, hosts); diff != "" { + t.Errorf("Unexpected hosts (-want +got): %v", diff) + } +} + +func makeGatewayMap(publicGateways []string, privateGateways []string) map[v1alpha1.IngressVisibility]sets.String { + return map[v1alpha1.IngressVisibility]sets.String{ + v1alpha1.IngressVisibilityExternalIP: sets.NewString(publicGateways...), + v1alpha1.IngressVisibilityClusterLocal: sets.NewString(privateGateways...), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/labeler/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/controller.go new file mode 100644 index 0000000000..c48c4068c6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/controller.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labeler + +import ( + "context" + + configurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + routeinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/reconciler" +) + +const ( + controllerAgentName = "labeler-controller" +) + +// NewController wraps a new instance of the labeler that labels +// Configurations with Routes in a controller. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + + routeInformer := routeinformer.Get(ctx) + configInformer := configurationinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + + c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: routeInformer.Lister(), + configurationLister: configInformer.Lister(), + revisionLister: revisionInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, "Labels") + + c.Logger.Info("Setting up event handlers") + routeInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_validation.go b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/doc.go similarity index 67% rename from test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_validation.go rename to test/vendor/knative.dev/serving/pkg/reconciler/labeler/doc.go index 49dd1dbdcc..1192cf12dc 100644 --- a/test/vendor/github.com/knative/serving/pkg/apis/networking/v1alpha1/clusteringress_validation.go +++ b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/doc.go @@ -14,15 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 - -import ( - "context" - - "github.com/knative/pkg/apis" -) - -// Validate inspects and validates ClusterIngress object. -func (ci *ClusterIngress) Validate(ctx context.Context) *apis.FieldError { - return ci.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec") -} +// Package labeler holds the logic that applies Route labels to +// Configurations to implement knative/serving#226. We run this +// as a separate reconciliation because we may choose to relax the +// 1:N relationship between Route:Configuration in the future. +package labeler diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go new file mode 100644 index 0000000000..12879425d1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labeler + +import ( + "context" + + "go.uber.org/zap" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" +) + +// Reconciler implements controller.Reconciler for Route resources. 
+type Reconciler struct { + *reconciler.Base + + // Listers index properties about resources + routeLister listers.RouteLister + configurationLister listers.ConfigurationLister + revisionLister listers.RevisionLister +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. In this case, it attempts to label all Configurations +// with the Routes that direct traffic to their Revisions. +func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + // Get the Route resource with this namespace/name + route, err := c.routeLister.Routes(namespace).Get(name) + if apierrs.IsNotFound(err) { + logger.Infof("Clearing labels for deleted Route: %q", key) + return c.clearLabels(ctx, namespace, name) + } else if err != nil { + return err + } + + logger.Infof("Time to sync the labels: %#v", route) + return c.syncLabels(ctx, route) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler_test.go new file mode 100644 index 0000000000..53c2ee27d1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler_test.go @@ -0,0 +1,288 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labeler + +import ( + "context" + "fmt" + "testing" + + // Inject the fake informers that this controller needs. + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + // Make sure Reconcile handles bad keys. + Key: "too/many/parts", + }, { + Name: "key not found", + // Make sure Reconcile handles good keys that don't exist. 
+ Key: "foo/not-found", + }, { + Name: "label runLatest configuration", + Objects: []runtime.Object{ + simpleRunLatest("default", "first-reconcile", "the-config"), + simpleConfig("default", "the-config"), + rev("default", "the-config"), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddLabel("default", rev("default", "the-config").Name, + "serving.knative.dev/route", "first-reconcile"), + patchAddLabel("default", "the-config", "serving.knative.dev/route", "first-reconcile"), + }, + Key: "default/first-reconcile", + }, { + Name: "steady state", + Objects: []runtime.Object{ + simpleRunLatest("default", "steady-state", "the-config"), + simpleConfig("default", "the-config", + WithConfigLabel("serving.knative.dev/route", "steady-state")), + rev("default", "the-config", + WithRevisionLabel("serving.knative.dev/route", "steady-state")), + }, + Key: "default/steady-state", + }, { + Name: "failure adding label (revision)", + // Induce a failure during patching + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("patch", "revisions"), + }, + Objects: []runtime.Object{ + simpleRunLatest("default", "add-label-failure", "the-config"), + simpleConfig("default", "the-config"), + rev("default", "the-config"), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddLabel("default", rev("default", "the-config").Name, + "serving.knative.dev/route", "add-label-failure"), + }, + Key: "default/add-label-failure", + }, { + Name: "failure adding label (configuration)", + // Induce a failure during patching + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("patch", "configurations"), + }, + Objects: []runtime.Object{ + simpleRunLatest("default", "add-label-failure", "the-config"), + simpleConfig("default", "the-config"), + rev("default", "the-config", + WithRevisionLabel("serving.knative.dev/route", "add-label-failure")), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchAddLabel("default", 
"the-config", "serving.knative.dev/route", "add-label-failure"), + }, + Key: "default/add-label-failure", + }, { + Name: "label config with incorrect label", + WantErr: true, + Objects: []runtime.Object{ + simpleRunLatest("default", "the-route", "the-config"), + simpleConfig("default", "the-config", + WithConfigLabel("serving.knative.dev/route", "another-route")), + rev("default", "the-config", + WithRevisionLabel("serving.knative.dev/route", "another-route")), + }, + Key: "default/the-route", + }, { + Name: "change configurations", + Objects: []runtime.Object{ + simpleRunLatest("default", "config-change", "new-config"), + simpleConfig("default", "old-config", + WithConfigLabel("serving.knative.dev/route", "config-change")), + rev("default", "old-config", + WithRevisionLabel("serving.knative.dev/route", "config-change")), + simpleConfig("default", "new-config"), + rev("default", "new-config"), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchRemoveLabel("default", rev("default", "old-config").Name, + "serving.knative.dev/route"), + patchAddLabel("default", rev("default", "new-config").Name, + "serving.knative.dev/route", "config-change"), + patchRemoveLabel("default", "old-config", "serving.knative.dev/route"), + patchAddLabel("default", "new-config", "serving.knative.dev/route", "config-change"), + }, + Key: "default/config-change", + }, { + Name: "delete route", + Objects: []runtime.Object{ + simpleConfig("default", "the-config", + WithConfigLabel("serving.knative.dev/route", "delete-route")), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchRemoveLabel("default", "the-config", "serving.knative.dev/route"), + }, + Key: "default/delete-route", + }, { + Name: "failure while removing a cfg annotation should return an error", + // Induce a failure during patching + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("patch", "configurations"), + }, + Objects: []runtime.Object{ + simpleRunLatest("default", 
"delete-label-failure", "new-config"), + simpleConfig("default", "old-config", + WithConfigLabel("serving.knative.dev/route", "delete-label-failure")), + simpleConfig("default", "new-config", + WithConfigLabel("serving.knative.dev/route", "delete-label-failure")), + rev("default", "new-config", + WithRevisionLabel("serving.knative.dev/route", "delete-label-failure")), + rev("default", "old-config"), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchRemoveLabel("default", "old-config", "serving.knative.dev/route"), + }, + Key: "default/delete-label-failure", + }, { + Name: "failure while removing a rev annotation should return an error", + // Induce a failure during patching + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("patch", "revisions"), + }, + Objects: []runtime.Object{ + simpleRunLatest("default", "delete-label-failure", "new-config"), + simpleConfig("default", "old-config", + WithConfigLabel("serving.knative.dev/route", "delete-label-failure")), + simpleConfig("default", "new-config", + WithConfigLabel("serving.knative.dev/route", "delete-label-failure")), + rev("default", "new-config", + WithRevisionLabel("serving.knative.dev/route", "delete-label-failure")), + rev("default", "old-config", + WithRevisionLabel("serving.knative.dev/route", "delete-label-failure")), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchRemoveLabel("default", rev("default", "old-config").Name, + "serving.knative.dev/route"), + }, + Key: "default/delete-label-failure", + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: listers.GetRouteLister(), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + } + })) +} + +func routeWithTraffic(namespace, name string, traffic v1alpha1.TrafficTarget) *v1alpha1.Route { 
+ return Route(namespace, name, WithStatusTraffic(traffic)) +} + +func simpleRunLatest(namespace, name, config string) *v1alpha1.Route { + return routeWithTraffic(namespace, name, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: config + "-dbnfd", + Percent: ptr.Int64(100), + }, + }) +} + +func simpleConfig(namespace, name string, opts ...ConfigOption) *v1alpha1.Configuration { + cfg := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + ResourceVersion: "v1", + }, + } + cfg.Status.InitializeConditions() + cfg.Status.SetLatestCreatedRevisionName(name + "-dbnfd") + cfg.Status.SetLatestReadyRevisionName(name + "-dbnfd") + + for _, opt := range opts { + opt(cfg) + } + return cfg +} + +func rev(namespace, name string, opts ...RevisionOption) *v1alpha1.Revision { + cfg := simpleConfig(namespace, name) + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: cfg.Status.LatestCreatedRevisionName, + ResourceVersion: "v1", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(cfg)}, + }, + } + + for _, opt := range opts { + opt(rev) + } + return rev +} + +func patchRemoveLabel(namespace, name, key string) clientgotesting.PatchActionImpl { + action := clientgotesting.PatchActionImpl{} + action.Name = name + action.Namespace = namespace + + patch := fmt.Sprintf(`{"metadata":{"labels":{%q:null}}}`, key) + + action.Patch = []byte(patch) + return action +} + +func patchAddLabel(namespace, name, key, value string) clientgotesting.PatchActionImpl { + action := clientgotesting.PatchActionImpl{} + action.Name = name + action.Namespace = namespace + + patch := fmt.Sprintf(`{"metadata":{"labels":{%q:%q}}}`, key, value) + + action.Patch = []byte(patch) + return action +} + +func TestNew(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + c := NewController(ctx, configmap.NewStaticWatcher()) + + if c == nil { + t.Fatal("Expected NewController to return a non-nil 
value") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labels.go b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labels.go new file mode 100644 index 0000000000..9f464e5005 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/labeler/labels.go @@ -0,0 +1,226 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labeler + +import ( + "context" + "encoding/json" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/kmeta" + + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// accessor defines an abstraction for manipulating labeled entity +// (Configuration, Revision) with shared logic. +type accessor interface { + get(ns, name string) (kmeta.Accessor, error) + list(ns, name string) ([]kmeta.Accessor, error) + patch(ns, name string, pt types.PatchType, p []byte) error +} + +// syncLabels makes sure that the revisions and configurations referenced from +// a Route are labeled with route labels. +func (c *Reconciler) syncLabels(ctx context.Context, r *v1alpha1.Route) error { + revisions := sets.NewString() + configs := sets.NewString() + + // Walk the revisions in Route's .status.traffic and build a list + // of Configurations to label from their OwnerReferences. 
+ for _, tt := range r.Status.Traffic { + rev, err := c.revisionLister.Revisions(r.Namespace).Get(tt.RevisionName) + if err != nil { + return err + } + revisions.Insert(tt.RevisionName) + owner := metav1.GetControllerOf(rev) + if owner != nil && owner.Kind == "Configuration" { + configs.Insert(owner.Name) + } + } + + // Use a revision accessor to manipulate the revisions. + racc := &revision{r: c} + if err := deleteLabelForNotListed(ctx, r.Namespace, r.Name, racc, revisions); err != nil { + return err + } + if err := setLabelForListed(ctx, r, racc, revisions); err != nil { + return err + } + + // Use a config access to manipulate the configs. + cacc := &configuration{r: c} + if err := deleteLabelForNotListed(ctx, r.Namespace, r.Name, cacc, configs); err != nil { + return err + } + return setLabelForListed(ctx, r, cacc, configs) +} + +// clearLabels removes any labels for a named route from configurations and revisions. +func (c *Reconciler) clearLabels(ctx context.Context, ns, name string) error { + racc := &revision{r: c} + if err := deleteLabelForNotListed(ctx, ns, name, racc, sets.NewString()); err != nil { + return err + } + cacc := &configuration{r: c} + return deleteLabelForNotListed(ctx, ns, name, cacc, sets.NewString()) +} + +// setLabelForListed uses the accessor to attach the label for this route to every element +// listed within "names" in the same namespace. 
+func setLabelForListed(ctx context.Context, route *v1alpha1.Route, acc accessor, names sets.String) error { + for name := range names { + elt, err := acc.get(route.Namespace, name) + if err != nil { + return err + } + routeName, ok := elt.GetLabels()[serving.RouteLabelKey] + if ok { + if routeName != route.Name { + return fmt.Errorf("%s %q is already in use by %q, and cannot be used by %q", + elt.GroupVersionKind(), elt.GetName(), routeName, route.Name) + } + } else { + if err := setRouteLabel(acc, elt, &route.Name); err != nil { + return fmt.Errorf("failed to add route label to %s %q: %w", + elt.GroupVersionKind(), elt.GetName(), err) + } + } + } + + return nil +} + +// deleteLabelForNotListed uses the accessor to delete the label from any listable entity that is +// not named within our list. Unlike setLabelForListed, this function takes ns/name instead of a +// Route so that it can clean things up when a Route ceases to exist. +func deleteLabelForNotListed(ctx context.Context, ns, name string, acc accessor, names sets.String) error { + oldList, err := acc.list(ns, name) + if err != nil { + return err + } + + // Delete label for newly removed traffic targets. + for _, elt := range oldList { + if names.Has(elt.GetName()) { + continue + } + + if err := setRouteLabel(acc, elt, nil); err != nil { + return fmt.Errorf("failed to remove route label to %s %q: %w", + elt.GroupVersionKind(), elt.GetName(), err) + } + } + + return nil +} + +// setRouteLabel toggles the route label on the specified element through the provided accessor. +// a nil route name will cause the route label to be deleted, and a non-nil route will cause +// that route name to be attached to the element. 
+func setRouteLabel(acc accessor, elt kmeta.Accessor, routeName *string) error { + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + serving.RouteLabelKey: routeName, + }, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return err + } + + return acc.patch(elt.GetNamespace(), elt.GetName(), types.MergePatchType, patch) +} + +// revision is an implementation of accessor for Revisions +type revision struct { + r *Reconciler +} + +// revision implements accessor +var _ accessor = (*revision)(nil) + +// get implements accessor +func (r *revision) get(ns, name string) (kmeta.Accessor, error) { + return r.r.revisionLister.Revisions(ns).Get(name) +} + +// list implements accessor +func (r *revision) list(ns, name string) ([]kmeta.Accessor, error) { + rl, err := r.r.revisionLister.Revisions(ns).List(labels.SelectorFromSet(labels.Set{ + serving.RouteLabelKey: name, + })) + if err != nil { + return nil, err + } + // Need a copy to change types in Go + kl := make([]kmeta.Accessor, 0, len(rl)) + for _, r := range rl { + kl = append(kl, r) + } + return kl, err +} + +// patch implements accessor +func (r *revision) patch(ns, name string, pt types.PatchType, p []byte) error { + _, err := r.r.ServingClientSet.ServingV1alpha1().Revisions(ns).Patch(name, pt, p) + return err +} + +// configuration is an implementation of accessor for Configurations +type configuration struct { + r *Reconciler +} + +// configuration implements accessor +var _ accessor = (*configuration)(nil) + +// get implements accessor +func (c *configuration) get(ns, name string) (kmeta.Accessor, error) { + return c.r.configurationLister.Configurations(ns).Get(name) +} + +// list implements accessor +func (c *configuration) list(ns, name string) ([]kmeta.Accessor, error) { + rl, err := c.r.configurationLister.Configurations(ns).List(labels.SelectorFromSet(labels.Set{ + serving.RouteLabelKey: name, + })) + if err != nil { + return 
nil, err + } + // Need a copy to change types in Go + kl := make([]kmeta.Accessor, 0, len(rl)) + for _, r := range rl { + kl = append(kl, r) + } + return kl, err +} + +// patch implements accessor +func (c *configuration) patch(ns, name string, pt types.PatchType, p []byte) error { + _, err := c.r.ServingClientSet.ServingV1alpha1().Configurations(ns).Patch(name, pt, p) + return err +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/metric/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/metric/controller.go new file mode 100644 index 0000000000..0f0d3a2676 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/metric/controller.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metric + +import ( + "context" + + "knative.dev/serving/pkg/autoscaler" + metricinformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric" + pkgreconciler "knative.dev/serving/pkg/reconciler" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" +) + +const ( + controllerAgentName = "metric-controller" +) + +// NewController initializes the controller and is called by the generated code. +// Registers eventhandlers to enqueue events. 
+func NewController( + ctx context.Context, + cmw configmap.Watcher, + collector autoscaler.Collector, +) *controller.Impl { + metricInformer := metricinformer.Get(ctx) + + c := &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + collector: collector, + metricLister: metricInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, reconcilerName) + + c.Logger.Info("Setting up event handlers") + + // Watch all the Metric objects. + metricInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric.go b/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric.go new file mode 100644 index 0000000000..d753b23239 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metric + +import ( + "context" + "fmt" + "reflect" + + "go.uber.org/zap" + "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + listers "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" + rbase "knative.dev/serving/pkg/reconciler" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" +) + +const reconcilerName = "Metrics" + +// reconciler implements controller.Reconciler for Metric resources. +type reconciler struct { + *rbase.Base + collector autoscaler.Collector + metricLister listers.MetricLister +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. +func (r *reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + original, err := r.metricLister.Metrics(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The metric object is gone, so delete the collection. + logger.Info("Stopping to collect metrics") + return r.collector.Delete(namespace, name) + } else if err != nil { + return fmt.Errorf("failed to fetch metric %s: %w", key, err) + } + + // Don't mess with informer's copy. 
+ metric := original.DeepCopy() + metric.SetDefaults(ctx) + metric.Status.InitializeConditions() + + if err = r.reconcileCollection(ctx, metric); err != nil { + logger.Errorw("Error reconciling metric collection", zap.Error(err)) + r.Recorder.Event(metric, corev1.EventTypeWarning, "InternalError", err.Error()) + } else { + metric.Status.MarkMetricReady() + } + + if !equality.Semantic.DeepEqual(original.Status, metric.Status) { + // Change of status, need to update the object. + if uErr := r.updateStatus(original, metric); uErr != nil { + logger.Warnw("Failed to update metric status", zap.Error(uErr)) + r.Recorder.Eventf(metric, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update metric status: %v", uErr) + return uErr + } + r.Recorder.Eventf(metric, corev1.EventTypeNormal, "Updated", "Successfully updated metric status %s", key) + } + return err +} + +func (r *reconciler) reconcileCollection(ctx context.Context, metric *v1alpha1.Metric) error { + err := r.collector.CreateOrUpdate(metric) + if err != nil { + // If create or update failes, we won't be able to collect at all. + metric.Status.MarkMetricFailed("CollectionFailed", "Failed to reconcile metric collection") + return fmt.Errorf("failed to initiate or update scraping: %w", err) + } + return nil +} + +func (r *reconciler) updateStatus(existing *v1alpha1.Metric, desired *v1alpha1.Metric) error { + existing = existing.DeepCopy() + return rbase.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = r.ServingClientSet.AutoscalingV1alpha1().Metrics(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. 
+ if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = r.ServingClientSet.AutoscalingV1alpha1().Metrics(existing.Namespace).UpdateStatus(existing) + return err + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric_test.go new file mode 100644 index 0000000000..3220bc09e1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/metric/metric_test.go @@ -0,0 +1,259 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metric + +import ( + "context" + "errors" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clientgotesting "k8s.io/client-go/testing" + "knative.dev/serving/pkg/autoscaler" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + . "knative.dev/pkg/reconciler/testing" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + metricinformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/metric/fake" + rpkg "knative.dev/serving/pkg/reconciler" + . 
"knative.dev/serving/pkg/reconciler/testing/v1alpha1" +) + +type collectorKey struct{} + +func TestNewController(t *testing.T) { + ctx, _ := SetupFakeContext(t) + c := NewController(ctx, configmap.NewStaticWatcher(), &testCollector{}) + if c == nil { + t.Fatal("Expected NewController to return a non-nil value") + } +} + +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key, Part I", + Key: "too/many/parts", + }, { + Name: "bad workqueue key, Part II", + Key: "too-few-parts", + }, { + Name: "update status", + Key: "status/update", + Objects: []runtime.Object{ + metric("status", "update"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: metric("status", "update", ready), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", + "Successfully updated metric status status/update"), + }, + }, { + Name: "update status failed", + Key: "status/update-failed", + Objects: []runtime.Object{ + metric("status", "update-failed"), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "metrics"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: metric("status", "update-failed", ready), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "Failed to update metric status: inducing failure for update metrics"), + }, + WantErr: true, + }, { + Name: "cannot create collection-part I", + Ctx: context.WithValue(context.Background(), collectorKey{}, + &testCollector{createOrUpdateError: errors.New("the-error")}, + ), + Key: "bad/collector", + Objects: []runtime.Object{ + metric("bad", "collector"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + "failed to initiate or update scraping: the-error"), + Eventf(corev1.EventTypeNormal, "Updated", + "Successfully updated metric status bad/collector"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: metric("bad", "collector", 
failed("CollectionFailed", + "Failed to reconcile metric collection")), + }}, + WantErr: true, + }, { + Name: "cannot create collection-part II", + Ctx: context.WithValue(context.Background(), collectorKey{}, + &testCollector{createOrUpdateError: errors.New("the-error")}, + ), + Key: "bad/collector", + Objects: []runtime.Object{ + metric("bad", "collector", failed("CollectionFailed", + "Failed to reconcile metric collection")), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + "failed to initiate or update scraping: the-error"), + }, + WantErr: true, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + col := &testCollector{} + if c := ctx.Value(collectorKey{}); c != nil { + col = c.(*testCollector) + } + return &reconciler{ + Base: rpkg.NewBase(ctx, controllerAgentName, cmw), + collector: col, + metricLister: listers.GetMetricLister(), + } + })) +} + +func TestReconcileWithCollector(t *testing.T) { + updateError := errors.New("update error") + deleteError := errors.New("delete error") + + tests := []struct { + name string + key string + metric *av1alpha1.Metric + collector *testCollector + createOrUpdateCalls int + deleteCalls int + expectErr error + }{{ + name: "new", + key: "new/metric", + metric: metric("new", "metric"), + collector: &testCollector{}, + createOrUpdateCalls: 1, + }, { + name: "delete", + key: "old/metric", + metric: metric("new", "metric"), + collector: &testCollector{}, + deleteCalls: 1, + }, { + name: "error on create", + key: "new/metric", + metric: metric("new", "metric"), + collector: &testCollector{createOrUpdateError: updateError}, + createOrUpdateCalls: 1, + expectErr: updateError, + }, { + name: "error on delete", + key: "old/metric", + metric: metric("new", "metric"), + collector: &testCollector{deleteError: deleteError}, + deleteCalls: 1, + expectErr: deleteError, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + ctx, _ := SetupFakeContext(t) + metricInformer := metricinformer.Get(ctx) + + r := &reconciler{ + Base: rpkg.NewBase(ctx, controllerAgentName, configmap.NewStaticWatcher()), + collector: tt.collector, + metricLister: metricInformer.Lister(), + } + + // Make sure the provided metric is available via the fake clients/informers. + r.ServingClientSet.AutoscalingV1alpha1().Metrics(tt.metric.Namespace).Create(tt.metric) + metricInformer.Informer().GetIndexer().Add(tt.metric) + + if err := r.Reconcile(ctx, tt.key); !errors.Is(err, tt.expectErr) { + t.Errorf("Reconcile() = %v, wanted %v", err, tt.expectErr) + } + + if tt.createOrUpdateCalls != tt.collector.createOrUpdateCalls { + t.Errorf("CreateOrUpdate() called %d times, want %d times", tt.collector.createOrUpdateCalls, tt.createOrUpdateCalls) + } + if tt.deleteCalls != tt.collector.deleteCalls { + t.Errorf("Delete() called %d times, want %d times", tt.collector.deleteCalls, tt.deleteCalls) + } + }) + } +} + +type metricOption func(*av1alpha1.Metric) + +func failed(r, m string) metricOption { + return func(metric *av1alpha1.Metric) { + metric.Status.MarkMetricFailed(r, m) + } +} + +func ready(m *av1alpha1.Metric) { + m.Status.MarkMetricReady() +} + +func metric(namespace, name string, opts ...metricOption) *av1alpha1.Metric { + m := &av1alpha1.Metric{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: av1alpha1.MetricSpec{ + // Doesn't really matter what is by default, but we need something, so that + // Spec is not empty. 
+ StableWindow: time.Minute, + }, + } + for _, o := range opts { + o(m) + } + return m +} + +type testCollector struct { + createOrUpdateCalls int + createOrUpdateError error + + recordCalls int + + deleteCalls int + deleteError error +} + +func (c *testCollector) CreateOrUpdate(metric *av1alpha1.Metric) error { + c.createOrUpdateCalls++ + return c.createOrUpdateError +} + +func (c *testCollector) Record(key types.NamespacedName, stat autoscaler.Stat) { + c.recordCalls++ +} + +func (c *testCollector) Delete(namespace, name string) error { + c.deleteCalls++ + return c.deleteError +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/config/store.go new file mode 100644 index 0000000000..a47f5007f8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/config/store.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/serving/pkg/network" + routecfg "knative.dev/serving/pkg/reconciler/route/config" +) + +type cfgKey struct{} + +// Config of namespace controller +// +k8s:deepcopy-gen=false +type Config struct { + Network *network.Config + Domain *routecfg.Domain +} + +// FromContext fetches config from context. 
+func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +// ToContext adds config to given context. +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is based on configmap.UntypedStore and is used to store and watch for +// updates to configuration related to routes (currently only config-domain). +// +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configmap.UntypedStore based config store. +// +// logger must be non-nil implementation of configmap.Logger (commonly used +// loggers conform) +// +// onAfterStore is a variadic list of callbacks to run +// after the ConfigMap has been processed and stored. +// +// See also: configmap.NewUntypedStore(). +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "namespace", + logger, + configmap.Constructors{ + network.ConfigName: network.NewConfigFromConfigMap, + routecfg.DomainConfigName: routecfg.NewDomainFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +// ToContext adds Store contents to given context. +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +// Load fetches config from Store. 
+func (s *Store) Load() *Config { + return &Config{ + Network: s.UntypedLoad(network.ConfigName).(*network.Config).DeepCopy(), + Domain: s.UntypedLoad(routecfg.DomainConfigName).(*routecfg.Domain).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/controller.go new file mode 100644 index 0000000000..fa66a108cf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/controller.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nscert + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + nsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/apis/networking" + kcertinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate" + routecfg "knative.dev/serving/pkg/reconciler/route/config" + + "knative.dev/serving/pkg/network" + pkgreconciler "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/nscert/config" +) + +const ( + controllerAgentName = "namespace-controller" +) + +type configStore interface { + ToContext(ctx context.Context) context.Context + WatchConfigs(w configmap.Watcher) +} + +// NewController initializes the controller and is called by the generated code +// Registers eventhandlers to enqueue events. +func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { + nsInformer := nsinformer.Get(ctx) + knCertificateInformer := kcertinformer.Get(ctx) + + c := &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + nsLister: nsInformer.Lister(), + knCertificateLister: knCertificateInformer.Lister(), + } + + impl := controller.NewImpl(c, c.Logger, "Namespace") + + c.Logger.Info("Setting up event handlers") + nsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: pkgreconciler.Not(pkgreconciler.LabelExistsFilterFunc(networking.DisableWildcardCertLabelKey)), + Handler: controller.HandleAll(impl.Enqueue), + }) + + knCertificateInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(corev1.SchemeGroupVersion.WithKind("Namespace")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + c.Logger.Info("Setting up ConfigMap receivers") + configsToResync := []interface{}{ + &network.Config{}, + &routecfg.Domain{}, + } + resync := 
configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + impl.GlobalResync(nsInformer.Informer()) + }) + c.configStore = config.NewStore(c.Logger.Named("config-store"), resync) + c.configStore.WatchConfigs(cmw) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go new file mode 100644 index 0000000000..f8a45848d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go @@ -0,0 +1,207 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nscert + +import ( + "bytes" + "context" + "fmt" + "regexp" + "text/template" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubelabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + kubelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + "knative.dev/serving/pkg/network" + rbase "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/nscert/config" + "knative.dev/serving/pkg/reconciler/nscert/resources" +) + +// Reconciler implements controller.Reconciler for Certificate resources. +type reconciler struct { + *rbase.Base + + // listers index properties about resources + nsLister kubelisters.NamespaceLister + knCertificateLister listers.CertificateLister + + configStore configStore +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconciler)(nil) +var domainTemplateRegex *regexp.Regexp = regexp.MustCompile(`^\*\..+$`) + +// Reconciler implements controller.Reconciler for Namespace resources. +func (c *reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.configStore.ToContext(ctx) + + if !config.FromContext(ctx).Network.AutoTLS { + logger.Debug("AutoTLS is disabled. 
Skipping wildcard certificate creation") + return nil + } + + _, ns, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + namespace, err := c.nsLister.Get(ns) + if apierrs.IsNotFound(err) { + logger.Info("Namespace in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + if _, ok := namespace.Labels[networking.DisableWildcardCertLabelKey]; ok { + logger.Infof("Skipping wildcard certificate creation for excluded namespace %s", namespace.Name) + return nil + } + + err = c.reconcile(ctx, namespace) + if err != nil { + c.Recorder.Event(namespace, corev1.EventTypeWarning, "InternalError", err.Error()) + } + return err +} + +func (c *reconciler) reconcile(ctx context.Context, ns *corev1.Namespace) error { + cfg := config.FromContext(ctx) + + labelSelector := kubelabels.NewSelector() + req, err := kubelabels.NewRequirement(networking.WildcardCertDomainLabelKey, selection.Exists, nil) + if err != nil { + return fmt.Errorf("failed to create requirement: %v", err) + } + labelSelector = labelSelector.Add(*req) + + existingCerts, err := c.knCertificateLister.Certificates(ns.Name).List(labelSelector) + if err != nil { + return fmt.Errorf("failed to list certificates: %w", err) + } + + // Only create wildcard certs for the default domain + defaultDomain := cfg.Domain.LookupDomainForLabels(nil /* labels */) + + dnsName, err := wildcardDomain(cfg.Network.DomainTemplate, defaultDomain, ns.Name) + if err != nil { + return fmt.Errorf("failed to apply domain template %s to domain %s and namespace %s: %w", + cfg.Network.DomainTemplate, defaultDomain, ns.Name, err) + } + + // If any labeled cert has been issued for our DNSName then there's nothing to do + matchingCert := findMatchingCert(dnsName, existingCerts) + if matchingCert != nil { + return nil + } + + desiredCert := resources.MakeWildcardCertificate(ns, dnsName, defaultDomain) + + // If there is no matching cert 
find one previously created by this reconciler which may + // need to be updated. + existingCert, err := findNamespaceCert(ns, existingCerts) + + if apierrs.IsNotFound(err) { + cert, err := c.ServingClientSet.NetworkingV1alpha1().Certificates(ns.Name).Create(desiredCert) + if err != nil { + c.Recorder.Eventf(ns, corev1.EventTypeWarning, "CreationFailed", + "Failed to create Knative certificate %s/%s: %v", ns.Name, desiredCert.ObjectMeta.Name, err) + return fmt.Errorf("failed to create namespace certificate: %w", err) + } + + c.Recorder.Eventf(cert, corev1.EventTypeNormal, "Created", + "Created Knative Certificate %s/%s", ns.Name, cert.ObjectMeta.Name) + } else if err != nil { + return fmt.Errorf("failed to get namespace certificate: %w", err) + } else if !metav1.IsControlledBy(existingCert, ns) { + return fmt.Errorf("namespace %s does not own Knative Certificate: %s", ns.Name, existingCert.Name) + } else if !equality.Semantic.DeepEqual(existingCert.Spec, desiredCert.Spec) { + copy := existingCert.DeepCopy() + copy.Spec = desiredCert.Spec + copy.ObjectMeta.Labels[networking.WildcardCertDomainLabelKey] = desiredCert.ObjectMeta.Labels[networking.WildcardCertDomainLabelKey] + + _, err := c.ServingClientSet.NetworkingV1alpha1().Certificates(copy.Namespace).Update(copy) + if err != nil { + c.Recorder.Eventf(existingCert, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update Knative Certificate %s/%s: %v", existingCert.Namespace, existingCert.Name, err) + return fmt.Errorf("failed to update namespace certificate: %w", err) + } + c.Recorder.Eventf(existingCert, corev1.EventTypeNormal, "Updated", + "Updated Spec for Knative Certificate %s/%s", desiredCert.Namespace, desiredCert.Name) + return nil + } + + return nil +} + +func wildcardDomain(tmpl, domain, namespace string) (string, error) { + data := network.DomainTemplateValues{ + Name: "*", + Domain: domain, + Namespace: namespace, + } + + t, err := template.New("domain-template").Parse(tmpl) + if err != nil { + 
return "", err + } + + buf := bytes.Buffer{} + if err := t.Execute(&buf, data); err != nil { + return "", err + } + + dom := buf.String() + if !domainTemplateRegex.MatchString(dom) { + return "", fmt.Errorf("invalid DomainTemplate: %s", dom) + } + return dom, nil +} + +func findMatchingCert(domain string, certs []*v1alpha1.Certificate) *v1alpha1.Certificate { + for _, cert := range certs { + if dnsNames := sets.NewString(cert.Spec.DNSNames...); dnsNames.Has(domain) { + return cert + } + } + return nil +} + +func findNamespaceCert(ns *corev1.Namespace, certs []*v1alpha1.Certificate) (*v1alpha1.Certificate, error) { + for _, cert := range certs { + if metav1.IsControlledBy(cert, ns) { + return cert, nil + } + } + return nil, apierrs.NewNotFound(v1alpha1.Resource("certificate"), ns.Name) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert_test.go new file mode 100644 index 0000000000..710d53a4a3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert_test.go @@ -0,0 +1,477 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nscert + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "golang.org/x/sync/errgroup" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + . "knative.dev/pkg/reconciler/testing" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/network" + pkgreconciler "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/nscert/config" + "knative.dev/serving/pkg/reconciler/nscert/resources/names" + routecfg "knative.dev/serving/pkg/reconciler/route/config" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakensinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakecertinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake" + + _ "knative.dev/pkg/system/testing" +) + +var ( + wildcardDNSNames = []string{"*.foo.example.com"} + defaultCertName = names.WildcardCertificate(wildcardDNSNames[0]) + defaultDomainTemplate = "{{.Name}}.{{.Namespace}}.{{.Domain}}" + defaultDomain = "example.com" +) + +func newTestSetup(t *testing.T, configs ...*corev1.ConfigMap) ( + context.Context, context.CancelFunc, chan *v1alpha1.Certificate, *configmap.ManualWatcher) { + t.Helper() + + ctx, ccl, ifs := SetupFakeContextWithCancel(t) + wf, err := controller.RunInformers(ctx.Done(), ifs...) 
+ if err != nil { + t.Fatalf("Error starting informers: %v", err) + } + cancel := func() { + ccl() + wf() + } + + configMapWatcher := &configmap.ManualWatcher{Namespace: system.Namespace()} + + ctl := NewController(ctx, configMapWatcher) + + cms := []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-network", + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "domainTemplate": defaultDomainTemplate, + "autoTLS": "true", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: routecfg.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "example.com": "", + }, + }} + cms = append(cms, configs...) + + for _, cfg := range cms { + configMapWatcher.OnChange(cfg) + } + if err := configMapWatcher.Start(ctx.Done()); err != nil { + t.Fatalf("failed to start config manager: %v", err) + } + + certEvents := make(chan *v1alpha1.Certificate) + fakecertinformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(corev1.SchemeGroupVersion.WithKind("Namespace")), + Handler: controller.HandleAll(func(obj interface{}) { + certEvents <- obj.(*v1alpha1.Certificate) + }), + }) + + var eg errgroup.Group + eg.Go(func() error { return ctl.Run(1, ctx.Done()) }) + return ctx, func() { + cancel() + eg.Wait() + }, certEvents, configMapWatcher +} + +func TestNewController(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + configMapWatcher := configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "DomainTemplate": defaultDomainTemplate, + }}, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: routecfg.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "example.com": "", + }}, + ) + + c := NewController(ctx, configMapWatcher) + + if c == nil { + t.Fatal("Expected NewController to return a non-nil value") + } +} + +// 
This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + Key: "too/many/parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "create Knative certificate for namespace", + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + kubeNamespace("foo"), + }, + WantCreates: []runtime.Object{ + knCert(kubeNamespace("foo")), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Knative Certificate %s/%s", "foo", defaultCertName), + }, + Key: "foo", + }, { + Name: "certificate not created for excluded namespace", + Key: "foo", + Objects: []runtime.Object{ + kubeExcludedNamespace("foo"), + }, + }, { + Name: "certificate creation failed", + Key: "foo", + WantErr: true, + SkipNamespaceValidation: true, + Objects: []runtime.Object{ + kubeNamespace("foo"), + }, + WantCreates: []runtime.Object{ + knCert(kubeNamespace("foo")), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "certificates"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Knative certificate %s/%s: inducing failure for create certificates", "foo", defaultCertName), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create namespace certificate: inducing failure for create certificates"), + }, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + knCertificateLister: listers.GetKnCertificateLister(), + nsLister: listers.GetNamespaceLister(), + configStore: &testConfigStore{ + config: &config.Config{ + Network: networkConfig(), + Domain: domainConfig(), + }, + }, + } + })) +} + +func TestUpdateDomainTemplate(t *testing.T) { + netCfg := &corev1.ConfigMap{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Enabled", + }, + } + ctx, cancel, certEvents, watcher := newTestSetup(t, netCfg) + defer cancel() + + namespace := kubeNamespace("testns") + fakekubeclient.Get(ctx).CoreV1().Namespaces().Create(namespace) + fakensinformer.Get(ctx).Informer().GetIndexer().Add(namespace) + + want := []string{fmt.Sprintf("*.%s.%s", namespace.Name, routecfg.DefaultDomain)} + cert := <-certEvents + if diff := cmp.Diff(want, cert.Spec.DNSNames); diff != "" { + t.Errorf("DNSNames (-want, +got) = %s", diff) + } + + // Update the domain template to something matched by the existing DNSName + netCfg = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "domainTemplate": "{{.Name}}-suffix.{{.Namespace}}.{{.Domain}}", + "autoTLS": "Enabled", + }, + } + watcher.OnChange(netCfg) + + // Since no new names should be added nothing should change + select { + case <-certEvents: + t.Error("Unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Update the domain template to something not matched by the existing DNSName + netCfg = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "domainTemplate": "{{.Name}}.subdomain.{{.Namespace}}.{{.Domain}}", + "autoTLS": "Enabled", + }, + } + watcher.OnChange(netCfg) + + // A new domain format not matched by the existing certificate should update the DNSName + want = []string{fmt.Sprintf("*.subdomain.%s.%s", namespace.Name, routecfg.DefaultDomain)} + cert = <-certEvents + if diff := cmp.Diff(want, cert.Spec.DNSNames); diff != "" { + t.Errorf("DNSNames (-want, +got) = %s", diff) + } + + // Invalid domain template for wildcard certs + netCfg = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: 
system.Namespace(), + }, + Data: map[string]string{ + "domainTemplate": "{{.Namespace}}.{{.Name}}.{{.Domain}}", + "autoTLS": "Enabled", + }, + } + watcher.OnChange(netCfg) + + // With an invalid domain template nothing change + select { + case <-certEvents: + t.Error("Unexpected event") + case <-time.After(100 * time.Millisecond): + } +} + +func TestDomainConfigDefaultDomain(t *testing.T) { + domCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: routecfg.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "other.com": "selector:\n app: dev", + }, + } + netCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Enabled", + }, + } + ctx, cancel, certEvents, _ := newTestSetup(t, domCfg, netCfg) + defer cancel() + + namespace := kubeNamespace("testns") + fakekubeclient.Get(ctx).CoreV1().Namespaces().Create(namespace) + fakensinformer.Get(ctx).Informer().GetIndexer().Add(namespace) + + cert := <-certEvents + if got, want := cert.Spec.DNSNames[0], "*.testns.example.com"; got != want { + t.Errorf("DNSName[0] = %s, want %s", got, want) + } +} + +func TestChangeDefaultDomain(t *testing.T) { + netCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Enabled", + }, + } + + ctx, cancel, certEvents, watcher := newTestSetup(t, netCfg) + defer cancel() + + namespace := kubeNamespace("testns") + fakekubeclient.Get(ctx).CoreV1().Namespaces().Create(namespace) + fakensinformer.Get(ctx).Informer().GetIndexer().Add(namespace) + + // The certificate should be created with the default domain. + cert := <-certEvents + if got, want := cert.Spec.DNSNames[0], "*.testns.example.com"; got != want { + t.Errorf("DNSName[0] = %s, want %s", got, want) + } + + // Change the domain settings. 
+ domCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: routecfg.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "example.net": "", + }, + } + watcher.OnChange(domCfg) + + // The certificate should be updated with the new domain. + cert2 := <-certEvents + if got, want := cert2.Spec.DNSNames[0], "*.testns.example.net"; got != want { + t.Errorf("DNSName[0] = %s, want %s", got, want) + } + + // Assert we have exactly one certificate. + certs, _ := fakeservingclient.Get(ctx).NetworkingV1alpha1().Certificates(namespace.Name).List(metav1.ListOptions{}) + if len(certs.Items) > 1 { + t.Errorf("Expected 1 certificate, got %d.", len(certs.Items)) + } +} + +func TestDomainConfigExplicitDefaultDomain(t *testing.T) { + domCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: routecfg.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "default.com": "", + }, + } + netCfg := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "autoTLS": "Enabled", + }, + } + ctx, cancel, certEvents, _ := newTestSetup(t, domCfg, netCfg) + defer cancel() + + namespace := kubeNamespace("testns") + fakekubeclient.Get(ctx).CoreV1().Namespaces().Create(namespace) + fakensinformer.Get(ctx).Informer().GetIndexer().Add(namespace) + + cert := <-certEvents + if got, want := cert.Spec.DNSNames[0], "*.testns.default.com"; got != want { + t.Errorf("DNSName[0] = %s, want %s", got, want) + } +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +func (t *testConfigStore) WatchConfigs(w configmap.Watcher) {} + +var _ configStore = (*testConfigStore)(nil) + +func knCert(namespace *corev1.Namespace) *v1alpha1.Certificate { + return knCertWithStatus(namespace, &v1alpha1.CertificateStatus{}) +} 
+ +func knCertWithStatus(namespace *corev1.Namespace, status *v1alpha1.CertificateStatus) *v1alpha1.Certificate { + return &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultCertName, + Namespace: namespace.Name, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(namespace, corev1.SchemeGroupVersion.WithKind("Namespace"))}, + Labels: map[string]string{ + networking.WildcardCertDomainLabelKey: defaultDomain, + }, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: wildcardDNSNames, + SecretName: defaultCertName, + }, + Status: *status, + } +} + +func kubeNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } +} + +func kubeExcludedNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + networking.DisableWildcardCertLabelKey: "true", + }, + }, + } +} + +func networkConfig() *network.Config { + return &network.Config{ + DomainTemplate: defaultDomainTemplate, + AutoTLS: true, + } +} + +func domainConfig() *routecfg.Domain { + domainConfig := &routecfg.Domain{ + Domains: map[string]*routecfg.LabelSelector{ + "example.com": {}, + }, + } + return domainConfig +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names.go new file mode 100644 index 0000000000..5088af9374 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "strings" +) + +// WildcardCertificate returns the Certificate name for the given wildcard DNS name +func WildcardCertificate(dnsName string) string { + nameParts := strings.SplitAfterN(dnsName, ".", 2) + return nameParts[len(nameParts)-1] +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names_test.go new file mode 100644 index 0000000000..b1466b7c06 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/names/names_test.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestNamer(t *testing.T) { + for in, want := range map[string]string{"*.testns.example.com": "testns.example.com"} { + if got := WildcardCertificate(in); !cmp.Equal(got, want) { + t.Errorf("WildcardCertificate (-want, +got) = %s", cmp.Diff(want, got)) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate.go new file mode 100644 index 0000000000..328c9f7a52 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler/nscert/resources/names" +) + +// MakeWildcardCertificate creates a Knative certificate +func MakeWildcardCertificate(namespace *corev1.Namespace, dnsName, domain string) *v1alpha1.Certificate { + return &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.WildcardCertificate(dnsName), + Namespace: namespace.Name, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(namespace, corev1.SchemeGroupVersion.WithKind("Namespace"))}, + Labels: map[string]string{ + networking.WildcardCertDomainLabelKey: domain, + }, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: []string{dnsName}, + SecretName: names.WildcardCertificate(dnsName), + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate_test.go new file mode 100644 index 0000000000..27897db7ff --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/nscert/resources/wildcard_certificate_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/reconciler/nscert/resources/names" +) + +var namespace = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testns", + }, +} + +const ( + domain = "example.com" + dnsName = "*.testns.example.com" +) + +func TestMakeWildcardCertificate(t *testing.T) { + want := &v1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.WildcardCertificate(dnsName), + Namespace: "testns", + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(namespace, corev1.SchemeGroupVersion.WithKind("Namespace"))}, + Labels: map[string]string{ + networking.WildcardCertDomainLabelKey: domain, + }, + }, + Spec: v1alpha1.CertificateSpec{ + DNSNames: []string{dnsName}, + SecretName: names.WildcardCertificate(dnsName), + }, + } + + got := MakeWildcardCertificate(namespace, dnsName, domain) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MakeWildcardCertificate (-want, +got) = %s", diff) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/reconciler.go b/test/vendor/knative.dev/serving/pkg/reconciler/reconciler.go new file mode 100644 index 0000000000..012c30b716 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/reconciler.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + + cachingclient "knative.dev/caching/pkg/client/injection/client" + kubeclient "knative.dev/pkg/client/injection/kube/client" + "knative.dev/pkg/injection/clients/dynamicclient" + servingclient "knative.dev/serving/pkg/client/injection/client" + istioclient "knative.dev/serving/pkg/client/istio/injection/client" + + cachingclientset "knative.dev/caching/pkg/client/clientset/versioned" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + servingScheme "knative.dev/serving/pkg/client/clientset/versioned/scheme" + istioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned" +) + +const ( + ForceUpgradePatch = `[{ + "op":"add", + "path":"/metadata/annotations/serving.knative.dev~1forceUpgrade", + "value":"true" +}]` +) + +// ConfigStore is a minimal interface to the config stores used by our controllers. +type ConfigStore interface { + ToContext(ctx context.Context) context.Context +} + +// Base implements the core controller logic, given a Reconciler. 
+type Base struct { + // KubeClientSet allows us to talk to the k8s for core APIs + KubeClientSet kubernetes.Interface + + // IstioClientSet allows us to configure Istio objects + IstioClientSet istioclientset.Interface + + // ServingClientSet allows us to configure Serving objects + ServingClientSet clientset.Interface + + // DynamicClientSet allows us to configure pluggable Build objects + DynamicClientSet dynamic.Interface + + // CachingClientSet allows us to instantiate Image objects + CachingClientSet cachingclientset.Interface + + // ConfigMapWatcher allows us to watch for ConfigMap changes. + ConfigMapWatcher configmap.Watcher + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // StatsReporter reports reconciler's metrics. + StatsReporter StatsReporter + + // Sugared logger is easier to use but is not as performant as the + // raw logger. In performance critical paths, call logger.Desugar() + // and use the returned raw logger instead. In addition to the + // performance benefits, raw logger also preserves type-safety at + // the expense of slightly greater verbosity. + Logger *zap.SugaredLogger +} + +// NewBase instantiates a new instance of Base implementing +// the common & boilerplate code between our reconcilers. +func NewBase(ctx context.Context, controllerAgentName string, cmw configmap.Watcher) *Base { + // Enrich the logs with controller name + logger := logging.FromContext(ctx). + Named(controllerAgentName). 
+ With(zap.String(logkey.ControllerType, controllerAgentName)) + + kubeClient := kubeclient.Get(ctx) + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + watches := []watch.Interface{ + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), + eventBroadcaster.StartRecordingToSink( + &typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}), + } + recorder = eventBroadcaster.NewRecorder( + scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) + go func() { + <-ctx.Done() + for _, w := range watches { + w.Stop() + } + }() + } + + statsReporter := GetStatsReporter(ctx) + if statsReporter == nil { + logger.Debug("Creating stats reporter") + var err error + statsReporter, err = NewStatsReporter(controllerAgentName) + if err != nil { + logger.Fatal(err) + } + } + + base := &Base{ + KubeClientSet: kubeClient, + IstioClientSet: istioclient.Get(ctx), + DynamicClientSet: dynamicclient.Get(ctx), + ServingClientSet: servingclient.Get(ctx), + CachingClientSet: cachingclient.Get(ctx), + ConfigMapWatcher: cmw, + Recorder: recorder, + StatsReporter: statsReporter, + Logger: logger, + } + + return base +} + +func (b *Base) MarkNeedsUpgrade(gvr schema.GroupVersionResource, namespace, name string) error { + // Add the annotation serving.knative.dev/forceUpgrade=true to trigger webhook-based defaulting. + _, err := b.DynamicClientSet.Resource(gvr).Namespace(namespace).Patch(name, types.JSONPatchType, + []byte(ForceUpgradePatch), metav1.PatchOptions{}) + return err +} + +func init() { + // Add serving types to the default Kubernetes Scheme so Events can be + // logged for serving types. 
+ servingScheme.AddToScheme(scheme.Scheme) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/reconciler_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/reconciler_test.go new file mode 100644 index 0000000000..d3004259a1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/reconciler_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + "testing" + + _ "knative.dev/caching/pkg/client/injection/client/fake" + _ "knative.dev/pkg/client/injection/kube/client/fake" + _ "knative.dev/pkg/injection/clients/dynamicclient/fake" + _ "knative.dev/serving/pkg/client/injection/client/fake" + _ "knative.dev/serving/pkg/client/istio/injection/client/fake" + + "k8s.io/client-go/rest" + "knative.dev/pkg/configmap" + "knative.dev/pkg/injection" + _ "knative.dev/pkg/system/testing" +) + +var reconcilerName = "test-reconciler" + +func TestNew(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := &rest.Config{} + ctx, _ = injection.Fake.SetupInformers(ctx, cfg) + + cmw := configmap.NewStaticWatcher() + + r := NewBase(ctx, reconcilerName, cmw) + + if r == nil { + t.Fatal("Expected NewBase to return a non-nil value") + } + if r.Recorder == nil { + t.Fatal("Expected NewBase to add a Recorder") + } + if r.StatsReporter == nil { + t.Fatal("Expected NewBase to add a StatsReporter") + } +} diff --git 
a/test/vendor/knative.dev/serving/pkg/reconciler/retry.go b/test/vendor/knative.dev/serving/pkg/reconciler/retry.go new file mode 100644 index 0000000000..26e99b74a4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/retry.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "k8s.io/client-go/util/retry" +) + +// RetryUpdateConflicts retries the inner function if it returns conflict errors. +// This can be used to retry status updates without constantly reenqueuing keys. +func RetryUpdateConflicts(updater func(int) error) error { + attempts := 0 + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + err := updater(attempts) + attempts++ + return err + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/retry_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/retry_test.go new file mode 100644 index 0000000000..7166ac978b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/retry_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "errors" + "testing" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + + v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" +) + +func TestRetryUpdateConflicts(t *testing.T) { + errAny := errors.New("foo") + errConflict := apierrs.NewConflict(v1alpha1.Resource("foo"), "bar", errAny) + + tests := []struct { + name string + returns []error + want error + wantAttempts int + }{{ + name: "all good", + returns: []error{nil}, + want: nil, + wantAttempts: 1, + }, { + name: "not retry on non-conflict error", + returns: []error{errAny}, + want: errAny, + wantAttempts: 1, + }, { + name: "retry up to 5 times on conflicts", + returns: []error{errConflict, errConflict, errConflict, errConflict, errConflict, errConflict}, + want: errConflict, + wantAttempts: 5, + }, { + name: "eventually succeed", + returns: []error{errConflict, errConflict, nil}, + want: nil, + wantAttempts: 3, + }, { + name: "eventually fail", + returns: []error{errConflict, errConflict, errAny}, + want: errAny, + wantAttempts: 3, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + attempts := 0 + got := RetryUpdateConflicts(func(i int) error { + attempts++ + return test.returns[i] + }) + + if got != test.want { + t.Errorf("RetryUpdateConflicts() = %v, want %v", got, test.want) + } + if attempts != test.wantAttempts { + t.Errorf("attempts = %d, want %d", attempts, test.wantAttempts) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/doc.go 
b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/doc.go new file mode 100644 index 0000000000..16f5ba4860 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Revision controller depends. +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/observability.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/observability.go new file mode 100644 index 0000000000..5be683a13e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/observability.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store.go new file mode 100644 index 0000000000..750bb22fcf --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + + "knative.dev/serving/pkg/apis/config" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/logging" + pkgmetrics "knative.dev/pkg/metrics" + pkgtracing "knative.dev/pkg/tracing/config" + deployment "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" +) + +type cfgKey struct{} + +// +k8s:deepcopy-gen=false +type Config struct { + Deployment *deployment.Config + Network *network.Config + Observability *metrics.ObservabilityConfig + Logging *logging.Config + Tracing *pkgtracing.Config + Defaults *config.Defaults +} + +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a new store of Configs and optionally calls functions when ConfigMaps are updated for Revisions +func NewStore(logger configmap.Logger, onAfterStore 
...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "revision", + logger, + configmap.Constructors{ + deployment.ConfigName: deployment.NewConfigFromConfigMap, + network.ConfigName: network.NewConfigFromConfigMap, + pkgmetrics.ConfigMapName(): metrics.NewObservabilityConfigFromConfigMap, + logging.ConfigMapName(): logging.NewConfigFromConfigMap, + pkgtracing.ConfigName: pkgtracing.NewTracingConfigFromConfigMap, + config.DefaultsConfigName: config.NewDefaultsConfigFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +func (s *Store) Load() *Config { + + return &Config{ + Deployment: s.UntypedLoad(deployment.ConfigName).(*deployment.Config).DeepCopy(), + Network: s.UntypedLoad(network.ConfigName).(*network.Config).DeepCopy(), + Observability: s.UntypedLoad(pkgmetrics.ConfigMapName()).(*metrics.ObservabilityConfig).DeepCopy(), + Logging: s.UntypedLoad((logging.ConfigMapName())).(*logging.Config).DeepCopy(), + Tracing: s.UntypedLoad(pkgtracing.ConfigName).(*pkgtracing.Config).DeepCopy(), + Defaults: s.UntypedLoad(config.DefaultsConfigName).(*config.Defaults).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store_test.go new file mode 100644 index 0000000000..65410f8b8c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/store_test.go @@ -0,0 +1,133 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "knative.dev/pkg/logging" + logtesting "knative.dev/pkg/logging/testing" + pkgmetrics "knative.dev/pkg/metrics" + pkgtracing "knative.dev/pkg/tracing/config" + apisconfig "knative.dev/serving/pkg/apis/config" + deployment "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + + . "knative.dev/pkg/configmap/testing" +) + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + deploymentConfig := ConfigMapFromTestFile(t, deployment.ConfigName, deployment.QueueSidecarImageKey) + networkConfig := ConfigMapFromTestFile(t, network.ConfigName) + observabilityConfig := ConfigMapFromTestFile(t, pkgmetrics.ConfigMapName()) + loggingConfig := ConfigMapFromTestFile(t, logging.ConfigMapName()) + tracingConfig := ConfigMapFromTestFile(t, pkgtracing.ConfigName) + defaultConfig := ConfigMapFromTestFile(t, apisconfig.DefaultsConfigName) + + store.OnConfigChanged(deploymentConfig) + store.OnConfigChanged(networkConfig) + store.OnConfigChanged(observabilityConfig) + store.OnConfigChanged(loggingConfig) + store.OnConfigChanged(tracingConfig) + store.OnConfigChanged(defaultConfig) + + config := FromContext(store.ToContext(context.Background())) + + t.Run("Deployment", func(t *testing.T) { + expected, _ := deployment.NewConfigFromConfigMap(deploymentConfig) + if diff := cmp.Diff(expected, config.Deployment); diff != "" { + t.Errorf("Unexpected deployment (-want, 
+got): %v", diff) + } + }) + + t.Run("network", func(t *testing.T) { + expected, _ := network.NewConfigFromConfigMap(networkConfig) + ignoreDT := cmpopts.IgnoreFields(network.Config{}, "DomainTemplate") + + if diff := cmp.Diff(expected, config.Network, ignoreDT); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %v", diff) + } + }) + + t.Run("observability", func(t *testing.T) { + expected, _ := metrics.NewObservabilityConfigFromConfigMap(observabilityConfig) + if diff := cmp.Diff(expected, config.Observability); diff != "" { + t.Errorf("Unexpected observability config (-want, +got): %v", diff) + } + }) + + t.Run("logging", func(t *testing.T) { + expected, _ := logging.NewConfigFromConfigMap(loggingConfig) + if diff := cmp.Diff(expected, config.Logging); diff != "" { + t.Errorf("Unexpected logging config (-want, +got): %v", diff) + } + }) + + t.Run("tracing", func(t *testing.T) { + expected, _ := pkgtracing.NewTracingConfigFromConfigMap(tracingConfig) + if diff := cmp.Diff(expected, config.Tracing); diff != "" { + t.Errorf("Unexpected tracing config (-want, +got): %v", diff) + } + }) + + t.Run("defaults", func(t *testing.T) { + expected, _ := apisconfig.NewDefaultsConfigFromConfigMap(defaultConfig) + if diff := cmp.Diff(expected, config.Defaults); diff != "" { + t.Errorf("Unexpected defaults config (-want, +got): %v", diff) + } + }) +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + + store.OnConfigChanged(ConfigMapFromTestFile(t, deployment.ConfigName, deployment.QueueSidecarImageKey)) + store.OnConfigChanged(ConfigMapFromTestFile(t, network.ConfigName)) + store.OnConfigChanged(ConfigMapFromTestFile(t, pkgmetrics.ConfigMapName())) + store.OnConfigChanged(ConfigMapFromTestFile(t, logging.ConfigMapName())) + store.OnConfigChanged(ConfigMapFromTestFile(t, pkgtracing.ConfigName)) + store.OnConfigChanged(ConfigMapFromTestFile(t, apisconfig.DefaultsConfigName)) + + config := store.Load() + + 
config.Deployment.QueueSidecarImage = "mutated" + config.Network.IstioOutboundIPRanges = "mutated" + config.Logging.LoggingConfig = "mutated" + ccMutated := int64(4) + config.Defaults.ContainerConcurrency = ccMutated + + newConfig := store.Load() + + if newConfig.Deployment.QueueSidecarImage == "mutated" { + t.Error("Controller config is not immutable") + } + if newConfig.Network.IstioOutboundIPRanges == "mutated" { + t.Error("Network config is not immutable") + } + if newConfig.Logging.LoggingConfig == "mutated" { + t.Error("Logging config is not immutable") + } + if newConfig.Defaults.ContainerConcurrency == ccMutated { + t.Error("Defaults config is not immutable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml new file mode 120000 index 0000000000..4ed318ac46 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-autoscaler.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/autoscaler.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-defaults.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-defaults.yaml new file mode 120000 index 0000000000..f10641f859 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-defaults.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/defaults.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml new file mode 120000 index 0000000000..fa9039a1c3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-deployment.yaml @@ -0,0 +1 @@ 
+../../../../../config/core/configmaps/deployment.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml new file mode 120000 index 0000000000..a76506aeb2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-logging.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/logging.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-network.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-network.yaml new file mode 120000 index 0000000000..56cb332a04 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-network.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/network.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml new file mode 120000 index 0000000000..78663f4745 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-observability.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/observability.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-tracing.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-tracing.yaml new file mode 100644 index 0000000000..b49e7db451 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/testdata/config-tracing.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-tracing + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # + # If true we enable adding spans within our applications. + enable: "false" + + # URL to zipkin collector where traces are sent. + zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" + + # Enable zipkin debug mode. This allows all spans to be sent to the server + # bypassing sampling. 
+ debug: "false" + + # Percentage (0-1) of requests to trace + sample-rate: "0.1" + diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..e1176acb84 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/config/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go new file mode 100644 index 0000000000..39f1d3d723 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go @@ -0,0 +1,124 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "context" + "net/http" + + imageinformer "knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image" + kubeclient "knative.dev/pkg/client/injection/kube/client" + deploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" + configmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + painformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + apisconfig "knative.dev/serving/pkg/apis/config" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/revision/config" +) + +const ( + controllerAgentName = "revision-controller" +) + +// NewController initializes the controller and is called by the generated code +// Registers eventhandlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + transport := http.DefaultTransport + if rt, err := newResolverTransport(k8sCertPath); err != nil { + logging.FromContext(ctx).Errorf("Failed to create resolver transport: %v", err) + } else { + transport = rt + } + + deploymentInformer := deploymentinformer.Get(ctx) + serviceInformer := serviceinformer.Get(ctx) + configMapInformer := configmapinformer.Get(ctx) + imageInformer := imageinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + paInformer := painformer.Get(ctx) + + c := 
&Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + revisionLister: revisionInformer.Lister(), + podAutoscalerLister: paInformer.Lister(), + imageLister: imageInformer.Lister(), + deploymentLister: deploymentInformer.Lister(), + serviceLister: serviceInformer.Lister(), + configMapLister: configMapInformer.Lister(), + resolver: &digestResolver{ + client: kubeclient.Get(ctx), + transport: transport, + }, + } + impl := controller.NewImpl(c, c.Logger, "Revisions") + + // Set up an event handler for when the resource types of interest change + c.Logger.Info("Setting up event handlers") + revisionInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Revision")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + paInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Revision")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + // We don't watch for changes to Image because we don't incorporate any of its + // properties into our own status and should work completely in the absence of + // a functioning Image controller. 
+ + configMapInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Revision")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + configsToResync := []interface{}{ + &network.Config{}, + &metrics.ObservabilityConfig{}, + &deployment.Config{}, + &apisconfig.Defaults{}, + } + + resync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + // Triggers syncs on all revisions when configuration + // changes + impl.GlobalResync(revisionInformer.Informer()) + }) + + configStore := config.NewStore(c.Logger.Named("config-store"), resync) + configStore.WatchConfigs(c.ConfigMapWatcher) + c.configStore = configStore + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go new file mode 100644 index 0000000000..55b8834f62 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/equality" + caching "knative.dev/caching/pkg/apis/caching/v1alpha1" + "knative.dev/pkg/kmp" + "knative.dev/pkg/logging" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/revision/config" + "knative.dev/serving/pkg/reconciler/revision/resources" + presources "knative.dev/serving/pkg/resources" +) + +func (c *Reconciler) createDeployment(ctx context.Context, rev *v1alpha1.Revision) (*appsv1.Deployment, error) { + cfgs := config.FromContext(ctx) + + deployment, err := resources.MakeDeployment( + rev, + cfgs.Logging, + cfgs.Tracing, + cfgs.Network, + cfgs.Observability, + cfgs.Deployment, + ) + + if err != nil { + return nil, fmt.Errorf("failed to make deployment: %w", err) + } + + return c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Create(deployment) +} + +func (c *Reconciler) checkAndUpdateDeployment(ctx context.Context, rev *v1alpha1.Revision, have *appsv1.Deployment) (*appsv1.Deployment, error) { + logger := logging.FromContext(ctx) + cfgs := config.FromContext(ctx) + + deployment, err := resources.MakeDeployment( + rev, + cfgs.Logging, + cfgs.Tracing, + cfgs.Network, + cfgs.Observability, + cfgs.Deployment, + ) + + if err != nil { + return nil, fmt.Errorf("failed to update deployment: %w", err) + } + + // Preserve the current scale of the Deployment. + deployment.Spec.Replicas = have.Spec.Replicas + + // Preserve the label selector since it's immutable. + // TODO(dprotaso): determine other immutable properties. + deployment.Spec.Selector = have.Spec.Selector + + // If the spec we want is the spec we have, then we're good. + if equality.Semantic.DeepEqual(have.Spec, deployment.Spec) { + return have, nil + } + + // Otherwise attempt an update (with ONLY the spec changes). 
+ desiredDeployment := have.DeepCopy() + desiredDeployment.Spec = deployment.Spec + + // Carry over new labels. + desiredDeployment.Labels = presources.UnionMaps(deployment.Labels, desiredDeployment.Labels) + + d, err := c.KubeClientSet.AppsV1().Deployments(deployment.Namespace).Update(desiredDeployment) + if err != nil { + return nil, err + } + + // If what comes back from the update (with defaults applied by the API server) is the same + // as what we have then nothing changed. + if equality.Semantic.DeepEqual(have.Spec, d.Spec) { + return d, nil + } + diff, err := kmp.SafeDiff(have.Spec, d.Spec) + if err != nil { + return nil, err + } + + // If what comes back has a different spec, then signal the change. + logger.Infof("Reconciled deployment diff (-desired, +observed): %v", diff) + return d, nil +} + +func (c *Reconciler) createImageCache(ctx context.Context, rev *v1alpha1.Revision) (*caching.Image, error) { + image := resources.MakeImageCache(rev) + + return c.CachingClientSet.CachingV1alpha1().Images(image.Namespace).Create(image) +} + +func (c *Reconciler) createPA(ctx context.Context, rev *v1alpha1.Revision) (*av1alpha1.PodAutoscaler, error) { + pa := resources.MakePA(rev) + + return c.ServingClientSet.AutoscalingV1alpha1().PodAutoscalers(pa.Namespace).Create(pa) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/queueing_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/queueing_test.go new file mode 100644 index 0000000000..6218810c97 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/queueing_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "context" + "testing" + "time" + + "knative.dev/serving/pkg/apis/config" + + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "golang.org/x/sync/errgroup" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/metrics" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + "knative.dev/pkg/tracing" + tracingconfig "knative.dev/pkg/tracing/config" + tracetesting "knative.dev/pkg/tracing/testing" + autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/network" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + + . 
"knative.dev/pkg/reconciler/testing" +) + +type nopResolver struct{} + +func (r *nopResolver) Resolve(_ string, _ k8schain.Options, _ sets.String) (string, error) { + return "", nil +} + +const ( + testAutoscalerImage = "autoscalerImage" + testNamespace = "test" + testQueueImage = "queueImage" +) + +func testRevision() *v1alpha1.Revision { + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/revisions/test-rev", + Name: "test-rev", + Namespace: testNamespace, + Labels: map[string]string{ + "testLabel1": "foo", + "testLabel2": "bar", + serving.RouteLabelKey: "test-route", + }, + Annotations: map[string]string{ + "testAnnotation": "test", + }, + UID: "test-rev-uid", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + // corev1.Container has a lot of setting. We try to pass many + // of them here to verify that we pass through the settings to + // derived objects. + Containers: []corev1.Container{{ + Image: "gcr.io/repo/image", + Command: []string{"echo"}, + Args: []string{"hello", "world"}, + WorkingDir: "/tmp", + Env: []corev1.EnvVar{{ + Name: "EDITOR", + Value: "emacs", + }}, + LivenessProbe: &corev1.Probe{ + TimeoutSeconds: 42, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "health", + }, + }, + TimeoutSeconds: 43, + }, + TerminationMessagePath: "/dev/null", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + }, + }, + } + rev.SetDefaults(context.Background()) + return rev +} + +func getTestDeploymentConfig() *deployment.Config { + c, _ := deployment.NewConfigFromConfigMap(getTestDeploymentConfigMap()) + // ignoring error as test controller is generated + return c +} + +func getTestDeploymentConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployment.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "queueSidecarImage": 
testQueueImage, + "autoscalerImage": testAutoscalerImage, + }, + } +} + +func getTestDefaultsConfigMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DefaultsConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "container-name-template": "user-container", + }, + } +} + +func newTestController(t *testing.T) ( + context.Context, + context.CancelFunc, + []controller.Informer, + *controller.Impl, + *configmap.ManualWatcher) { + + ctx, cancel, informers := SetupFakeContextWithCancel(t) + configMapWatcher := &configmap.ManualWatcher{Namespace: system.Namespace()} + controller := NewController(ctx, configMapWatcher) + + controller.Reconciler.(*Reconciler).resolver = &nopResolver{} + + configs := []*corev1.ConfigMap{ + getTestDeploymentConfigMap(), + getTestDefaultsConfigMap(), + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: network.ConfigName, + }}, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: logging.ConfigMapName(), + }, + Data: map[string]string{ + "zap-logger-config": "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}", + "loglevel.queueproxy": "info", + }}, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: tracingconfig.ConfigName, + }, + Data: map[string]string{ + "enable": "true", + "debug": "true", + "zipkin-endpoint": "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "true", + }}, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: autoscaler.ConfigName, + }, + Data: map[string]string{ + "max-scale-up-rate": "2.0", + "container-concurrency-target-percentage": "0.5", + "container-concurrency-target-default": "10.0", + 
"stable-window": "5m", + "panic-window": "10s", + "scale-to-zero-threshold": "10m", + "tick-interval": "2s", + }}, + } + for _, configMap := range configs { + configMapWatcher.OnChange(configMap) + } + + return ctx, cancel, informers, controller, configMapWatcher +} + +func TestNewRevisionCallsSyncHandler(t *testing.T) { + ctx, cancel, informers, ctrl, _ := newTestController(t) + // Create tracer with reporter recorder + reporter, co := tracetesting.FakeZipkinExporter() + defer reporter.Close() + oct := tracing.NewOpenCensusTracer(co) + defer oct.Finish() + + cfg := tracingconfig.Config{ + Backend: tracingconfig.Zipkin, + Debug: true, + } + if err := oct.ApplyConfig(&cfg); err != nil { + t.Errorf("Failed to apply tracer config: %v", err) + } + + eg := errgroup.Group{} + + rev := testRevision() + servingClient := fakeservingclient.Get(ctx) + + h := NewHooks() + + // Check for a service created as a signal that syncHandler ran + h.OnCreate(&servingClient.Fake, "podautoscalers", func(obj runtime.Object) HookResult { + pa := obj.(*autoscalingv1alpha1.PodAutoscaler) + t.Logf("PA created: %s", pa.Name) + return HookComplete + }) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) 
+ if err != nil { + t.Fatalf("Error starting informers: %v", err) + } + defer func() { + cancel() + if err := eg.Wait(); err != nil { + t.Fatalf("Error running controller: %v", err) + } + waitInformers() + }() + + eg.Go(func() error { + return ctrl.Run(2, ctx.Done()) + }) + + if _, err := servingClient.ServingV1alpha1().Revisions(rev.Namespace).Create(rev); err != nil { + t.Fatalf("Error creating revision: %v", err) + } + + if err := h.WaitForHooks(time.Second * 3); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/reconcile_resources.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/reconcile_resources.go new file mode 100644 index 0000000000..8c53eb8099 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/reconcile_resources.go @@ -0,0 +1,184 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "fmt" + + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/revision/resources" + resourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names" +) + +func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1alpha1.Revision) error { + ns := rev.Namespace + deploymentName := resourcenames.Deployment(rev) + logger := logging.FromContext(ctx).With(zap.String(logkey.Deployment, deploymentName)) + + deployment, err := c.deploymentLister.Deployments(ns).Get(deploymentName) + if apierrs.IsNotFound(err) { + // Deployment does not exist. Create it. + rev.Status.MarkResourcesAvailableUnknown(v1alpha1.Deploying, "") + rev.Status.MarkContainerHealthyUnknown(v1alpha1.Deploying, "") + deployment, err = c.createDeployment(ctx, rev) + if err != nil { + return fmt.Errorf("failed to create deployment %q: %w", deploymentName, err) + } + logger.Infof("Created deployment %q", deploymentName) + } else if err != nil { + return fmt.Errorf("failed to get deployment %q: %w", deploymentName, err) + } else if !metav1.IsControlledBy(deployment, rev) { + // Surface an error in the revision's status, and return an error. + rev.Status.MarkResourcesAvailableFalse(v1alpha1.NotOwned, v1alpha1.ResourceNotOwnedMessage("Deployment", deploymentName)) + return fmt.Errorf("revision: %q does not own Deployment: %q", rev.Name, deploymentName) + } else { + // The deployment exists, but make sure that it has the shape that we expect. 
+ deployment, err = c.checkAndUpdateDeployment(ctx, rev, deployment) + if err != nil { + return fmt.Errorf("failed to update deployment %q: %w", deploymentName, err) + } + + // Now that we have a Deployment, determine whether there is any relevant + // status to surface in the Revision. + // + // TODO(jonjohnsonjr): Should we check Generation != ObservedGeneration? + // The autoscaler mutates the deployment pretty often, which would cause us + // to flip back and forth between Ready and Unknown every time we scale up + // or down. + if !rev.Status.IsActivationRequired() { + rev.Status.PropagateDeploymentStatus(&deployment.Status) + } + } + + // If a container keeps crashing (no active pods in the deployment although we want some) + if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 { + pods, err := c.KubeClientSet.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector)}) + if err != nil { + logger.Errorw("Error getting pods", zap.Error(err)) + } else if len(pods.Items) > 0 { + // Arbitrarily grab the very first pod, as they all should be crashing + pod := pods.Items[0] + + // Update the revision status if pod cannot be scheduled(possibly resource constraints) + // If pod cannot be scheduled then we expect the container status to be empty. 
+ for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodScheduled && cond.Status == corev1.ConditionFalse { + rev.Status.MarkResourcesAvailableFalse(cond.Reason, cond.Message) + break + } + } + + for _, status := range pod.Status.ContainerStatuses { + if status.Name == rev.Spec.GetContainer().Name { + if t := status.LastTerminationState.Terminated; t != nil { + logger.Infof("%s marking exiting with: %d/%s", rev.Name, t.ExitCode, t.Message) + rev.Status.MarkContainerHealthyFalse(v1alpha1.ExitCodeReason(t.ExitCode), v1alpha1.RevisionContainerExitingMessage(t.Message)) + } else if w := status.State.Waiting; w != nil && hasDeploymentTimedOut(deployment) { + logger.Infof("%s marking resources unavailable with: %s: %s", rev.Name, w.Reason, w.Message) + rev.Status.MarkResourcesAvailableFalse(w.Reason, w.Message) + } + break + } + } + } + } + + return nil +} + +func (c *Reconciler) reconcileImageCache(ctx context.Context, rev *v1alpha1.Revision) error { + logger := logging.FromContext(ctx) + + ns := rev.Namespace + imageName := resourcenames.ImageCache(rev) + _, err := c.imageLister.Images(ns).Get(imageName) + if apierrs.IsNotFound(err) { + _, err := c.createImageCache(ctx, rev) + if err != nil { + return fmt.Errorf("failed to create image cache %q: %w", imageName, err) + } + logger.Infof("Created image cache %q", imageName) + } else if err != nil { + return fmt.Errorf("failed to get image cache %q: %w", imageName, err) + } + + return nil +} + +func (c *Reconciler) reconcilePA(ctx context.Context, rev *v1alpha1.Revision) error { + ns := rev.Namespace + paName := resourcenames.PA(rev) + logger := logging.FromContext(ctx) + logger.Info("Reconciling PA: ", paName) + + pa, err := c.podAutoscalerLister.PodAutoscalers(ns).Get(paName) + if apierrs.IsNotFound(err) { + // PA does not exist. Create it. 
+ pa, err = c.createPA(ctx, rev) + if err != nil { + return fmt.Errorf("failed to create PA %q: %w", paName, err) + } + logger.Info("Created PA: ", paName) + } else if err != nil { + return fmt.Errorf("failed to get PA %q: %w", paName, err) + } else if !metav1.IsControlledBy(pa, rev) { + // Surface an error in the revision's status, and return an error. + rev.Status.MarkResourcesAvailableFalse(v1alpha1.NotOwned, v1alpha1.ResourceNotOwnedMessage("PodAutoscaler", paName)) + return fmt.Errorf("revision: %q does not own PodAutoscaler: %q", rev.Name, paName) + } + + // Perhaps the PA spec changed underneath ourselves? + // We no longer require immutability, so need to reconcile PA each time. + tmpl := resources.MakePA(rev) + if !equality.Semantic.DeepEqual(tmpl.Spec, pa.Spec) { + logger.Infof("PA %s needs reconciliation", pa.Name) + + want := pa.DeepCopy() + want.Spec = tmpl.Spec + if pa, err = c.ServingClientSet.AutoscalingV1alpha1().PodAutoscalers(pa.Namespace).Update(want); err != nil { + return fmt.Errorf("failed to update PA %q: %w", paName, err) + } + } + + rev.Status.PropagateAutoscalerStatus(&pa.Status) + return nil +} + +func hasDeploymentTimedOut(deployment *appsv1.Deployment) bool { + // as per https://kubernetes.io/docs/concepts/workloads/controllers/deployment + for _, cond := range deployment.Status.Conditions { + // Look for Deployment with status False + if cond.Status != corev1.ConditionFalse { + continue + } + // with Type Progressing and Reason Timeout + // TODO(arvtiwar): hard coding "ProgressDeadlineExceeded" to avoid import kubernetes/kubernetes + if cond.Type == appsv1.DeploymentProgressing && cond.Reason == "ProgressDeadlineExceeded" { + return true + } + } + return false +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go new file mode 100644 index 0000000000..140a70119a --- /dev/null +++ 
b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go @@ -0,0 +1,136 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "runtime" + "time" + + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes" +) + +type digestResolver struct { + client kubernetes.Interface + transport http.RoundTripper +} + +const ( + // Kubernetes CA certificate bundle is mounted into the pod here, see: + // https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/#trusting-tls-in-a-cluster + k8sCertPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +) + +// newResolverTransport returns an http.Transport that appends the certs bundle +// at path to the system cert pool. +// +// Use this with k8sCertPath to trust the same certs as the cluster. 
+func newResolverTransport(path string) (*http.Transport, error) { + pool, err := x509.SystemCertPool() + if err != nil { + pool = x509.NewCertPool() + } + + if crt, err := ioutil.ReadFile(path); err != nil { + return nil, err + } else if ok := pool.AppendCertsFromPEM(crt); !ok { + return nil, errors.New("failed to append k8s cert bundle to cert pool") + } + + // Copied from https://github.com/golang/go/blob/release-branch.go1.12/src/net/http/transport.go#L42-L53 + // We want to use the DefaultTransport but change its TLSClientConfig. There + // isn't a clean way to do this yet: https://github.com/golang/go/issues/26013 + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ResponseHeaderTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + // Use the cert pool with k8s cert bundle appended. + TLSClientConfig: &tls.Config{ + RootCAs: pool, + }, + }, nil +} + +// Resolve resolves the image references that use tags to digests. 
+func (r *digestResolver) Resolve( + image string, + opt k8schain.Options, + registriesToSkip sets.String) (string, error) { + kc, err := k8schain.New(r.client, opt) + if err != nil { + return "", fmt.Errorf("failed to initialize authentication: %w", err) + } + + if _, err := name.NewDigest(image, name.WeakValidation); err == nil { + // Already a digest + return image, nil + } + + tag, err := name.NewTag(image, name.WeakValidation) + if err != nil { + return "", fmt.Errorf("failed to parse image name %q into a tag: %w", image, err) + } + + if registriesToSkip.Has(tag.Registry.RegistryStr()) { + return "", nil + } + platform := v1.Platform{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + desc, err := remote.Get(tag, remote.WithTransport(r.transport), remote.WithAuthFromKeychain(kc), remote.WithPlatform(platform)) + if err != nil { + return "", fmt.Errorf("failed to fetch image information: %w", err) + } + + // TODO(#3997): Use remote.Get to resolve manifest lists to digests as well + // once CRI-O is fixed: https://github.com/cri-o/cri-o/issues/2157 + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + img, err := desc.Image() + if err != nil { + return "", fmt.Errorf("failed to get image reference: %w", err) + } + dgst, err := img.Digest() + if err != nil { + return "", fmt.Errorf("failed to get image digest: %w", err) + } + return fmt.Sprintf("%s@%s", tag.Repository.String(), dgst), nil + default: + return fmt.Sprintf("%s@%s", tag.Repository.String(), desc.Digest), nil + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve_test.go new file mode 100644 index 0000000000..0a5f4d156f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resolve_test.go @@ -0,0 +1,514 @@ +/* +Copyright 2018 The Knative Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "bytes" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/random" + "github.com/google/go-containerregistry/pkg/v1/types" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + fakeclient "k8s.io/client-go/kubernetes/fake" +) + +var emptyRegistrySet = sets.NewString() + +type digestible interface { + Digest() (v1.Hash, error) +} + +func mustDigest(t *testing.T, d digestible) v1.Hash { + h, err := d.Digest() + if err != nil { + t.Fatalf("Digest() = %v", err) + } + return h +} + +func mustRawManifest(t *testing.T, img partial.WithRawManifest) []byte { + m, err := img.RawManifest() + if err != nil { + t.Fatalf("RawManifest() = %v", err) + } + return m +} + +func fakeRegistry(t *testing.T, repo, username, password string, img v1.Image, idx v1.ImageIndex) *httptest.Server { + indexPath := fmt.Sprintf("/v2/%s/manifests/latest", repo) + imagePath := fmt.Sprintf("/v2/%s/manifests/%s", repo, mustDigest(t, img)) + 
schema1Path := fmt.Sprintf("/v2/%s/manifests/schema1", repo) + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/": + // Issue a "Basic" auth challenge, so we can check the auth sent to the registry. + w.Header().Set("WWW-Authenticate", `Basic `) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + case indexPath: + // Check that we get an auth header with base64 encoded username:password + hdr := r.Header.Get("Authorization") + if !strings.HasPrefix(hdr, "Basic ") { + t.Errorf("Header.Get(Authorization); got %v, want Basic prefix", hdr) + } + if want := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)); !strings.HasSuffix(hdr, want) { + t.Errorf("Header.Get(Authorization); got %v, want suffix %v", hdr, want) + } + if r.Method != http.MethodGet { + t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) + } + w.Header().Set("Content-Type", string(types.OCIImageIndex)) + w.Write(mustRawManifest(t, idx)) + case imagePath: + w.Write(mustRawManifest(t, img)) + case schema1Path: + w.Header().Set("Content-Type", string(types.DockerManifestSchema1Signed)) + w.Header().Set("Docker-Content-Digest", mustDigest(t, idx).String()) + w.Write([]byte("{}")) + default: + t.Fatalf("Unexpected path: %v", r.URL.Path) + } + })) +} + +func fakeRegistryPingFailure(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/": + http.Error(w, "Oops", http.StatusInternalServerError) + default: + t.Fatalf("Unexpected path: %v", r.URL.Path) + } + })) +} + +func fakeRegistryManifestFailure(t *testing.T, repo string) *httptest.Server { + manifestPath := fmt.Sprintf("/v2/%s/manifests/latest", repo) + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/": + // Issue a "Basic" auth challenge, so we can check the auth sent to 
the registry. + w.Header().Set("WWW-Authenticate", `Basic `) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + case manifestPath: + http.Error(w, "Boom", http.StatusInternalServerError) + default: + t.Fatalf("Unexpected path: %v", r.URL.Path) + } + })) +} + +func TestResolve(t *testing.T) { + username, password := "foo", "bar" + ns, svcacct := "user-project", "user-robot" + + idx, err := random.Index(1, 3, 1024) + if err != nil { + t.Fatalf("random.Index() = %v", err) + } + manifest, err := idx.IndexManifest() + if err != nil { + t.Fatalf("idx.IndexManifest() = %v", err) + } + img, err := idx.Image(manifest.Manifests[0].Digest) + if err != nil { + t.Fatalf("idx.Image(%v) = %v", manifest.Manifests[0].Digest, err) + } + // Make sure we resolve this child no matter which platform this test is being run on. + manifest.Manifests[0].Platform = &v1.Platform{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + + // Stand up a fake registry + expectedRepo := "booger/nose" + server := fakeRegistry(t, expectedRepo, username, password, img, idx) + defer server.Close() + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("url.Parse(%v) = %v", server.URL, err) + } + + for ref, dgst := range map[string]v1.Hash{ + "latest": mustDigest(t, img), + // This is a bit silly, but we are just going to pretend that the + // registry has a schema1 manifest with the same digest as our index. 
+ "schema1": mustDigest(t, idx), + } { + // Create a tag pointing to an image on our fake registry + tag, err := name.NewTag(fmt.Sprintf("%s/%s:%s", u.Host, expectedRepo, ref), name.WeakValidation) + if err != nil { + t.Fatalf("NewTag() = %v", err) + } + + // Set up a fake service account with pull secrets for our fake registry + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcacct, + Namespace: ns, + }, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "secret", + }}, + }, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: ns, + }, + Type: corev1.SecretTypeDockercfg, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte( + fmt.Sprintf(`{%q: {"username": %q, "password": %q}}`, + tag.RegistryStr(), username, password), + ), + }, + }) + + // Resolve our tag on the fake registry to the digest of the random.Image() + dr := &digestResolver{client: client, transport: http.DefaultTransport} + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + resolvedDigest, err := dr.Resolve(tag.String(), opt, emptyRegistrySet) + if err != nil { + t.Fatalf("Resolve() = %v", err) + } + + // Make sure that we get back the appropriate digest. 
+ digest, err := name.NewDigest(resolvedDigest, name.WeakValidation) + if err != nil { + t.Fatalf("NewDigest() = %v", err) + } + if got, want := digest.DigestStr(), dgst.String(); got != want { + t.Fatalf("Resolve() = %v, want %v", got, want) + } + } +} + +func TestResolveWithDigest(t *testing.T) { + ns, svcacct := "foo", "default" + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: ns, + }, + }) + originalDigest := "ubuntu@sha256:e7def0d56013d50204d73bb588d99e0baa7d69ea1bc1157549b898eb67287612" + dr := &digestResolver{client: client, transport: http.DefaultTransport} + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + resolvedDigest, err := dr.Resolve(originalDigest, opt, emptyRegistrySet) + if err != nil { + t.Fatalf("Resolve() = %v", err) + } + + if diff := cmp.Diff(originalDigest, resolvedDigest); diff != "" { + t.Errorf("Digest should not change (-want +got): %s", diff) + } +} + +func TestResolveWithBadTag(t *testing.T) { + ns, svcacct := "foo", "default" + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: ns, + }, + }) + dr := &digestResolver{client: client, transport: http.DefaultTransport} + + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + + // Invalid character + invalidImage := "ubuntu%latest" + if resolvedDigest, err := dr.Resolve(invalidImage, opt, emptyRegistrySet); err == nil { + t.Fatalf("Resolve() = %v, want error", resolvedDigest) + } +} + +func TestResolveWithPingFailure(t *testing.T) { + ns, svcacct := "user-project", "user-robot" + + // Stand up a fake registry + expectedRepo := "booger/nose" + server := fakeRegistryPingFailure(t) + defer server.Close() + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("url.Parse(%v) = %v", server.URL, err) + } + + // Create a tag pointing to an image on our fake registry + tag, err 
:= name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) + if err != nil { + t.Fatalf("NewTag() = %v", err) + } + + // Set up a fake service account with pull secrets for our fake registry + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcacct, + Namespace: ns, + }, + }) + + // Resolve our tag on the fake registry to the digest of the random.Image() + dr := &digestResolver{client: client, transport: http.DefaultTransport} + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + if resolvedDigest, err := dr.Resolve(tag.String(), opt, emptyRegistrySet); err == nil { + t.Fatalf("Resolve() = %v, want error", resolvedDigest) + } +} + +func TestResolveWithManifestFailure(t *testing.T) { + ns, svcacct := "user-project", "user-robot" + + // Stand up a fake registry + expectedRepo := "booger/nose" + server := fakeRegistryManifestFailure(t, expectedRepo) + defer server.Close() + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("url.Parse(%v) = %v", server.URL, err) + } + + // Create a tag pointing to an image on our fake registry + tag, err := name.NewTag(fmt.Sprintf("%s/%s:latest", u.Host, expectedRepo), name.WeakValidation) + if err != nil { + t.Fatalf("NewTag() = %v", err) + } + + // Set up a fake service account with pull secrets for our fake registry + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcacct, + Namespace: ns, + }, + }) + + // Resolve our tag on the fake registry to the digest of the random.Image() + dr := &digestResolver{client: client, transport: http.DefaultTransport} + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + if resolvedDigest, err := dr.Resolve(tag.String(), opt, emptyRegistrySet); err == nil { + t.Fatalf("Resolve() = %v, want error", resolvedDigest) + } +} + +func TestResolveNoAccess(t *testing.T) { + ns, svcacct := "foo", "default" 
+ client := fakeclient.NewSimpleClientset() + dr := &digestResolver{client: client, transport: http.DefaultTransport} + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + // If there is a failure accessing the ServiceAccount for this Pod, then we should see an error. + if resolvedDigest, err := dr.Resolve("ubuntu:latest", opt, emptyRegistrySet); err == nil { + t.Fatalf("Resolve() = %v, want error", resolvedDigest) + } +} + +func TestResolveSkippingRegistry(t *testing.T) { + username, password := "foo", "bar" + ns, svcacct := "user-project", "user-robot" + + // Set up a fake service account with pull secrets for our fake registry + client := fakeclient.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcacct, + Namespace: ns, + }, + ImagePullSecrets: []corev1.LocalObjectReference{{ + Name: "secret", + }}, + }, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: ns, + }, + Type: corev1.SecretTypeDockercfg, + Data: map[string][]byte{ + corev1.DockerConfigKey: []byte( + fmt.Sprintf(`{%q: {"username": %q, "password": %q}}`, + "localhost:5000", username, password), + ), + }, + }) + dr := &digestResolver{ + client: client, + transport: http.DefaultTransport, + } + + registriesToSkip := sets.NewString("localhost:5000") + + opt := k8schain.Options{ + Namespace: ns, + ServiceAccountName: svcacct, + } + + resolvedDigest, err := dr.Resolve("localhost:5000/ubuntu:latest", opt, registriesToSkip) + if err != nil { + t.Fatalf("Resolve() = %v", err) + } + + if got, want := resolvedDigest, ""; got != want { + t.Fatalf("Resolve() got %q want of %q", got, want) + } +} + +func TestNewResolverTransport(t *testing.T) { + // Cert stolen from crypto/x509/example_test.go + const certPEM = ` +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIIE31FZVaPXTUwDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE +BhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl 
+cm5ldCBBdXRob3JpdHkgRzIwHhcNMTQwMTI5MTMyNzQzWhcNMTQwNTI5MDAwMDAw +WjBpMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN +TW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEYMBYGA1UEAwwPbWFp +bC5nb29nbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEfRrObuSW5T7q +5CnSEqefEmtH4CCv6+5EckuriNr1CjfVvqzwfAhopXkLrq45EQm8vkmf7W96XJhC +7ZM0dYi1/qOCAU8wggFLMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAa +BgNVHREEEzARgg9tYWlsLmdvb2dsZS5jb20wCwYDVR0PBAQDAgeAMGgGCCsGAQUF +BwEBBFwwWjArBggrBgEFBQcwAoYfaHR0cDovL3BraS5nb29nbGUuY29tL0dJQUcy +LmNydDArBggrBgEFBQcwAYYfaHR0cDovL2NsaWVudHMxLmdvb2dsZS5jb20vb2Nz +cDAdBgNVHQ4EFgQUiJxtimAuTfwb+aUtBn5UYKreKvMwDAYDVR0TAQH/BAIwADAf +BgNVHSMEGDAWgBRK3QYWG7z2aLV29YG2u2IaulqBLzAXBgNVHSAEEDAOMAwGCisG +AQQB1nkCBQEwMAYDVR0fBCkwJzAloCOgIYYfaHR0cDovL3BraS5nb29nbGUuY29t +L0dJQUcyLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAH6RYHxHdcGpMpFE3oxDoFnP+ +gtuBCHan2yE2GRbJ2Cw8Lw0MmuKqHlf9RSeYfd3BXeKkj1qO6TVKwCh+0HdZk283 +TZZyzmEOyclm3UGFYe82P/iDFt+CeQ3NpmBg+GoaVCuWAARJN/KfglbLyyYygcQq +0SgeDh8dRKUiaW3HQSoYvTvdTuqzwK4CXsr3b5/dAOY8uMuG/IAR3FgwTbZ1dtoW +RvOTa8hYiU6A475WuZKyEHcwnGYe57u2I2KbMgcKjPniocj4QzgYsVAVKW3IwaOh +yE+vPxsiUkvQHdO2fojCkY8jg70jxM+gu59tPDNbw3Uh/2Ij310FgTHsnGQMyA== +-----END CERTIFICATE-----` + + cases := []struct { + name string + + certBundle string + certBundleContents []byte + + wantErr bool + }{{ + name: "valid cert", + certBundle: "valid-cert.crt", + certBundleContents: []byte(certPEM), + wantErr: false, + }, { + // Fails with file not found for path. + name: "cert not found", + certBundle: "not-found.crt", + certBundleContents: nil, + wantErr: true, + }, { + // Fails with invalid cert for path. 
+ name: "invalid cert", + certBundle: "invalid-cert.crt", + certBundleContents: []byte("this will not parse"), + wantErr: true, + }} + + tmpDir, err := ioutil.TempDir("", "TestNewResolverTransport-") + if err != nil { + t.Fatalf("failed to create tempdir for certs: %v", err) + } + defer os.RemoveAll(tmpDir) + + for i, tc := range cases { + i, tc := i, tc + t.Run(fmt.Sprintf("cases[%d]", i), func(t *testing.T) { + // Setup. + path, err := writeCertFile(tmpDir, tc.certBundle, tc.certBundleContents) + if err != nil { + t.Fatalf("Failed to write cert bundle file: %v", err) + } + + // The actual test. + if tr, err := newResolverTransport(path); err != nil && !tc.wantErr { + t.Errorf("Got unexpected err: %v", err) + } else if tc.wantErr && err == nil { + t.Error("Didn't get an error when we wanted it") + } else if err == nil { + // If we didn't get an error, make sure everything we wanted to happen happened. + subjects := tr.TLSClientConfig.RootCAs.Subjects() + + if !containsSubject(t, subjects, tc.certBundleContents) { + t.Error("cert pool does not contain certBundleContents") + } + } + }) + } +} + +func writeCertFile(dir, path string, contents []byte) (string, error) { + fp := filepath.Join(dir, path) + if contents != nil { + if err := ioutil.WriteFile(fp, contents, os.ModePerm); err != nil { + return "", err + } + } + return fp, nil +} + +func containsSubject(t *testing.T, subjects [][]byte, contents []byte) bool { + block, _ := pem.Decode([]byte(contents)) + if block == nil { + t.Fatal("failed to parse certificate PEM") + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("failed to parse certificate: %v", err) + } + + for _, b := range subjects { + if bytes.EqualFold(b, cert.RawSubject) { + return true + } + } + + return false +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/constants.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/constants.go new file mode 100644 index 
0000000000..5fdf2bf9ab --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/constants.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +const ( + // QueueContainerName is the name of the queue proxy side car + QueueContainerName = "queue-proxy" + + sidecarIstioInjectAnnotation = "sidecar.istio.io/inject" + // IstioOutboundIPRangeAnnotation defines the outbound ip ranges istio allows. + // TODO(mattmoor): Make this private once we remove revision_test.go + IstioOutboundIPRangeAnnotation = "traffic.sidecar.istio.io/includeOutboundIPRanges" + + // AppLabelKey is the label defining the application's name. + AppLabelKey = "app" + + // ProgressDeadlineSeconds is the time in seconds we wait for the deployment to + // be ready before considering it failed. + ProgressDeadlineSeconds = int32(120) +) + +var ( + // See https://github.com/knative/serving/pull/1124#issuecomment-397120430 + // for how CPU and memory values were calculated. 
+ queueContainerCPU = resource.MustParse("25m") +) diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go new file mode 100644 index 0000000000..8ebd177eda --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go @@ -0,0 +1,253 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + "strconv" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/logging" + "knative.dev/pkg/ptr" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/reconciler/revision/resources/names" + "knative.dev/serving/pkg/resources" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + varLogVolumeName = "knative-var-log" + varLogVolumePath = "/var/log" + internalVolumeName = "knative-internal" + internalVolumePath = "/var/knative-internal" +) + +var ( + varLogVolume = corev1.Volume{ + Name: varLogVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, 
+ }, + } + + varLogVolumeMount = corev1.VolumeMount{ + Name: varLogVolumeName, + MountPath: varLogVolumePath, + } + + internalVolume = corev1.Volume{ + Name: internalVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + + internalVolumeMount = corev1.VolumeMount{ + Name: internalVolumeName, + MountPath: internalVolumePath, + } + + // This PreStop hook is actually calling an endpoint on the queue-proxy + // because of the way PreStop hooks are called by kubelet. We use this + // to block the user-container from exiting before the queue-proxy is ready + // to exit so we can guarantee that there are no more requests in flight. + userLifecycle = &corev1.Lifecycle{ + PreStop: &corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(networking.QueueAdminPort), + Path: queue.RequestQueueDrainPath, + }, + }, + } +) + +func rewriteUserProbe(p *corev1.Probe, userPort int) { + if p == nil { + return + } + switch { + case p.HTTPGet != nil: + // For HTTP probes, we route them through the queue container + // so that we know the queue proxy is ready/live as well. + // It doesn't matter to which queue serving port we are forwarding the probe. + p.HTTPGet.Port = intstr.FromInt(networking.BackendHTTPPort) + // With mTLS enabled, Istio rewrites probes, but doesn't spoof the kubelet + // user agent, so we need to inject an extra header to be able to distinguish + // between probes and real requests. 
+ p.HTTPGet.HTTPHeaders = append(p.HTTPGet.HTTPHeaders, corev1.HTTPHeader{ + Name: network.KubeletProbeHeaderName, + Value: "queue", + }) + case p.TCPSocket != nil: + p.TCPSocket.Port = intstr.FromInt(userPort) + } +} + +func makePodSpec(rev *v1alpha1.Revision, loggingConfig *logging.Config, tracingConfig *tracingconfig.Config, observabilityConfig *metrics.ObservabilityConfig, deploymentConfig *deployment.Config) (*corev1.PodSpec, error) { + queueContainer, err := makeQueueContainer(rev, loggingConfig, tracingConfig, observabilityConfig, deploymentConfig) + + if err != nil { + return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) + } + + userContainer := rev.Spec.GetContainer().DeepCopy() + // Adding or removing an overwritten corev1.Container field here? Don't forget to + // update the fieldmasks / validations in pkg/apis/serving + + userContainer.VolumeMounts = append(userContainer.VolumeMounts, varLogVolumeMount) + userContainer.Lifecycle = userLifecycle + userPort := getUserPort(rev) + userPortInt := int(userPort) + userPortStr := strconv.Itoa(userPortInt) + // Replacement is safe as only up to a single port is allowed on the Revision + userContainer.Ports = buildContainerPorts(userPort) + userContainer.Env = append(userContainer.Env, buildUserPortEnv(userPortStr)) + userContainer.Env = append(userContainer.Env, getKnativeEnvVar(rev)...) 
+ // Explicitly disable stdin and tty allocation + userContainer.Stdin = false + userContainer.TTY = false + + // Prefer imageDigest from revision if available + if rev.Status.ImageDigest != "" { + userContainer.Image = rev.Status.ImageDigest + } + + if userContainer.TerminationMessagePolicy == "" { + userContainer.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError + } + + if userContainer.ReadinessProbe != nil { + if userContainer.ReadinessProbe.HTTPGet != nil || userContainer.ReadinessProbe.TCPSocket != nil { + // HTTP and TCP ReadinessProbes are executed by the queue-proxy directly against the + // user-container instead of via kubelet. + userContainer.ReadinessProbe = nil + } + } + + // If the client provides probes, we should fill in the port for them. + rewriteUserProbe(userContainer.LivenessProbe, userPortInt) + + podSpec := &corev1.PodSpec{ + Containers: []corev1.Container{ + *userContainer, + *queueContainer, + }, + Volumes: append([]corev1.Volume{varLogVolume}, rev.Spec.Volumes...), + ServiceAccountName: rev.Spec.ServiceAccountName, + TerminationGracePeriodSeconds: rev.Spec.TimeoutSeconds, + ImagePullSecrets: rev.Spec.ImagePullSecrets, + } + + // Add the Knative internal volume only if /var/log collection is enabled + if observabilityConfig.EnableVarLogCollection { + podSpec.Volumes = append(podSpec.Volumes, internalVolume) + } + + return podSpec, nil +} + +func getUserPort(rev *v1alpha1.Revision) int32 { + ports := rev.Spec.GetContainer().Ports + + if len(ports) > 0 && ports[0].ContainerPort != 0 { + return ports[0].ContainerPort + } + + return v1alpha1.DefaultUserPort +} + +func buildContainerPorts(userPort int32) []corev1.ContainerPort { + return []corev1.ContainerPort{{ + Name: v1alpha1.UserPortName, + ContainerPort: userPort, + }} +} + +func buildUserPortEnv(userPort string) corev1.EnvVar { + return corev1.EnvVar{ + Name: "PORT", + Value: userPort, + } +} + +// MakeDeployment constructs a K8s Deployment resource from a 
revision. +func MakeDeployment(rev *v1alpha1.Revision, + loggingConfig *logging.Config, tracingConfig *tracingconfig.Config, networkConfig *network.Config, observabilityConfig *metrics.ObservabilityConfig, + deploymentConfig *deployment.Config) (*appsv1.Deployment, error) { + + podTemplateAnnotations := resources.FilterMap(rev.GetAnnotations(), func(k string) bool { + return k == serving.RevisionLastPinnedAnnotationKey + }) + + // TODO(mattmoor): Once we have a mechanism for decorating arbitrary deployments (and opting + // out via annotation) we should explicitly disable that here to avoid redundant Image + // resources. + + // Inject the IP ranges for istio sidecar configuration. + // We will inject this value only if all of the following are true: + // - the config map contains a non-empty value + // - the user doesn't specify this annotation in configuration's pod template + // - configured values are valid CIDR notation IP addresses + // If these conditions are not met, this value will be left untouched. + // * is a special value that is accepted as a valid. + // * intercepts calls to all IPs: in cluster as well as outside the cluster. + if _, ok := podTemplateAnnotations[IstioOutboundIPRangeAnnotation]; !ok { + if len(networkConfig.IstioOutboundIPRanges) > 0 { + podTemplateAnnotations[IstioOutboundIPRangeAnnotation] = networkConfig.IstioOutboundIPRanges + } + } + podSpec, err := makePodSpec(rev, loggingConfig, tracingConfig, observabilityConfig, deploymentConfig) + if err != nil { + return nil, fmt.Errorf("failed to create PodSpec: %w", err) + } + + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Deployment(rev), + Namespace: rev.Namespace, + Labels: makeLabels(rev), + Annotations: resources.FilterMap(rev.GetAnnotations(), func(k string) bool { + // Exclude the heartbeat label, which can have high variance. 
+ return k == serving.RevisionLastPinnedAnnotationKey + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(rev)}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.Int32(1), + Selector: makeSelector(rev), + ProgressDeadlineSeconds: ptr.Int32(ProgressDeadlineSeconds), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: makeLabels(rev), + Annotations: podTemplateAnnotations, + }, + Spec: *podSpec, + }, + }, + }, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy_test.go new file mode 100644 index 0000000000..9479708fa0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy_test.go @@ -0,0 +1,972 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/logging" + pkgmetrics "knative.dev/pkg/metrics" + _ "knative.dev/pkg/metrics/testing" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" +) + +var ( + containerName = "my-container-name" + defaultUserContainer = &corev1.Container{ + Name: containerName, + Image: "busybox", + Ports: buildContainerPorts(v1alpha1.DefaultUserPort), + VolumeMounts: []corev1.VolumeMount{varLogVolumeMount}, + Lifecycle: userLifecycle, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + Stdin: false, + TTY: false, + Env: []corev1.EnvVar{{ + Name: "PORT", + Value: "8080", + }, { + Name: "K_REVISION", + Value: "bar", + }, { + Name: "K_CONFIGURATION", + Value: "cfg", + }, { + Name: "K_SERVICE", + Value: "svc", + }}, + } + + defaultQueueContainer = &corev1.Container{ + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "0"}, + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 10, + }, + SecurityContext: queueSecurityContext, + Env: []corev1.EnvVar{{ + Name: "SERVING_NAMESPACE", + Value: "foo", // matches namespace + }, 
{ + Name: "SERVING_SERVICE", + Value: "svc", // matches service name + }, { + Name: "SERVING_CONFIGURATION", + // No OwnerReference + }, { + Name: "SERVING_REVISION", + Value: "bar", // matches name + }, { + Name: "QUEUE_SERVING_PORT", + Value: "8012", + }, { + Name: "CONTAINER_CONCURRENCY", + Value: "0", + }, { + Name: "REVISION_TIMEOUT_SECONDS", + Value: "45", + }, { + Name: "SERVING_POD", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, { + Name: "SERVING_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }, { + Name: "SERVING_LOGGING_CONFIG", + // No logging configuration + }, { + Name: "SERVING_LOGGING_LEVEL", + // No logging level + }, { + Name: "SERVING_REQUEST_LOG_TEMPLATE", + Value: "", + }, { + Name: "SERVING_REQUEST_METRICS_BACKEND", + Value: "", + }, { + Name: "TRACING_CONFIG_BACKEND", + Value: "", + }, { + Name: "TRACING_CONFIG_ZIPKIN_ENDPOINT", + Value: "", + }, { + Name: "TRACING_CONFIG_STACKDRIVER_PROJECT_ID", + Value: "", + }, { + Name: "TRACING_CONFIG_DEBUG", + Value: "false", + }, { + Name: "TRACING_CONFIG_SAMPLE_RATE", + Value: "0.000000", + }, { + Name: "USER_PORT", + Value: "8080", + }, { + Name: "SYSTEM_NAMESPACE", + Value: system.Namespace(), + }, { + Name: "METRICS_DOMAIN", + Value: pkgmetrics.Domain(), + }, { + Name: "USER_CONTAINER_NAME", + Value: containerName, + }, { + Name: "ENABLE_VAR_LOG_COLLECTION", + Value: "false", + }, { + Name: "VAR_LOG_VOLUME_NAME", + Value: varLogVolumeName, + }, { + Name: "INTERNAL_VOLUME_PATH", + Value: internalVolumePath, + }, { + Name: "SERVING_READINESS_PROBE", + Value: fmt.Sprintf(`{"tcpSocket":{"port":%d,"host":"127.0.0.1"}}`, v1alpha1.DefaultUserPort), + }, { + Name: "ENABLE_PROFILING", + Value: "false", + }, { + Name: "SERVING_ENABLE_PROBE_REQUEST_LOG", + Value: "false", + }}, + } + + defaultPodSpec = &corev1.PodSpec{ + Volumes: []corev1.Volume{varLogVolume}, + 
TerminationGracePeriodSeconds: refInt64(45), + } + + defaultDeployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-deployment", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.Int32(1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + serving.RevisionUID: "1234", + }, + }, + ProgressDeadlineSeconds: ptr.Int32(ProgressDeadlineSeconds), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{}, + }, + // Spec: filled in by makePodSpec + }, + }, + } + + defaultRevision = &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ConfigurationLabelKey: "cfg", + serving.ServiceLabelKey: "svc", + serving.RouteLabelKey: "im-a-route", + }, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: containerName, + Image: "busybox", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(45), + }, + }, + } +) + +func refInt64(num int64) *int64 { + return &num +} + +type containerOption func(*corev1.Container) +type podSpecOption func(*corev1.PodSpec) +type deploymentOption func(*appsv1.Deployment) +type revisionOption func(*v1alpha1.Revision) + +func container(container *corev1.Container, opts ...containerOption) corev1.Container { + for _, option := range opts { + option(container) + } + return *container +} + +func userContainer(opts 
...containerOption) corev1.Container { + return container(defaultUserContainer.DeepCopy(), opts...) +} + +func queueContainer(opts ...containerOption) corev1.Container { + return container(defaultQueueContainer.DeepCopy(), opts...) +} + +func withEnvVar(name, value string) containerOption { + return func(container *corev1.Container) { + for i, envVar := range container.Env { + if envVar.Name == name { + container.Env[i].Value = value + return + } + } + + container.Env = append(container.Env, corev1.EnvVar{ + Name: name, + Value: value, + }) + } +} + +func withInternalVolumeMount() containerOption { + return func(container *corev1.Container) { + container.VolumeMounts = append(container.VolumeMounts, internalVolumeMount) + } +} + +func withReadinessProbe(handler corev1.Handler) containerOption { + return func(container *corev1.Container) { + container.ReadinessProbe = &corev1.Probe{Handler: handler} + } +} + +func withTCPReadinessProbe() containerOption { + return withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(v1alpha1.DefaultUserPort), + }, + }) +} + +func withHTTPReadinessProbe(port int) containerOption { + return withReadinessProbe(corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(port), + Path: "/", + }, + }) +} + +func withExecReadinessProbe(command []string) containerOption { + return withReadinessProbe(corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: command, + }, + }) +} + +func withLivenessProbe(handler corev1.Handler) containerOption { + return func(container *corev1.Container) { + container.LivenessProbe = &corev1.Probe{Handler: handler} + } +} + +func withPrependedVolumeMounts(volumeMounts ...corev1.VolumeMount) containerOption { + return func(c *corev1.Container) { + c.VolumeMounts = append(volumeMounts, c.VolumeMounts...) 
+ } +} + +func podSpec(containers []corev1.Container, opts ...podSpecOption) *corev1.PodSpec { + podSpec := defaultPodSpec.DeepCopy() + podSpec.Containers = containers + + for _, option := range opts { + option(podSpec) + } + + return podSpec +} + +func withAppendedVolumes(volumes ...corev1.Volume) podSpecOption { + return func(ps *corev1.PodSpec) { + ps.Volumes = append(ps.Volumes, volumes...) + } +} + +func makeDeployment(opts ...deploymentOption) *appsv1.Deployment { + deploy := defaultDeployment.DeepCopy() + for _, option := range opts { + option(deploy) + } + return deploy +} + +func revision(opts ...revisionOption) *v1alpha1.Revision { + revision := defaultRevision.DeepCopy() + for _, option := range opts { + option(revision) + } + return revision +} + +func withContainerConcurrency(cc int64) revisionOption { + return func(revision *v1alpha1.Revision) { + revision.Spec.ContainerConcurrency = &cc + } +} + +func withoutLabels(revision *v1alpha1.Revision) { + revision.ObjectMeta.Labels = map[string]string{} +} + +func withOwnerReference(name string) revisionOption { + return func(revision *v1alpha1.Revision) { + revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: name, + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }} + } +} + +func TestMakePodSpec(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + lc *logging.Config + tc *tracingconfig.Config + oc *metrics.ObservabilityConfig + cc *deployment.Config + want *corev1.PodSpec + }{{ + name: "user-defined user port, queue proxy have PORT env", + rev: revision( + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ + ContainerPort: 8888, + }} + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: 
&metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + func(container *corev1.Container) { + container.Ports[0].ContainerPort = 8888 + }, + withEnvVar("PORT", "8888"), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("USER_PORT", "8888"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + ), + }), + }, { + name: "volumes passed through", + rev: revision( + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + revision.Spec.GetContainer().Ports = []corev1.ContainerPort{{ + ContainerPort: 8888, + }} + revision.Spec.GetContainer().VolumeMounts = []corev1.VolumeMount{{ + Name: "asdf", + MountPath: "/asdf", + }} + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + revision.Spec.Volumes = []corev1.Volume{{ + Name: "asdf", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "asdf", + }, + }, + }} + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + func(container *corev1.Container) { + container.Ports[0].ContainerPort = 8888 + }, + withEnvVar("PORT", "8888"), + withPrependedVolumeMounts(corev1.VolumeMount{ + Name: "asdf", + MountPath: "/asdf", + }), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("USER_PORT", "8888"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + ), + }, withAppendedVolumes(corev1.Volume{ + Name: "asdf", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "asdf", + }, + }, + })), + }, { + name: "concurrency=1 no owner", + rev: revision( + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }, + ), + lc: 
&logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + ), + }), + }, { + name: "concurrency=1 no owner digest resolved", + rev: revision( + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + revision.Status = v1alpha1.RevisionStatus{ + ImageDigest: "busybox@sha256:deadbeef", + } + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(func(container *corev1.Container) { + container.Image = "busybox@sha256:deadbeef" + }), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + ), + }), + }, { + name: "concurrency=1 with owner", + rev: revision( + withContainerConcurrency(1), + withOwnerReference("parent-config"), + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(), + queueContainer( + withEnvVar("SERVING_CONFIGURATION", "parent-config"), + withEnvVar("CONTAINER_CONCURRENCY", "1"), + ), + }), + }, { + name: "with http readiness probe", + rev: revision(func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withHTTPReadinessProbe(v1alpha1.DefaultUserPort), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "0"), + withEnvVar("SERVING_READINESS_PROBE", 
`{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP","httpHeaders":[{"name":"K-Kubelet-Probe","value":"queue"}]}}`), + ), + }), + }, { + name: "with tcp readiness probe", + rev: revision(func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "0"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), + ), + }), + }, { + name: "with shell readiness probe", + rev: revision(func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withExecReadinessProbe([]string{"echo", "hello"}), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + withExecReadinessProbe([]string{"echo", "hello"})), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "0"), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), + ), + }), + }, { + name: "with http liveness probe", + rev: revision(func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withLivenessProbe(corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + withLivenessProbe(corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(networking.BackendHTTPPort), + HTTPHeaders: []corev1.HTTPHeader{{ + Name: 
network.KubeletProbeHeaderName, + Value: "queue", + }}, + }, + }), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "0"), + ), + }), + }, { + name: "with tcp liveness probe", + rev: revision(func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withLivenessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + withLivenessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(v1alpha1.DefaultUserPort), + }, + }), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "0"), + ), + }), + }, { + name: "with /var/log collection", + rev: revision(withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{ + EnableVarLogCollection: true, + }, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer(), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("ENABLE_VAR_LOG_COLLECTION", "true"), + withInternalVolumeMount(), + ), + }, + func(podSpec *corev1.PodSpec) { + podSpec.Volumes = append(podSpec.Volumes, internalVolume) + }, + ), + }, { + name: "complex pod spec", + rev: revision( + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + revision.ObjectMeta.Labels = map[string]string{} + revision.Spec.GetContainer().Command = []string{"/bin/bash"} + revision.Spec.GetContainer().Args = []string{"-c", "echo Hello world"} + container(revision.Spec.GetContainer(), + withTCPReadinessProbe(), + withEnvVar("FOO", "bar"), + withEnvVar("BAZ", "blah"), + ) + revision.Spec.GetContainer().Resources = corev1.ResourceRequirements{ + Requests: 
corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + revision.Spec.GetContainer().TerminationMessagePolicy = corev1.TerminationMessageReadFile + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: podSpec( + []corev1.Container{ + userContainer( + func(container *corev1.Container) { + container.Command = []string{"/bin/bash"} + container.Args = []string{"-c", "echo Hello world"} + container.Env = append([]corev1.EnvVar{{ + Name: "FOO", + Value: "bar", + }, { + Name: "BAZ", + Value: "blah", + }}, container.Env...) + container.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("666Mi"), + corev1.ResourceCPU: resource.MustParse("666m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("888Mi"), + corev1.ResourceCPU: resource.MustParse("888m"), + }, + } + container.TerminationMessagePolicy = corev1.TerminationMessageReadFile + }, + withEnvVar("K_CONFIGURATION", ""), + withEnvVar("K_SERVICE", ""), + ), + queueContainer( + withEnvVar("CONTAINER_CONCURRENCY", "1"), + withEnvVar("SERVING_SERVICE", ""), + ), + }), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + quantityComparer := cmp.Comparer(func(x, y resource.Quantity) bool { + return x.Cmp(y) == 0 + }) + got, err := makePodSpec(test.rev, test.lc, test.tc, test.oc, test.cc) + if err != nil { + t.Fatal("makePodSpec returned errror") + } + if diff := cmp.Diff(test.want, got, quantityComparer); diff != "" { + t.Errorf("makePodSpec (-want, +got) = %v", diff) + } + }) + + t.Run(test.name+"(podspec)", func(t *testing.T) { + quantityComparer := cmp.Comparer(func(x, y resource.Quantity) bool { + 
return x.Cmp(y) == 0 + }) + + // Same test, but via podspec. + test.rev.Spec.Containers = []corev1.Container{ + *test.rev.Spec.DeprecatedContainer, + } + test.rev.Spec.DeprecatedContainer = nil + + got, err := makePodSpec(test.rev, test.lc, test.tc, test.oc, test.cc) + if err != nil { + t.Fatal("makePodSpec returned errror") + } + if diff := cmp.Diff(test.want, got, quantityComparer); diff != "" { + t.Errorf("makePodSpec (-want, +got) = %v", diff) + } + }) + } +} + +func TestMissingProbeError(t *testing.T) { + _, err := MakeDeployment(defaultRevision, + &logging.Config{}, + &tracingconfig.Config{}, + &network.Config{}, + &metrics.ObservabilityConfig{}, + &deployment.Config{}, + ) + + if err == nil { + t.Error("expected error from MakeDeployment") + } +} + +func TestMakeDeployment(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + lc *logging.Config + tc *tracingconfig.Config + nc *network.Config + oc *metrics.ObservabilityConfig + cc *deployment.Config + want *appsv1.Deployment + }{{ + name: "with concurrency=1", + rev: revision( + withoutLabels, + withContainerConcurrency(1), + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + nc: &network.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: makeDeployment(), + }, { + name: "with owner", + rev: revision( + withoutLabels, + withOwnerReference("parent-config"), + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + nc: &network.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: 
&deployment.Config{}, + want: makeDeployment(), + }, { + name: "with outbound IP range configured", + rev: revision( + withoutLabels, + func(revision *v1alpha1.Revision) { + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + nc: &network.Config{ + IstioOutboundIPRanges: "*", + }, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: makeDeployment(func(deploy *appsv1.Deployment) { + deploy.Spec.Template.ObjectMeta.Annotations[IstioOutboundIPRangeAnnotation] = "*" + }), + }, { + name: "with sidecar annotation override", + rev: revision(withoutLabels, func(revision *v1alpha1.Revision) { + revision.ObjectMeta.Annotations = map[string]string{ + sidecarIstioInjectAnnotation: "false", + } + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + nc: &network.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: makeDeployment(func(deploy *appsv1.Deployment) { + deploy.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" + deploy.Spec.Template.ObjectMeta.Annotations[sidecarIstioInjectAnnotation] = "false" + }), + }, { + name: "with outbound IP range override", + rev: revision( + withoutLabels, + func(revision *v1alpha1.Revision) { + revision.ObjectMeta.Annotations = map[string]string{ + IstioOutboundIPRangeAnnotation: "10.4.0.0/14,10.7.240.0/20", + } + container(revision.Spec.GetContainer(), + withReadinessProbe(corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(12345), + }, + }), + ) + }, + ), + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + nc: &network.Config{ + 
IstioOutboundIPRanges: "*", + }, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: makeDeployment(func(deploy *appsv1.Deployment) { + deploy.ObjectMeta.Annotations[IstioOutboundIPRangeAnnotation] = "10.4.0.0/14,10.7.240.0/20" + deploy.Spec.Template.ObjectMeta.Annotations[IstioOutboundIPRangeAnnotation] = "10.4.0.0/14,10.7.240.0/20" + }), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Tested above so that we can rely on it here for brevity. + podSpec, err := makePodSpec(test.rev, test.lc, test.tc, test.oc, test.cc) + if err != nil { + t.Fatal("makePodSpec returned errror") + } + test.want.Spec.Template.Spec = *podSpec + got, err := MakeDeployment(test.rev, test.lc, test.tc, test.nc, test.oc, test.cc) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + if diff := cmp.Diff(test.want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("MakeDeployment (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/doc.go new file mode 100644 index 0000000000..bfd92671cb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package resources holds simple functions for synthesizing child resources +// from a Revision resource and any relevant Revision controller configuration. +package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/env_var.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/env_var.go new file mode 100644 index 0000000000..e582f07eb6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/env_var.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +const ( + knativeRevisionEnvVariableKey = "K_REVISION" + knativeConfigurationEnvVariableKey = "K_CONFIGURATION" + knativeServiceEnvVariableKey = "K_SERVICE" +) + +func getKnativeEnvVar(rev *v1alpha1.Revision) []corev1.EnvVar { + return []corev1.EnvVar{{ + Name: knativeRevisionEnvVariableKey, + Value: rev.Name, + }, { + Name: knativeConfigurationEnvVariableKey, + Value: rev.Labels[serving.ConfigurationLabelKey], + }, { + Name: knativeServiceEnvVariableKey, + Value: rev.Labels[serving.ServiceLabelKey], + }} +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache.go new file mode 100644 index 0000000000..92c0d513b0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + caching "knative.dev/caching/pkg/apis/caching/v1alpha1" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/revision/resources/names" + "knative.dev/serving/pkg/resources" +) + +// MakeImageCache makes an caching.Image resources from a revision. +func MakeImageCache(rev *v1alpha1.Revision) *caching.Image { + image := rev.Status.ImageDigest + if image == "" { + image = rev.Spec.GetContainer().Image + } + + img := &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.ImageCache(rev), + Namespace: rev.Namespace, + Labels: makeLabels(rev), + Annotations: resources.FilterMap(rev.GetAnnotations(), func(k string) bool { + // Ignore last pinned annotation. + return k == serving.RevisionLastPinnedAnnotationKey + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(rev)}, + }, + Spec: caching.ImageSpec{ + Image: image, + ServiceAccountName: rev.Spec.ServiceAccountName, + ImagePullSecrets: rev.Spec.ImagePullSecrets, + }, + } + + return img +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache_test.go new file mode 100644 index 0000000000..786ab74cb6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/imagecache_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + caching "knative.dev/caching/pkg/apis/caching/v1alpha1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestMakeImageCache(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + want *caching.Image + }{{ + name: "simple container", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "c", + }, + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + Status: v1alpha1.RevisionStatus{ + ImageDigest: "busybox@sha256:deadbeef", + }, + }, + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-cache", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: caching.ImageSpec{ + Image: "busybox@sha256:deadbeef", + }, + }, + }, { + name: "with service account", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + PodSpec: corev1.PodSpec{ + 
ServiceAccountName: "privilegeless", + }, + }, + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + }, + }, + want: &caching.Image{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar-cache", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: caching.ImageSpec{ + Image: "busybox", + ServiceAccountName: "privilegeless", + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakeImageCache(test.rev) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeImageCache (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta.go new file mode 100644 index 0000000000..eef8e74119 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/resources" +) + +// makeLabels constructs the labels we will apply to K8s resources. +func makeLabels(revision *v1alpha1.Revision) map[string]string { + labels := resources.FilterMap(revision.GetLabels(), func(k string) bool { + // Exclude the Route label so that a Revision becoming routable + // doesn't trigger deployment updates. + return k == serving.RouteLabelKey + }) + labels = resources.UnionMaps(labels, map[string]string{ + serving.RevisionLabelKey: revision.Name, + serving.RevisionUID: string(revision.UID), + }) + + // If users don't specify an app: label we will automatically + // populate it with the revision name to get the benefit of richer + // tracing information. + if _, ok := labels[AppLabelKey]; !ok { + labels[AppLabelKey] = revision.Name + } + return labels +} + +// makeSelector constructs the Selector we will apply to K8s resources. +func makeSelector(revision *v1alpha1.Revision) *metav1.LabelSelector { + return &metav1.LabelSelector{ + MatchLabels: map[string]string{ + serving.RevisionUID: string(revision.UID), + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta_test.go new file mode 100644 index 0000000000..3cd31ac205 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/meta_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestMakeLabels(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + want map[string]string + }{{ + name: "no user labels", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + }, + want: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + }, { + name: "propagate user labels", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + "ooga": "booga", + "unicorn": "rainbows", + }, + }, + }, + want: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + "ooga": "booga", + "unicorn": "rainbows", + }, + }, { + name: "override app label key", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + AppLabelKey: "my-app-override", + }, + }, + }, + want: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "my-app-override", + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := makeLabels(test.rev) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("makeLabels (-want, +got) = %v", diff) 
+ } + + wantSelector := &metav1.LabelSelector{ + MatchLabels: map[string]string{serving.RevisionUID: "1234"}, + } + gotSelector := makeSelector(test.rev) + if diff := cmp.Diff(wantSelector, gotSelector); diff != "" { + t.Errorf("makeLabels (-want, +got) = %v", diff) + } + + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/doc.go new file mode 100644 index 0000000000..42a13f3c0f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names holds simple functions for synthesizing resource names. +package names diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names.go new file mode 100644 index 0000000000..52d88e420a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import "knative.dev/pkg/kmeta" + +// Deployment returns the precomputed name for the revision deployment +func Deployment(rev kmeta.Accessor) string { + return kmeta.ChildName(rev.GetName(), "-deployment") +} + +// ImageCache returns the precomputed name for the image cache. +func ImageCache(rev kmeta.Accessor) string { + return kmeta.ChildName(rev.GetName(), "-cache") +} + +// PA returns the PA name for the revision. +func PA(rev kmeta.Accessor) string { + return rev.GetName() +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names_test.go new file mode 100644 index 0000000000..e4a531fe0b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/names/names_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import ( + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestNamer(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + f func(kmeta.Accessor) string + want string + }{{ + name: "Deployment too long", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("f", 63), + }, + }, + f: Deployment, + want: "ffffffffffffffffffff105d7597f637e83cc711605ac3ea4957-deployment", + }, { + name: "Deployment long enough", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("f", 52), + }, + }, + f: Deployment, + want: strings.Repeat("f", 52) + "-deployment", + }, { + name: "Deployment", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + f: Deployment, + want: "foo-deployment", + }, { + name: "ImageCache, barely fits", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("u", 57), + }, + }, + f: ImageCache, + want: strings.Repeat("u", 57) + "-cache", + }, { + name: "ImageCache, already too long", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat("u", 63), + }, + }, + f: ImageCache, + want: "uuuuuuuuuuuuuuuuuuuuuuuuuca47ad1ce8479df271ec0d23653ce256-cache", + }, { + name: "ImageCache", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + f: ImageCache, + want: "foo-cache", + }, { + name: "PA", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + }, + }, + f: PA, + want: "baz", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.f(test.rev) + if got != test.want { + t.Errorf("%s() = %v, wanted %v", test.name, got, test.want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa.go 
b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa.go new file mode 100644 index 0000000000..b78653edb4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/revision/resources/names" + "knative.dev/serving/pkg/resources" +) + +// MakePA makes a Knative Pod Autoscaler resource from a revision. +func MakePA(rev *v1alpha1.Revision) *av1alpha1.PodAutoscaler { + return &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.PA(rev), + Namespace: rev.Namespace, + Labels: makeLabels(rev), + Annotations: resources.FilterMap(rev.GetAnnotations(), func(k string) bool { + // Ignore last pinned annotation. 
+ return k == serving.RevisionLastPinnedAnnotationKey + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(rev)}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: rev.Spec.GetContainerConcurrency(), + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: names.Deployment(rev), + }, + ProtocolType: rev.GetProtocol(), + Reachability: func() av1alpha1.ReachabilityType { + // If the Revision has failed to become Ready, then mark the PodAutoscaler as unreachable. + if cond := rev.Status.GetCondition(v1alpha1.RevisionConditionReady); cond != nil && cond.Status == corev1.ConditionFalse { + // As a sanity check, also make sure that we don't do this when a + // newly failing revision is marked reachable by outside forces. + if !rev.IsReachable() { + return av1alpha1.ReachabilityUnreachable + } + } + + // We don't know the reachability if the revision has just been created + // or it is activating. + if cond := rev.Status.GetCondition(v1alpha1.RevisionConditionActive); cond != nil && cond.Status == corev1.ConditionUnknown { + return av1alpha1.ReachabilityUnknown + } + + if rev.IsReachable() { + return av1alpha1.ReachabilityReachable + } + return av1alpha1.ReachabilityUnreachable + }(), + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa_test.go new file mode 100644 index 0000000000..5cad313116 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/pa_test.go @@ -0,0 +1,327 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/ptr" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestMakePA(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + want *av1alpha1.PodAutoscaler + }{{ + name: "name is bar (Concurrency=1, Reachable=true)", + rev: func() *v1alpha1.Revision { + rev := v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.RouteLabelKey: "some-route", + }, + Annotations: map[string]string{ + "a": "b", + serving.RevisionLastPinnedAnnotationKey: "timeless", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + }, + }, + } + rev.Status.MarkActiveTrue() + return &rev + }(), + want: &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + Labels: map[string]string{ + serving.RevisionLabelKey: "bar", + serving.RevisionUID: "1234", + AppLabelKey: "bar", + }, + Annotations: map[string]string{ + "a": "b", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + 
Name: "bar", + UID: "1234", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 1, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "bar-deployment", + }, + ProtocolType: networking.ProtocolHTTP1, + Reachability: av1alpha1.ReachabilityReachable, + }, + }, + }, { + name: "name is baz (Concurrency=0, Reachable=false)", + rev: func() *v1alpha1.Revision { + rev := v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "baz", + UID: "4321", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + }, + DeprecatedContainer: &corev1.Container{ + Ports: []corev1.ContainerPort{{ + Name: "h2c", + HostPort: int32(443), + }}, + }, + }, + } + rev.Status.MarkActiveTrue() + return &rev + }(), + want: &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "baz", + Labels: map[string]string{ + serving.RevisionLabelKey: "baz", + serving.RevisionUID: "4321", + AppLabelKey: "baz", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "baz", + UID: "4321", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "baz-deployment", + }, + ProtocolType: networking.ProtocolH2C, + Reachability: av1alpha1.ReachabilityUnreachable, + }}, + }, { + name: "name is baz (Concurrency=0, Reachable=false, Activating)", + rev: func() *v1alpha1.Revision { + rev := v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "baz", + UID: "4321", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + 
}, + DeprecatedContainer: &corev1.Container{ + Ports: []corev1.ContainerPort{{ + Name: "h2c", + HostPort: int32(443), + }}, + }, + }, + } + rev.Status.MarkActiveUnknown("reasons", "because") + return &rev + }(), + want: &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "baz", + Labels: map[string]string{ + serving.RevisionLabelKey: "baz", + serving.RevisionUID: "4321", + AppLabelKey: "baz", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "baz", + UID: "4321", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "baz-deployment", + }, + ProtocolType: networking.ProtocolH2C, + Reachability: av1alpha1.ReachabilityUnknown, + }}, + }, { + name: "name is batman (Activating, Revision failed)", + rev: func() *v1alpha1.Revision { + rev := v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "batman", + UID: "4321", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + }, + DeprecatedContainer: &corev1.Container{ + Ports: []corev1.ContainerPort{{ + Name: "h2c", + HostPort: int32(443), + }}, + }, + }, + } + rev.Status.MarkActiveUnknown("reasons", "because") + rev.Status.MarkResourcesAvailableFalse("foo", "bar") + return &rev + }(), + want: &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "batman", + Labels: map[string]string{ + serving.RevisionLabelKey: "batman", + serving.RevisionUID: "4321", + AppLabelKey: "batman", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "batman", + UID: "4321", + Controller: 
ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "batman-deployment", + }, + ProtocolType: networking.ProtocolH2C, + // When the Revision has failed, we mark the PA as unreachable. + Reachability: av1alpha1.ReachabilityUnreachable, + }}, + }, { + name: "name is robin (Activating, Revision routable but failed)", + rev: func() *v1alpha1.Revision { + rev := v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "robin", + UID: "4321", + Labels: map[string]string{ + serving.RouteLabelKey: "asdf", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + }, + DeprecatedContainer: &corev1.Container{ + Ports: []corev1.ContainerPort{{ + Name: "h2c", + HostPort: int32(443), + }}, + }, + }, + } + rev.Status.MarkActiveUnknown("reasons", "because") + rev.Status.MarkResourcesAvailableFalse("foo", "bar") + return &rev + }(), + want: &av1alpha1.PodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "blah", + Name: "robin", + Labels: map[string]string{ + serving.RevisionLabelKey: "robin", + serving.RevisionUID: "4321", + AppLabelKey: "robin", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Revision", + Name: "robin", + UID: "4321", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: av1alpha1.PodAutoscalerSpec{ + ContainerConcurrency: 0, + ScaleTargetRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "robin-deployment", + }, + ProtocolType: networking.ProtocolH2C, + // Reachability trumps failure of Revisions. 
+ Reachability: av1alpha1.ReachabilityUnknown, + }}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakePA(test.rev) + if diff := cmp.Diff(test.want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("MakeK8sService (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go new file mode 100644 index 0000000000..8e1df78374 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go @@ -0,0 +1,371 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "math" + "strconv" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/logging" + pkgmetrics "knative.dev/pkg/metrics" + "knative.dev/pkg/profiling" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/queue/readiness" +) + +const ( + localAddress = "127.0.0.1" + requestQueueHTTPPortName = "queue-port" + profilingPortName = "profiling-port" +) + +var ( + queueHTTPPort = corev1.ContainerPort{ + Name: requestQueueHTTPPortName, + ContainerPort: int32(networking.BackendHTTPPort), + } + queueHTTP2Port = corev1.ContainerPort{ + Name: requestQueueHTTPPortName, + ContainerPort: int32(networking.BackendHTTP2Port), + } + queueNonServingPorts = []corev1.ContainerPort{{ + // Provides health checks and lifecycle hooks. 
+ Name: v1alpha1.QueueAdminPortName, + ContainerPort: int32(networking.QueueAdminPort), + }, { + Name: v1alpha1.AutoscalingQueueMetricsPortName, + ContainerPort: int32(networking.AutoscalingQueueMetricsPort), + }, { + Name: v1alpha1.UserQueueMetricsPortName, + ContainerPort: int32(networking.UserQueueMetricsPort), + }} + + profilingPort = corev1.ContainerPort{ + Name: profilingPortName, + ContainerPort: int32(profiling.ProfilingPort), + } + + queueSecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.Bool(false), + } +) + +func createQueueResources(annotations map[string]string, userContainer *corev1.Container) corev1.ResourceRequirements { + resourceRequests := corev1.ResourceList{corev1.ResourceCPU: queueContainerCPU} + resourceLimits := corev1.ResourceList{} + var requestCPU, limitCPU, requestMemory, limitMemory resource.Quantity + + if resourceFraction, ok := fractionFromPercentage(annotations, serving.QueueSideCarResourcePercentageAnnotation); ok { + if ok, requestCPU = computeResourceRequirements(userContainer.Resources.Requests.Cpu(), resourceFraction, queueContainerRequestCPU); ok { + resourceRequests[corev1.ResourceCPU] = requestCPU + } + + if ok, limitCPU = computeResourceRequirements(userContainer.Resources.Limits.Cpu(), resourceFraction, queueContainerLimitCPU); ok { + resourceLimits[corev1.ResourceCPU] = limitCPU + } + + if ok, requestMemory = computeResourceRequirements(userContainer.Resources.Requests.Memory(), resourceFraction, queueContainerRequestMemory); ok { + resourceRequests[corev1.ResourceMemory] = requestMemory + } + + if ok, limitMemory = computeResourceRequirements(userContainer.Resources.Limits.Memory(), resourceFraction, queueContainerLimitMemory); ok { + resourceLimits[corev1.ResourceMemory] = limitMemory + } + } + + resources := corev1.ResourceRequirements{ + Requests: resourceRequests, + } + if len(resourceLimits) != 0 { + resources.Limits = resourceLimits + } + + return resources +} + +func 
computeResourceRequirements(resourceQuantity *resource.Quantity, fraction float64, boundary resourceBoundary) (bool, resource.Quantity) { + if resourceQuantity.IsZero() { + return false, resource.Quantity{} + } + + // In case the resourceQuantity MilliValue overflows int64 we use MaxInt64 + // https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go + scaledValue := resourceQuantity.Value() + scaledMilliValue := int64(math.MaxInt64 - 1) + if scaledValue < (math.MaxInt64 / 1000) { + scaledMilliValue = resourceQuantity.MilliValue() + } + + // float64(math.MaxInt64) > math.MaxInt64, to avoid overflow + percentageValue := float64(scaledMilliValue) * fraction + newValue := int64(math.MaxInt64) + if percentageValue < math.MaxInt64 { + newValue = int64(percentageValue) + } + + newquantity := boundary.applyBoundary(*resource.NewMilliQuantity(newValue, resource.BinarySI)) + return true, newquantity +} + +func fractionFromPercentage(m map[string]string, k string) (float64, bool) { + value, err := strconv.ParseFloat(m[k], 64) + return float64(value / 100), err == nil +} + +func makeQueueProbe(in *corev1.Probe) *corev1.Probe { + if in == nil || in.PeriodSeconds == 0 { + out := &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "0"}, + }, + }, + // We want to mark the service as not ready as soon as the + // PreStop handler is called, so we need to check a little + // bit more often than the default. It is a small + // sacrifice for a low rate of 503s. + PeriodSeconds: 1, + // We keep the connection open for a while because we're + // actively probing the user-container on that endpoint and + // thus don't want to be limited by K8s granularity here. 
+ TimeoutSeconds: 10, + } + + if in != nil { + out.InitialDelaySeconds = in.InitialDelaySeconds + } + return out + } + + timeout := 1 + + if in.TimeoutSeconds > 1 { + timeout = int(in.TimeoutSeconds) + } + + return &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", strconv.Itoa(timeout)}, + }, + }, + PeriodSeconds: in.PeriodSeconds, + TimeoutSeconds: int32(timeout), + SuccessThreshold: in.SuccessThreshold, + FailureThreshold: in.FailureThreshold, + InitialDelaySeconds: in.InitialDelaySeconds, + } +} + +// makeQueueContainer creates the container spec for the queue sidecar. +func makeQueueContainer(rev *v1alpha1.Revision, loggingConfig *logging.Config, tracingConfig *tracingconfig.Config, observabilityConfig *metrics.ObservabilityConfig, + deploymentConfig *deployment.Config) (*corev1.Container, error) { + configName := "" + if owner := metav1.GetControllerOf(rev); owner != nil && owner.Kind == "Configuration" { + configName = owner.Name + } + serviceName := rev.Labels[serving.ServiceLabelKey] + + userPort := getUserPort(rev) + + var loggingLevel string + if ll, ok := loggingConfig.LoggingLevel["queueproxy"]; ok { + loggingLevel = ll.String() + } + + ts := int64(0) + if rev.Spec.TimeoutSeconds != nil { + ts = *rev.Spec.TimeoutSeconds + } + + ports := queueNonServingPorts + if observabilityConfig.EnableProfiling { + ports = append(ports, profilingPort) + } + // We need to configure only one serving port for the Queue proxy, since + // we know the protocol that is being used by this application. 
+ servingPort := queueHTTPPort + if rev.GetProtocol() == networking.ProtocolH2C { + servingPort = queueHTTP2Port + } + ports = append(ports, servingPort) + + var volumeMounts []corev1.VolumeMount + if observabilityConfig.EnableVarLogCollection { + volumeMounts = append(volumeMounts, internalVolumeMount) + } + + rp := rev.Spec.GetContainer().ReadinessProbe.DeepCopy() + + applyReadinessProbeDefaults(rp, userPort) + + probeJSON, err := readiness.EncodeProbe(rp) + if err != nil { + return nil, fmt.Errorf("failed to serialize readiness probe: %w", err) + } + + return &corev1.Container{ + Name: QueueContainerName, + Image: deploymentConfig.QueueSidecarImage, + Resources: createQueueResources(rev.GetAnnotations(), rev.Spec.GetContainer()), + Ports: ports, + ReadinessProbe: makeQueueProbe(rp), + VolumeMounts: volumeMounts, + SecurityContext: queueSecurityContext, + Env: []corev1.EnvVar{{ + Name: "SERVING_NAMESPACE", + Value: rev.Namespace, + }, { + Name: "SERVING_SERVICE", + Value: serviceName, + }, { + Name: "SERVING_CONFIGURATION", + Value: configName, + }, { + Name: "SERVING_REVISION", + Value: rev.Name, + }, { + Name: "QUEUE_SERVING_PORT", + Value: strconv.Itoa(int(servingPort.ContainerPort)), + }, { + Name: "CONTAINER_CONCURRENCY", + Value: strconv.Itoa(int(rev.Spec.GetContainerConcurrency())), + }, { + Name: "REVISION_TIMEOUT_SECONDS", + Value: strconv.Itoa(int(ts)), + }, { + Name: "SERVING_POD", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, { + Name: "SERVING_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, { + Name: "SERVING_LOGGING_CONFIG", + Value: loggingConfig.LoggingConfig, + }, { + Name: "SERVING_LOGGING_LEVEL", + Value: loggingLevel, + }, { + Name: "SERVING_REQUEST_LOG_TEMPLATE", + Value: observabilityConfig.RequestLogTemplate, + }, { + Name: "SERVING_REQUEST_METRICS_BACKEND", + Value: 
observabilityConfig.RequestMetricsBackend, + }, { + Name: "TRACING_CONFIG_BACKEND", + Value: string(tracingConfig.Backend), + }, { + Name: "TRACING_CONFIG_ZIPKIN_ENDPOINT", + Value: tracingConfig.ZipkinEndpoint, + }, { + Name: "TRACING_CONFIG_STACKDRIVER_PROJECT_ID", + Value: tracingConfig.StackdriverProjectID, + }, { + Name: "TRACING_CONFIG_DEBUG", + Value: strconv.FormatBool(tracingConfig.Debug), + }, { + Name: "TRACING_CONFIG_SAMPLE_RATE", + Value: fmt.Sprintf("%f", tracingConfig.SampleRate), + }, { + Name: "USER_PORT", + Value: strconv.Itoa(int(userPort)), + }, { + Name: system.NamespaceEnvKey, + Value: system.Namespace(), + }, { + Name: pkgmetrics.DomainEnv, + Value: pkgmetrics.Domain(), + }, { + Name: "USER_CONTAINER_NAME", + Value: rev.Spec.GetContainer().Name, + }, { + Name: "ENABLE_VAR_LOG_COLLECTION", + Value: strconv.FormatBool(observabilityConfig.EnableVarLogCollection), + }, { + Name: "VAR_LOG_VOLUME_NAME", + Value: varLogVolumeName, + }, { + Name: "INTERNAL_VOLUME_PATH", + Value: internalVolumePath, + }, { + Name: "SERVING_READINESS_PROBE", + Value: probeJSON, + }, { + Name: "ENABLE_PROFILING", + Value: strconv.FormatBool(observabilityConfig.EnableProfiling), + }, { + Name: "SERVING_ENABLE_PROBE_REQUEST_LOG", + Value: strconv.FormatBool(observabilityConfig.EnableProbeRequestLog), + }}, + }, nil +} + +func applyReadinessProbeDefaults(p *corev1.Probe, port int32) { + switch { + case p == nil: + return + case p.HTTPGet != nil: + p.HTTPGet.Host = localAddress + p.HTTPGet.Port = intstr.FromInt(int(port)) + + if p.HTTPGet.Scheme == "" { + p.HTTPGet.Scheme = corev1.URISchemeHTTP + } + + p.HTTPGet.HTTPHeaders = append(p.HTTPGet.HTTPHeaders, corev1.HTTPHeader{ + Name: network.KubeletProbeHeaderName, + Value: queue.Name, + }) + case p.TCPSocket != nil: + p.TCPSocket.Host = localAddress + p.TCPSocket.Port = intstr.FromInt(int(port)) + case p.Exec != nil: + // User-defined ExecProbe will still be run on user-container. + // Use TCP probe in queue-proxy. 
+ p.TCPSocket = &corev1.TCPSocketAction{ + Host: localAddress, + Port: intstr.FromInt(int(port)), + } + p.Exec = nil + } + + if p.PeriodSeconds > 0 && p.TimeoutSeconds < 1 { + p.TimeoutSeconds = 1 + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue_test.go new file mode 100644 index 0000000000..ec45f546ef --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue_test.go @@ -0,0 +1,1163 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/logging" + pkgmetrics "knative.dev/pkg/metrics" + _ "knative.dev/pkg/metrics/testing" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + _ "knative.dev/pkg/system/testing" + tracingconfig "knative.dev/pkg/tracing/config" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/resources" +) + +var ( + defaultKnativeQReadinessProbe = &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "0"}, + }, + }, + // We want to mark the service as not ready as soon as the + // PreStop handler is called, so we need to check a little + // bit more often than the default. It is a small + // sacrifice for a low rate of 503s. + PeriodSeconds: 1, + // We keep the connection open for a while because we're + // actively probing the user-container on that endpoint and + // thus don't want to be limited by K8s granularity here. 
+ TimeoutSeconds: 10, + } + testProbe = &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + }, + }, + } +) + +const testProbeJSONTemplate = `{"tcpSocket":{"port":%d,"host":"127.0.0.1"}}` + +func TestMakeQueueContainer(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + lc *logging.Config + tc *tracingconfig.Config + oc *metrics.ObservabilityConfig + cc *deployment.Config + want *corev1.Container + }{{ + name: "no owner no autoscaler single", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Env: env(nil), + }, + }, { + name: "no owner no autoscaler single", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(networking.ProtocolH2C), + }}, + }}, + }, + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTP2Port), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "USER_PORT": "1955", + "QUEUE_SERVING_PORT": "8013", + }), + }, + }, { + name: "service name in labels", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ServiceLabelKey: "svc", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "SERVING_SERVICE": "svc", + }), + }}, { + name: "config owner as env var, zero concurrency", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "baz", + Name: "blah", + UID: "1234", + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "the-parent-config-name", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "SERVING_CONFIGURATION": "the-parent-config-name", + "SERVING_NAMESPACE": "baz", + "SERVING_REVISION": "blah", + }), + }, + }, { + name: "logging configuration as env var", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "log", + Name: "this", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{ + LoggingConfig: "The logging configuration goes here", + LoggingLevel: map[string]zapcore.Level{ + "queueproxy": zapcore.ErrorLevel, + }, + }, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "SERVING_LOGGING_CONFIG": "The logging configuration goes here", + "SERVING_LOGGING_LEVEL": "error", + "SERVING_NAMESPACE": "log", + "SERVING_REVISION": "this", + }), + }, + }, { + name: "container concurrency 10", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(10), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "10", + }), + }, + }, { + name: "request log configuration as env var", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{ + RequestLogTemplate: "test template", + EnableProbeRequestLog: true, + }, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. + Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "SERVING_REQUEST_LOG_TEMPLATE": "test template", + "SERVING_ENABLE_PROBE_REQUEST_LOG": "true", + }), + }, + }, { + name: "request metrics backend as env var", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{ + RequestMetricsBackend: "prometheus", + }, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed 
in. + Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "SERVING_REQUEST_METRICS_BACKEND": "prometheus", + }), + }, + }, { + name: "enable profiling", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(0), + TimeoutSeconds: ptr.Int64(45), + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{EnableProfiling: true}, + cc: &deployment.Config{}, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, profilingPort, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. + Env: env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "ENABLE_PROFILING": "true", + }), + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if len(test.rev.Spec.PodSpec.Containers) == 0 { + test.rev.Spec.PodSpec = corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + }}, + } + } + got, err := makeQueueContainer(test.rev, test.lc, test.tc, test.oc, test.cc) + + if err != nil { + t.Fatal("makeQueueContainer returned error") + } + + test.want.Env = append(test.want.Env, corev1.EnvVar{ + Name: "SERVING_READINESS_PROBE", + Value: probeJSON(test.rev.Spec.GetContainer()), + }) + sortEnv(got.Env) + sortEnv(test.want.Env) + if diff := cmp.Diff(test.want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("makeQueueContainer (-want, +got) = %v", diff) + } + }) + } +} + +func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { + tests := []struct { + name string + rev *v1alpha1.Revision + lc 
*logging.Config + tc *tracingconfig.Config + oc *metrics.ObservabilityConfig + cc *deployment.Config + want *corev1.Container + }{{ + name: "resources percentage in annotations", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ServiceLabelKey: "svc", + }, + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "20", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceName("memory"): resource.MustParse("2Gi"), + corev1.ResourceName("cpu"): resource.MustParse("2"), + }, + }, + }}, + }, + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("25m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceName("memory"): *resource.NewMilliQuantity(429496729600, resource.BinarySI), + corev1.ResourceName("cpu"): *resource.NewMilliQuantity(400, resource.BinarySI), + }, + }, + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "SERVING_SERVICE": "svc", + }), + }}, { + name: "resources percentage in annotations small than min allowed", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ServiceLabelKey: "svc", + }, + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "0.2", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + corev1.ResourceName("memory"): resource.MustParse("128Mi"), + }, + }, + }}, + }, + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("25m"), + corev1.ResourceName("memory"): resource.MustParse("50Mi"), + }, + }, + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "SERVING_SERVICE": "svc", + }), + }}, { + name: "Invalid resources percentage in annotations", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ServiceLabelKey: "svc", + }, + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "foo", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + corev1.ResourceName("memory"): resource.MustParse("128Mi"), + }, + }, + }}, + }, + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("25m"), + }, + }, + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "SERVING_SERVICE": "svc", + }), + }}, { + name: "resources percentage in annotations bigger than than math.MaxInt64", + rev: &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + Labels: map[string]string{ + serving.ServiceLabelKey: "svc", + }, + Annotations: map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "100", + }, + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: testProbe, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("memory"): resource.MustParse("900000Pi"), + }, + }, + }}, + }, + }, + }, + }, + lc: &logging.Config{}, + tc: &tracingconfig.Config{}, + oc: &metrics.ObservabilityConfig{}, + cc: &deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("25m"), + corev1.ResourceName("memory"): resource.MustParse("200Mi"), + }, + }, + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: defaultKnativeQReadinessProbe, + SecurityContext: queueSecurityContext, + // These changed based on the Revision and configs passed in. 
+ Image: "alpine", + Env: env(map[string]string{ + "SERVING_SERVICE": "svc", + }), + }}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := makeQueueContainer(test.rev, test.lc, test.tc, test.oc, test.cc) + if err != nil { + t.Fatal("makeQueueContainer returned error") + } + test.want.Env = append(test.want.Env, corev1.EnvVar{ + Name: "SERVING_READINESS_PROBE", + Value: probeJSON(test.rev.Spec.GetContainer()), + }) + sortEnv(got.Env) + sortEnv(test.want.Env) + if diff := cmp.Diff(test.want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("makeQueueContainerWithPercentageAnnotation (-want, +got) = %v", diff) + } + if test.want.Resources.Limits.Memory().Cmp(*got.Resources.Limits.Memory()) != 0 { + t.Errorf("Resources.Limits.Memory = %v, want: %v", got.Resources.Limits.Memory(), test.want.Resources.Limits.Memory()) + } + if test.want.Resources.Requests.Cpu().Cmp(*got.Resources.Requests.Cpu()) != 0 { + t.Errorf("Resources.Request.CPU = %v, want: %v", got.Resources.Requests.Cpu(), test.want.Resources.Requests.Cpu()) + } + if test.want.Resources.Requests.Memory().Cmp(*got.Resources.Requests.Memory()) != 0 { + t.Errorf("Resources.Requests.Memory = %v, want: %v", got.Resources.Requests.Memory(), test.want.Resources.Requests.Memory()) + } + if test.want.Resources.Limits.Cpu().Cmp(*got.Resources.Limits.Cpu()) != 0 { + t.Errorf("Resources.Limits.CPU = %v, want: %v", got.Resources.Limits.Cpu(), test.want.Resources.Limits.Cpu()) + } + }) + } +} + +func TestProbeGenerationHTTPDefaults(t *testing.T) { + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: 
&corev1.HTTPGetAction{ + Path: "/", + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 10, + }, + }}, + }, + }, + }, + } + + expectedProbe := &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: "127.0.0.1", + Path: "/", + Port: intstr.FromInt(int(v1alpha1.DefaultUserPort)), + Scheme: corev1.URISchemeHTTP, + HTTPHeaders: []corev1.HTTPHeader{{ + Name: network.KubeletProbeHeaderName, + Value: "queue", + }}, + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 10, + } + + wantProbeJSON, err := json.Marshal(expectedProbe) + if err != nil { + t.Fatal("failed to marshal expected probe") + } + + lc := &logging.Config{} + tc := &tracingconfig.Config{} + oc := &metrics.ObservabilityConfig{} + cc := &deployment.Config{} + want := &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "10"}, + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 10, + }, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{ + "SERVING_READINESS_PROBE": string(wantProbeJSON), + }), + SecurityContext: queueSecurityContext, + } + + got, err := makeQueueContainer(rev, lc, tc, oc, cc) + if err != nil { + t.Fatal("makeQueueContainer returned error") + } + sortEnv(got.Env) + if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("makeQueueContainer(-want, +got) = %v", diff) + } +} + +func TestProbeGenerationHTTP(t *testing.T) { + userPort := 12345 + probePath := "/health" + + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + Ports: []corev1.ContainerPort{{ + ContainerPort: int32(userPort), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: probePath, + Scheme: corev1.URISchemeHTTPS, + }, + }, + PeriodSeconds: 2, + TimeoutSeconds: 10, + }, + }}, + }, + }, + }, + } + + expectedProbe := &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Host: "127.0.0.1", + Path: probePath, + Port: intstr.FromInt(userPort), + Scheme: corev1.URISchemeHTTPS, + HTTPHeaders: []corev1.HTTPHeader{{ + Name: network.KubeletProbeHeaderName, + Value: "queue", + }}, + }, + }, + PeriodSeconds: 2, + TimeoutSeconds: 10, + } + + wantProbeJSON, err := json.Marshal(expectedProbe) + if err != nil { + t.Fatal("failed to marshal expected probe") + } + + lc := &logging.Config{} + tc := &tracingconfig.Config{} + oc := &metrics.ObservabilityConfig{} + cc := &deployment.Config{} + want := &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, 
queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "10"}, + }, + }, + PeriodSeconds: 2, + TimeoutSeconds: 10, + }, + // These changed based on the Revision and configs passed in. + Env: env(map[string]string{ + "USER_PORT": strconv.Itoa(userPort), + "SERVING_READINESS_PROBE": string(wantProbeJSON), + }), + SecurityContext: queueSecurityContext, + } + + got, err := makeQueueContainer(rev, lc, tc, oc, cc) + if err != nil { + t.Fatal("makeQueueContainer returned error") + } + sortEnv(got.Env) + if diff := cmp.Diff(want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("makeQueueContainer(-want, +got) = %v", diff) + } +} + +func TestTCPProbeGeneration(t *testing.T) { + userPort := 12345 + tests := []struct { + name string + rev v1alpha1.RevisionSpec + want *corev1.Container + wantProbe *corev1.Probe + }{{ + name: "knative tcp probe", + wantProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(userPort), + }, + }, + PeriodSeconds: 0, + SuccessThreshold: 3, + }, + rev: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + Ports: []corev1.ContainerPort{{ + ContainerPort: int32(userPort), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + PeriodSeconds: 0, + SuccessThreshold: 3, + }, + }}, + }, + }, + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: 
[]string{"/ko-app/queue", "-probe-period", "0"}, + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 10, + }, + // These changed based on the Revision and configs passed in. + Env: env(map[string]string{"USER_PORT": strconv.Itoa(userPort)}), + SecurityContext: queueSecurityContext, + }, + }, { + name: "tcp defaults", + rev: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + PeriodSeconds: 1, + }, + }}, + }, + }, + }, + wantProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(int(v1alpha1.DefaultUserPort)), + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 1, + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "1"}, + }, + }, + PeriodSeconds: 1, + TimeoutSeconds: 1, + }, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{}), + SecurityContext: queueSecurityContext, + }, + }, { + name: "user defined tcp probe", + wantProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(userPort), + }, + }, + PeriodSeconds: 2, + TimeoutSeconds: 15, + SuccessThreshold: 2, + FailureThreshold: 7, + InitialDelaySeconds: 3, + }, + rev: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + ContainerConcurrency: ptr.Int64(1), + TimeoutSeconds: ptr.Int64(45), + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: containerName, + Ports: []corev1.ContainerPort{{ + ContainerPort: int32(userPort), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + PeriodSeconds: 2, + TimeoutSeconds: 15, + SuccessThreshold: 2, + FailureThreshold: 7, + InitialDelaySeconds: 3, + }, + }}, + }, + }, + }, + want: &corev1.Container{ + // These are effectively constant + Name: QueueContainerName, + Resources: createQueueResources(make(map[string]string), &corev1.Container{}), + Ports: append(queueNonServingPorts, queueHTTPPort), + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/queue", "-probe-period", "15"}, + }, + }, + PeriodSeconds: 2, + TimeoutSeconds: 15, + SuccessThreshold: 2, + FailureThreshold: 7, + InitialDelaySeconds: 3, + }, + // These changed based on the Revision and configs passed in. 
+ Env: env(map[string]string{"USER_PORT": strconv.Itoa(userPort)}), + SecurityContext: queueSecurityContext, + }, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lc := &logging.Config{} + tc := &tracingconfig.Config{} + oc := &metrics.ObservabilityConfig{} + cc := &deployment.Config{} + testRev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + UID: "1234", + }, + Spec: test.rev, + } + wantProbeJSON, err := json.Marshal(test.wantProbe) + if err != nil { + t.Fatal("failed to marshal expected probe") + } + test.want.Env = append(test.want.Env, corev1.EnvVar{ + Name: "SERVING_READINESS_PROBE", + Value: string(wantProbeJSON), + }) + + got, err := makeQueueContainer(testRev, lc, tc, oc, cc) + if err != nil { + t.Fatal("makeQueueContainer returned error") + } + sortEnv(got.Env) + sortEnv(test.want.Env) + if diff := cmp.Diff(test.want, got, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" { + t.Errorf("makeQueueContainer (-want, +got) = %v", diff) + } + }) + } +} + +var defaultEnv = map[string]string{ + "SERVING_NAMESPACE": "foo", + "SERVING_SERVICE": "", + "SERVING_CONFIGURATION": "", + "SERVING_REVISION": "bar", + "CONTAINER_CONCURRENCY": "1", + "REVISION_TIMEOUT_SECONDS": "45", + "SERVING_LOGGING_CONFIG": "", + "SERVING_LOGGING_LEVEL": "", + "TRACING_CONFIG_BACKEND": "", + "TRACING_CONFIG_ZIPKIN_ENDPOINT": "", + "TRACING_CONFIG_STACKDRIVER_PROJECT_ID": "", + "TRACING_CONFIG_SAMPLE_RATE": "0.000000", + "TRACING_CONFIG_DEBUG": "false", + "SERVING_REQUEST_LOG_TEMPLATE": "", + "SERVING_REQUEST_METRICS_BACKEND": "", + "USER_PORT": strconv.Itoa(v1alpha1.DefaultUserPort), + "SYSTEM_NAMESPACE": system.Namespace(), + "METRICS_DOMAIN": pkgmetrics.Domain(), + "QUEUE_SERVING_PORT": "8012", + "USER_CONTAINER_NAME": containerName, + "ENABLE_VAR_LOG_COLLECTION": "false", + "VAR_LOG_VOLUME_NAME": varLogVolumeName, + "INTERNAL_VOLUME_PATH": internalVolumePath, + "ENABLE_PROFILING": "false", + 
"SERVING_ENABLE_PROBE_REQUEST_LOG": "false", +} + +func probeJSON(container *corev1.Container) string { + if container == nil { + return fmt.Sprintf(testProbeJSONTemplate, v1alpha1.DefaultUserPort) + } + + if ports := container.Ports; len(ports) > 0 && ports[0].ContainerPort != 0 { + return fmt.Sprintf(testProbeJSONTemplate, ports[0].ContainerPort) + } + return fmt.Sprintf(testProbeJSONTemplate, v1alpha1.DefaultUserPort) +} + +func env(overrides map[string]string) []corev1.EnvVar { + values := resources.UnionMaps(defaultEnv, overrides) + + var env []corev1.EnvVar + for key, value := range values { + env = append(env, corev1.EnvVar{ + Name: key, + Value: value, + }) + } + + env = append(env, []corev1.EnvVar{{ + Name: "SERVING_POD", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}, + }, + }, { + Name: "SERVING_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"}, + }, + }}...) + + sortEnv(env) + + return env +} + +func sortEnv(envs []corev1.EnvVar) { + sort.SliceStable(envs, func(i, j int) bool { + return envs[i].Name < envs[j].Name + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary.go new file mode 100644 index 0000000000..05919d548a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// resourceBoundary is the minimum and maximum resource allowed for a type of +type resourceBoundary struct { + min resource.Quantity + max resource.Quantity +} + +var ( + queueContainerRequestCPU = resourceBoundary{min: resource.MustParse("25m"), max: resource.MustParse("100m")} + queueContainerLimitCPU = resourceBoundary{min: resource.MustParse("40m"), max: resource.MustParse("500m")} + queueContainerRequestMemory = resourceBoundary{min: resource.MustParse("50Mi"), max: resource.MustParse("200Mi")} + queueContainerLimitMemory = resourceBoundary{min: resource.MustParse("200Mi"), max: resource.MustParse("500Mi")} +) + +func (boundary *resourceBoundary) applyBoundary(resource resource.Quantity) resource.Quantity { + if resource.Cmp(boundary.min) == -1 { + resource = boundary.min + } else if resource.Cmp(boundary.max) == 1 { + resource = boundary.max + } + return resource +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary_test.go new file mode 100644 index 0000000000..0384a21104 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/resources/resourceboundary_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestResourceBoundary(t *testing.T) { + tests := []struct { + name string + boundary resourceBoundary + resource resource.Quantity + want resource.Quantity + }{{ + name: "resource within boundary", + boundary: queueContainerRequestCPU, + resource: resource.MustParse("55m"), + want: resource.MustParse("55m"), + }, { + name: "resource lower than min boundary", + boundary: queueContainerRequestCPU, + resource: resource.MustParse("15m"), + want: resource.MustParse("25m"), + }, + { + name: "resource lower than min boundary", + boundary: queueContainerRequestCPU, + resource: resource.MustParse("110m"), + want: resource.MustParse("100m"), + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.boundary.applyBoundary(test.resource) + if test.want.Cmp(got) != 0 { + t.Errorf("Expected quantity %v got %v ", test.want, got) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision.go new file mode 100644 index 0000000000..4a1bad4738 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision.go @@ -0,0 +1,255 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + appsv1listers "k8s.io/client-go/listers/apps/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + cachinglisters "knative.dev/caching/pkg/client/listers/caching/v1alpha1" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + palisters "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/revision/config" +) + +type resolver interface { + Resolve(string, k8schain.Options, sets.String) (string, error) +} + +// Reconciler implements controller.Reconciler for Revision resources. +type Reconciler struct { + *reconciler.Base + + // lister indexes properties about Revision + revisionLister listers.RevisionLister + podAutoscalerLister palisters.PodAutoscalerLister + imageLister cachinglisters.ImageLister + deploymentLister appsv1listers.DeploymentLister + serviceLister corev1listers.ServiceLister + configMapLister corev1listers.ConfigMapLister + + resolver resolver + configStore reconciler.ConfigStore +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Revision resource +// with the current status of the resource. 
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.configStore.ToContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + logger.Info("Running reconcile Revision") + + // Get the Revision resource with this namespace/name + original, err := c.revisionLister.Revisions(namespace).Get(name) + // The resource may no longer exist, in which case we stop processing. + if apierrs.IsNotFound(err) { + logger.Info("Revision in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informer's copy. + rev := original.DeepCopy() + + // Reconcile this copy of the revision and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := c.reconcile(ctx, rev) + if equality.Semantic.DeepEqual(original.Status, rev.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = c.updateStatus(original, rev); err != nil { + logger.Warnw("Failed to update revision status", zap.Error(err)) + c.Recorder.Eventf(rev, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for Revision %q: %v", rev.Name, err) + return err + } + if reconcileErr != nil { + c.Recorder.Event(rev, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + return reconcileErr + } + // TODO(mattmoor): Remove this after 0.7 cuts. + // If the spec has changed, then assume we need an upgrade and issue a patch to trigger + // the webhook to upgrade via defaulting. Status updates do not trigger this due to the + // use of the /status resource. 
+ if !equality.Semantic.DeepEqual(original.Spec, rev.Spec) { + revisions := v1alpha1.SchemeGroupVersion.WithResource("revisions") + if err := c.MarkNeedsUpgrade(revisions, rev.Namespace, rev.Name); err != nil { + return err + } + } + return nil +} + +func (c *Reconciler) reconcileDigest(ctx context.Context, rev *v1alpha1.Revision) error { + // The image digest has already been resolved. + if rev.Status.ImageDigest != "" { + return nil + } + + var imagePullSecrets []string + for _, s := range rev.Spec.ImagePullSecrets { + imagePullSecrets = append(imagePullSecrets, s.Name) + } + cfgs := config.FromContext(ctx) + opt := k8schain.Options{ + Namespace: rev.Namespace, + ServiceAccountName: rev.Spec.ServiceAccountName, + ImagePullSecrets: imagePullSecrets, + } + digest, err := c.resolver.Resolve(rev.Spec.GetContainer().Image, + opt, cfgs.Deployment.RegistriesSkippingTagResolving) + if err != nil { + err = fmt.Errorf("failed to resolve image to digest: %w", err) + rev.Status.MarkContainerHealthyFalse(v1alpha1.ContainerMissing, + v1alpha1.RevisionContainerMissingMessage( + rev.Spec.GetContainer().Image, err.Error())) + return err + } + + rev.Status.ImageDigest = digest + + return nil +} + +func (c *Reconciler) reconcile(ctx context.Context, rev *v1alpha1.Revision) error { + if rev.GetDeletionTimestamp() != nil { + return nil + } + readyBeforeReconcile := rev.Status.IsReady() + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. 
+ rev.SetDefaults(v1.WithUpgradeViaDefaulting(ctx)) + + rev.Status.InitializeConditions() + c.updateRevisionLoggingURL(ctx, rev) + + if err := rev.ConvertUp(ctx, &v1beta1.Revision{}); err != nil { + if ce, ok := err.(*v1alpha1.CannotConvertError); ok { + rev.Status.MarkResourceNotConvertible(ce) + return nil + } + return err + } + + phases := []struct { + name string + f func(context.Context, *v1alpha1.Revision) error + }{{ + name: "image digest", + f: c.reconcileDigest, + }, { + name: "user deployment", + f: c.reconcileDeployment, + }, { + name: "image cache", + f: c.reconcileImageCache, + }, { + name: "PA", + f: c.reconcilePA, + }} + + for _, phase := range phases { + if err := phase.f(ctx, rev); err != nil { + return err + } + } + + readyAfterReconcile := rev.Status.IsReady() + if !readyBeforeReconcile && readyAfterReconcile { + c.Recorder.Event(rev, corev1.EventTypeNormal, "RevisionReady", + "Revision becomes ready upon all resources being ready") + } + + rev.Status.ObservedGeneration = rev.Generation + return nil +} + +func (c *Reconciler) updateRevisionLoggingURL( + ctx context.Context, + rev *v1alpha1.Revision, +) { + + config := config.FromContext(ctx) + if config.Observability.LoggingURLTemplate == "" { + return + } + + uid := string(rev.UID) + + rev.Status.LogURL = strings.Replace( + config.Observability.LoggingURLTemplate, + "${REVISION_UID}", uid, -1) +} + +func (c *Reconciler) updateStatus(existing *v1alpha1.Revision, desired *v1alpha1.Revision) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.ServingV1alpha1().Revisions(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. 
+ if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = c.ServingClientSet.ServingV1alpha1().Revisions(desired.Namespace).UpdateStatus(existing) + return err + }) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision_test.go new file mode 100644 index 0000000000..7bc41d0322 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/revision_test.go @@ -0,0 +1,788 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package revision + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "knative.dev/serving/pkg/apis/config" + + // Inject the fakes for informers this controller relies on. 
+ fakecachingclient "knative.dev/caching/pkg/client/injection/client/fake" + fakeimageinformer "knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake" + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakedeploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/fake" + fakeendpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakepainformer "knative.dev/serving/pkg/client/injection/informers/autoscaling/v1alpha1/podautoscaler/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-containerregistry/pkg/authn/k8schain" + "golang.org/x/sync/errgroup" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "knative.dev/pkg/apis" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/logging" + "knative.dev/pkg/metrics" + _ "knative.dev/pkg/metrics/testing" + "knative.dev/pkg/system" + tracingconfig "knative.dev/pkg/tracing/config" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/pkg/deployment" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/revision/resources" + resourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names" + + . 
"knative.dev/pkg/reconciler/testing" +) + +func testConfiguration() *v1alpha1.Configuration { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/configurations/test-config", + Name: "test-config", + Namespace: testNamespace, + }, + } +} + +func serviceName(rn string) string { + return rn +} + +func testReadyEndpoints(revName string) *corev1.Endpoints { + return &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName(revName), + Namespace: testNamespace, + Labels: map[string]string{ + serving.RevisionLabelKey: revName, + }, + }, + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "123.456.78.90", + }}, + }}, + } +} + +func testReadyPA(rev *v1alpha1.Revision) *av1alpha1.PodAutoscaler { + pa := resources.MakePA(rev) + pa.Status.InitializeConditions() + pa.Status.MarkActive() + pa.Status.ServiceName = serviceName(rev.Name) + return pa +} + +func newTestControllerWithConfig(t *testing.T, deploymentConfig *deployment.Config, configs ...*corev1.ConfigMap) ( + context.Context, + []controller.Informer, + *controller.Impl, + *configmap.ManualWatcher) { + + ctx, informers := SetupFakeContext(t) + configMapWatcher := &configmap.ManualWatcher{Namespace: system.Namespace()} + controller := NewController(ctx, configMapWatcher) + + controller.Reconciler.(*Reconciler).resolver = &nopResolver{} + + cms := []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: network.ConfigName, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: logging.ConfigMapName(), + }, + Data: map[string]string{ + "zap-logger-config": "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}", + "loglevel.queueproxy": "info", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: 
map[string]string{ + "logging.enable-var-log-collection": "true", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: tracingconfig.ConfigName, + }, + Data: map[string]string{ + "enable": "true", + "debug": "true", + "zipkin-endpoint": "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: autoscaler.ConfigName, + }, + Data: map[string]string{ + "max-scale-up-rate": "11.0", + "container-concurrency-target-percentage": "0.5", + "container-concurrency-target-default": "10.0", + "stable-window": "5m", + "panic-window": "10s", + "tick-interval": "2s", + }, + }, getTestDeploymentConfigMap(), getTestDefaultsConfigMap()} + + cms = append(cms, configs...) + + for _, configMap := range cms { + configMapWatcher.OnChange(configMap) + } + return ctx, informers, controller, configMapWatcher +} + +func createRevision( + t *testing.T, + ctx context.Context, + controller *controller.Impl, + rev *v1alpha1.Revision, +) *v1alpha1.Revision { + t.Helper() + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(rev.Namespace).Create(rev) + // Since Reconcile looks in the lister, we need to add it to the informer + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + if err := controller.Reconciler.Reconcile(context.Background(), KeyOrDie(rev)); err == nil { + rev, _, _ = addResourcesToInformers(t, ctx, rev) + } + return rev +} + +func updateRevision( + t *testing.T, + ctx context.Context, + controller *controller.Impl, + rev *v1alpha1.Revision, +) { + t.Helper() + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(rev.Namespace).Update(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Update(rev) + + if err := controller.Reconciler.Reconcile(context.Background(), KeyOrDie(rev)); err == nil { + addResourcesToInformers(t, ctx, rev) + } +} + +func addResourcesToInformers(t *testing.T, ctx context.Context, rev *v1alpha1.Revision) 
(*v1alpha1.Revision, *appsv1.Deployment, *av1alpha1.PodAutoscaler) { + t.Helper() + + rev, err := fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(rev.Namespace).Get(rev.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Revisions.Get(%v) = %v", rev.Name, err) + } + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + haveBuild := rev.Spec.DeprecatedBuildRef != nil + + ns := rev.Namespace + + paName := resourcenames.PA(rev) + pa, err := fakeservingclient.Get(ctx).AutoscalingV1alpha1().PodAutoscalers(rev.Namespace).Get(paName, metav1.GetOptions{}) + if apierrs.IsNotFound(err) && haveBuild { + // If we're doing a Build this won't exist yet. + } else if err != nil { + t.Errorf("PodAutoscalers.Get(%v) = %v", paName, err) + } else { + fakepainformer.Get(ctx).Informer().GetIndexer().Add(pa) + } + + imageName := resourcenames.ImageCache(rev) + image, err := fakecachingclient.Get(ctx).CachingV1alpha1().Images(rev.Namespace).Get(imageName, metav1.GetOptions{}) + if apierrs.IsNotFound(err) && haveBuild { + // If we're doing a Build this won't exist yet. + } else if err != nil { + t.Errorf("Caching.Images.Get(%v) = %v", imageName, err) + } else { + fakeimageinformer.Get(ctx).Informer().GetIndexer().Add(image) + } + + deploymentName := resourcenames.Deployment(rev) + deployment, err := fakekubeclient.Get(ctx).AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + if apierrs.IsNotFound(err) && haveBuild { + // If we're doing a Build this won't exist yet. 
+ } else if err != nil { + t.Errorf("Deployments.Get(%v) = %v", deploymentName, err) + } else { + fakedeploymentinformer.Get(ctx).Informer().GetIndexer().Add(deployment) + } + + return rev, deployment, pa +} + +type fixedResolver struct { + digest string +} + +func (r *fixedResolver) Resolve(_ string, _ k8schain.Options, _ sets.String) (string, error) { + return r.digest, nil +} + +type errorResolver struct { + err error +} + +func (r *errorResolver) Resolve(_ string, _ k8schain.Options, _ sets.String) (string, error) { + return "", r.err +} + +func TestResolutionFailed(t *testing.T) { + ctx, cancel, _, controller, _ := newTestController(t) + defer cancel() + + // Unconditionally return this error during resolution. + innerError := errors.New("i am the expected error message, hear me ROAR!") + controller.Reconciler.(*Reconciler).resolver = &errorResolver{innerError} + + rev := testRevision() + config := testConfiguration() + rev.OwnerReferences = append(rev.OwnerReferences, *kmeta.NewControllerRef(config)) + + createRevision(t, ctx, controller, rev) + + rev, err := fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Get(rev.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get revision: %v", err) + } + + // Ensure that the Revision status is updated. 
+ for _, ct := range []apis.ConditionType{"ContainerHealthy", "Ready"} { + got := rev.Status.GetCondition(ct) + want := &apis.Condition{ + Type: ct, + Status: corev1.ConditionFalse, + Reason: "ContainerMissing", + Message: v1alpha1.RevisionContainerMissingMessage( + rev.Spec.GetContainer().Image, "failed to resolve image to digest: "+innerError.Error()), + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) + } + } +} + +// TODO(mattmoor): add coverage of a Reconcile fixing a stale logging URL +func TestUpdateRevWithWithUpdatedLoggingURL(t *testing.T) { + deploymentConfig := getTestDeploymentConfig() + ctx, _, controller, watcher := newTestControllerWithConfig(t, deploymentConfig, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "true", + "logging.revision-url-template": "http://old-logging.test.com?filter=${REVISION_UID}", + }, + }, getTestDeploymentConfigMap(), + ) + revClient := fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace) + + rev := testRevision() + createRevision(t, ctx, controller, rev) + + // Update controllers logging URL + watcher.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "true", + "logging.revision-url-template": "http://new-logging.test.com?filter=${REVISION_UID}", + }, + }) + updateRevision(t, ctx, controller, rev) + + updatedRev, err := revClient.Get(rev.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get revision: %v", err) + } + + expectedLoggingURL := fmt.Sprintf("http://new-logging.test.com?filter=%s", rev.UID) + if updatedRev.Status.LogURL != expectedLoggingURL 
{ + t.Errorf("Updated revision does not have an updated logging URL: expected: %s, got: %s", expectedLoggingURL, updatedRev.Status.LogURL) + } +} + +// TODO(mattmoor): Remove when we have coverage of EnqueueEndpointsRevision +func TestMarkRevReadyUponEndpointBecomesReady(t *testing.T) { + ctx, cancel, _, controller, _ := newTestController(t) + defer cancel() + rev := testRevision() + + fakeRecorder := controller.Reconciler.(*Reconciler).Base.Recorder.(*record.FakeRecorder) + + // Look for the revision ready event. Events are delivered asynchronously so + // we need to use hooks here. + + deployingRev := createRevision(t, ctx, controller, rev) + + // The revision is not marked ready until an endpoint is created. + for _, ct := range []apis.ConditionType{"Ready"} { + got := deployingRev.Status.GetCondition(ct) + want := &apis.Condition{ + Type: ct, + Status: corev1.ConditionUnknown, + Reason: "Deploying", + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) + } + } + + endpoints := testReadyEndpoints(rev.Name) + fakeendpointsinformer.Get(ctx).Informer().GetIndexer().Add(endpoints) + pa := testReadyPA(rev) + fakepainformer.Get(ctx).Informer().GetIndexer().Add(pa) + f := controller.EnqueueLabelOfNamespaceScopedResource("", serving.RevisionLabelKey) + f(endpoints) + if err := controller.Reconciler.Reconcile(context.Background(), KeyOrDie(rev)); err != nil { + t.Errorf("Reconcile() = %v", err) + } + + // Make sure that the changes from the Reconcile are reflected in our Informers. + readyRev, _, _ := addResourcesToInformers(t, ctx, rev) + + // After reconciling the endpoint, the revision should be ready. 
+ for _, ct := range []apis.ConditionType{"Ready"} { + got := readyRev.Status.GetCondition(ct) + want := &apis.Condition{ + Type: ct, + Status: corev1.ConditionTrue, + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected revision conditions diff (-want +got): %v", diff) + } + } + + select { + case got := <-fakeRecorder.Events: + const want = "Normal RevisionReady Revision becomes ready upon all resources being ready" + if got != want { + t.Errorf("<-Events = %s, wanted %s", got, want) + } + case <-time.After(3 * time.Second): + t.Error("Timeout") + } +} + +func TestNoQueueSidecarImageUpdateFail(t *testing.T) { + ctx, cancel, _, controller, watcher := newTestController(t) + defer cancel() + + rev := testRevision() + config := testConfiguration() + rev.OwnerReferences = append( + rev.OwnerReferences, + *kmeta.NewControllerRef(config), + ) + // Update controller config with no side car image + watcher.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-controller", + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }) + createRevision(t, ctx, controller, rev) + + // Look for the revision deployment. + _, err := fakekubeclient.Get(ctx).AppsV1().Deployments(system.Namespace()).Get(rev.Name, metav1.GetOptions{}) + if !apierrs.IsNotFound(err) { + t.Errorf("Expected revision deployment %s to not exist.", rev.Name) + } +} + +// This covers *error* paths in receiveNetworkConfig, since "" is not a valid value. 
+func TestIstioOutboundIPRangesInjection(t *testing.T) { + var annotations map[string]string + + // A valid IP range + in := " 10.10.10.0/24\r,,\t,\n,," + want := "10.10.10.0/24" + annotations = getPodAnnotationsForConfig(t, in, "") + if got := annotations[resources.IstioOutboundIPRangeAnnotation]; want != got { + t.Fatalf("%v annotation expected to be %v, but is %v.", resources.IstioOutboundIPRangeAnnotation, want, got) + } + + // Multiple valid ranges with whitespaces + in = " \t\t10.10.10.0/24, ,,\t\n\r\n,10.240.10.0/14\n, 192.192.10.0/16" + want = "10.10.10.0/24,10.240.10.0/14,192.192.10.0/16" + annotations = getPodAnnotationsForConfig(t, in, "") + if got := annotations[resources.IstioOutboundIPRangeAnnotation]; want != got { + t.Fatalf("%v annotation expected to be %v, but is %v.", resources.IstioOutboundIPRangeAnnotation, want, got) + } + + // An invalid IP range + in = "10.10.10.10/33" + annotations = getPodAnnotationsForConfig(t, in, "") + if got, ok := annotations[resources.IstioOutboundIPRangeAnnotation]; !ok { + t.Fatalf("Expected to have no %v annotation for invalid option %v. 
But found value %v", resources.IstioOutboundIPRangeAnnotation, want, got) + } + + // Configuration has an annotation override - its value must be preserved + want = "10.240.10.0/14" + annotations = getPodAnnotationsForConfig(t, "", want) + if got := annotations[resources.IstioOutboundIPRangeAnnotation]; got != want { + t.Fatalf("%v annotation is expected to have %v but got %v", resources.IstioOutboundIPRangeAnnotation, want, got) + } + annotations = getPodAnnotationsForConfig(t, "10.10.10.0/24", want) + if got := annotations[resources.IstioOutboundIPRangeAnnotation]; got != want { + t.Fatalf("%v annotation is expected to have %v but got %v", resources.IstioOutboundIPRangeAnnotation, want, got) + } +} + +func getPodAnnotationsForConfig(t *testing.T, configMapValue string, configAnnotationOverride string) map[string]string { + controllerConfig := getTestDeploymentConfig() + ctx, _, controller, watcher := newTestControllerWithConfig(t, controllerConfig) + + // Resolve image references to this "digest" + digest := "foo@sha256:deadbeef" + controller.Reconciler.(*Reconciler).resolver = &fixedResolver{digest} + + watcher.OnChange(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + network.IstioOutboundIPRangesKey: configMapValue, + }}) + + rev := testRevision() + config := testConfiguration() + if len(configAnnotationOverride) > 0 { + rev.ObjectMeta.Annotations = map[string]string{resources.IstioOutboundIPRangeAnnotation: configAnnotationOverride} + } + + rev.OwnerReferences = append( + rev.OwnerReferences, + *kmeta.NewControllerRef(config), + ) + + createRevision(t, ctx, controller, rev) + + expectedDeploymentName := fmt.Sprintf("%s-deployment", rev.Name) + deployment, err := fakekubeclient.Get(ctx).AppsV1().Deployments(testNamespace).Get(expectedDeploymentName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get serving deployment: %v", err) + } + return 
deployment.Spec.Template.ObjectMeta.Annotations +} + +func TestGlobalResyncOnConfigMapUpdateRevision(t *testing.T) { + // Test that changes to the ConfigMap result in the desired changes on an existing + // revision. + tests := []struct { + name string + configMapToUpdate *corev1.ConfigMap + callback func(*testing.T) func(runtime.Object) HookResult + }{{ + name: "Update LoggingURL", // Should update LogURL on revision + configMapToUpdate: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "true", + "logging.revision-url-template": "http://log-here.test.com?filter=${REVISION_UID}", + }, + }, + callback: func(t *testing.T) func(runtime.Object) HookResult { + return func(obj runtime.Object) HookResult { + revision := obj.(*v1alpha1.Revision) + t.Logf("Revision updated: %v", revision.Name) + + expected := "http://log-here.test.com?filter=" + got := revision.Status.LogURL + if strings.HasPrefix(got, expected) { + return HookComplete + } + + t.Logf("No update occurred; expected: %s got: %s", expected, got) + return HookIncomplete + } + }, + }, { + name: "Update ContainerConcurrency", // Should update ContainerConcurrency on revision spec + configMapToUpdate: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: config.DefaultsConfigName, + }, + Data: map[string]string{ + "container-concurrency": "3", + }, + }, + callback: func(t *testing.T) func(runtime.Object) HookResult { + return func(obj runtime.Object) HookResult { + revision := obj.(*v1alpha1.Revision) + t.Logf("Revision updated: %v", revision.Name) + + expected := int64(3) + got := *(revision.Spec.ContainerConcurrency) + if got != expected { + return HookComplete + } + + t.Logf("No update occurred; expected: %d got: %d", expected, got) + return HookIncomplete + } + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + controllerConfig := getTestDeploymentConfig() + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig) + + ctx, cancel := context.WithCancel(ctx) + grp := errgroup.Group{} + + servingClient := fakeservingclient.Get(ctx) + + rev := testRevision() + revClient := servingClient.ServingV1alpha1().Revisions(rev.Namespace) + + h := NewHooks() + + h.OnUpdate(&servingClient.Fake, "revisions", test.callback(t)) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("Failed to start configuration manager: %v", err) + } + + grp.Go(func() error { return ctrl.Run(1, ctx.Done()) }) + + revClient.Create(rev) + + watcher.OnChange(test.configMapToUpdate) + + if err := h.WaitForHooks(1 * time.Second); err != nil { + t.Errorf("%s Global Resync Failed: %v", test.name, err) + } + }) + } +} + +func TestGlobalResyncOnConfigMapUpdateDeployment(t *testing.T) { + // Test that changes to the ConfigMap result in the desired changes on an existing + // deployment. 
+ tests := []struct { + name string + configMapToUpdate *corev1.ConfigMap + callback func(*testing.T) func(runtime.Object) HookResult + }{{ + name: "Update Istio Outbound IP Ranges", // Should update metadata on Deployment + configMapToUpdate: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "istio.sidecar.includeOutboundIPRanges": "10.0.0.1/24", + }, + }, + callback: func(t *testing.T) func(runtime.Object) HookResult { + return func(obj runtime.Object) HookResult { + deployment := obj.(*appsv1.Deployment) + t.Logf("Deployment updated: %v", deployment.Name) + + expected := "10.0.0.1/24" + annotations := deployment.Spec.Template.ObjectMeta.Annotations + got := annotations[resources.IstioOutboundIPRangeAnnotation] + + if got != expected { + t.Logf("No update occurred; expected: %s got: %s", expected, got) + return HookIncomplete + } + + return HookComplete + } + }, + }, { + name: "Disable /var/log Collection", // Should set ENABLE_VAR_LOG_COLLECTION to false + configMapToUpdate: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: metrics.ConfigMapName(), + }, + Data: map[string]string{ + "logging.enable-var-log-collection": "false", + }, + }, + callback: func(t *testing.T) func(runtime.Object) HookResult { + return func(obj runtime.Object) HookResult { + deployment := obj.(*appsv1.Deployment) + t.Logf("Deployment updated: %v", deployment.Name) + + for _, c := range deployment.Spec.Template.Spec.Containers { + if c.Name == resources.QueueContainerName { + for _, e := range c.Env { + if e.Name == "ENABLE_VAR_LOG_COLLECTION" { + flag, err := strconv.ParseBool(e.Value) + if err != nil { + t.Errorf("Invalid ENABLE_VAR_LOG_COLLECTION value: %q", e.Name) + return HookIncomplete + } + if flag { + t.Errorf("ENABLE_VAR_LOG_COLLECTION = %v, want: %v", flag, false) + return HookIncomplete + } + return HookComplete + } + } + + 
t.Error("ENABLE_VAR_LOG_COLLECTION is not set") + return HookIncomplete + } + } + t.Logf("The deployment spec doesn't contain the expected container %q", resources.QueueContainerName) + return HookIncomplete + } + }, + }, { + name: "Update QueueProxy Image", // Should update queueSidecarImage + configMapToUpdate: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: deployment.ConfigName, + }, + Data: map[string]string{ + "queueSidecarImage": "myAwesomeQueueImage", + }, + }, + callback: func(t *testing.T) func(runtime.Object) HookResult { + return func(obj runtime.Object) HookResult { + deployment := obj.(*appsv1.Deployment) + t.Logf("Deployment updated: %v", deployment.Name) + + expected := "myAwesomeQueueImage" + + var got string + for _, c := range deployment.Spec.Template.Spec.Containers { + if c.Name == resources.QueueContainerName { + got = c.Image + if got == expected { + return HookComplete + } + } + } + + t.Logf("No update occurred; expected: %s got: %s", expected, got) + return HookIncomplete + } + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + controllerConfig := getTestDeploymentConfig() + ctx, informers, ctrl, watcher := newTestControllerWithConfig(t, controllerConfig) + + ctx, cancel := context.WithCancel(ctx) + grp := errgroup.Group{} + + kubeClient := fakekubeclient.Get(ctx) + + rev := testRevision() + revClient := fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(rev.Namespace) + h := NewHooks() + h.OnUpdate(&kubeClient.Fake, "deployments", test.callback(t)) + + // Wait for the deployment creation to trigger the global resync. This + // avoids the create and update being coalesced into one event. + h.OnCreate(&kubeClient.Fake, "deployments", func(obj runtime.Object) HookResult { + watcher.OnChange(test.configMapToUpdate) + return HookComplete + }) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) 
+ if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("Failed to start configuration manager: %v", err) + } + + grp.Go(func() error { return ctrl.Run(1, ctx.Done()) }) + + revClient.Create(rev) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Errorf("%s Global Resync Failed: %v", test.name, err) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/revision/table_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/revision/table_test.go new file mode 100644 index 0000000000..ae879e3f44 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/revision/table_test.go @@ -0,0 +1,769 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package revision + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + caching "knative.dev/caching/pkg/apis/caching/v1alpha1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + tracingconfig "knative.dev/pkg/tracing/config" + asv1a1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/metrics" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/revision/config" + "knative.dev/serving/pkg/reconciler/revision/resources" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + // Make sure Reconcile handles bad keys. + Key: "too/many/parts", + }, { + Name: "key not found", + // Make sure Reconcile handles good keys that don't exist. + Key: "foo/not-found", + }, { + Name: "nop deletion reconcile", + // Test that with a DeletionTimestamp we do nothing. + Objects: []runtime.Object{ + rev("foo", "delete-pending", WithRevisionDeletionTimestamp), + }, + Key: "foo/delete-pending", + }, { + Name: "first revision reconciliation", + // Test the simplest successful reconciliation flow. + // We feed in a well formed Revision where none of its sub-resources exist, + // and we exect it to create them and initialize the Revision's status. 
+ Objects: []runtime.Object{ + rev("foo", "first-reconcile"), + }, + WantCreates: []runtime.Object{ + // The first reconciliation of a Revision creates the following resources. + pa("foo", "first-reconcile"), + deploy(t, "foo", "first-reconcile"), + image("foo", "first-reconcile"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "first-reconcile", + // The first reconciliation Populates the following status properties. + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying")), + }}, + Key: "foo/first-reconcile", + }, { + Name: "failure updating revision status", + // This starts from the first reconciliation case above and induces a failure + // updating the revision status. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "revisions"), + }, + Objects: []runtime.Object{ + rev("foo", "update-status-failure"), + pa("foo", "update-status-failure"), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. + deploy(t, "foo", "update-status-failure"), + image("foo", "update-status-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "update-status-failure", + // Despite failure, the following status properties are set. + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for Revision %q: %v", + "update-status-failure", "inducing failure for update revisions"), + }, + Key: "foo/update-status-failure", + }, { + Name: "failure creating pa", + // This starts from the first reconciliation case above and induces a failure + // creating the PA. 
+ WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "podautoscalers"), + }, + Objects: []runtime.Object{ + rev("foo", "create-pa-failure"), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. + pa("foo", "create-pa-failure"), + deploy(t, "foo", "create-pa-failure"), + image("foo", "create-pa-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "create-pa-failure", + // Despite failure, the following status properties are set. + WithLogURL, WithInitRevConditions, + MarkDeploying("Deploying")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `failed to create PA "create-pa-failure": inducing failure for create podautoscalers`), + }, + Key: "foo/create-pa-failure", + }, { + Name: "failure creating user deployment", + // This starts from the first reconciliation case above and induces a failure + // creating the user's deployment + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "deployments"), + }, + Objects: []runtime.Object{ + rev("foo", "create-user-deploy-failure"), + pa("foo", "create-user-deploy-failure"), + }, + WantCreates: []runtime.Object{ + // We still see the following creates before the failure is induced. + deploy(t, "foo", "create-user-deploy-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "create-user-deploy-failure", + // Despite failure, the following status properties are set. + WithLogURL, WithInitRevConditions, + MarkDeploying("Deploying")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `failed to create deployment "create-user-deploy-failure-deployment": inducing failure for create deployments`), + }, + Key: "foo/create-user-deploy-failure", + }, { + Name: "stable revision reconciliation", + // Test a simple stable reconciliation of an Active Revision. 
+ // We feed in a Revision and the resources it controls in a steady + // state (immediately post-creation), and verify that no changes + // are necessary. + Objects: []runtime.Object{ + rev("foo", "stable-reconcile", WithLogURL, AllUnknownConditions), + pa("foo", "stable-reconcile", WithReachability(asv1a1.ReachabilityUnknown)), + + deploy(t, "foo", "stable-reconcile"), + image("foo", "stable-reconcile"), + }, + // No changes are made to any objects. + Key: "foo/stable-reconcile", + }, { + Name: "stable revision reconciliation (needs upgrade)", + // Test a simple reconciliation of a steady state in a pre-beta form, + // which should result in us patching the revision with an annotation + // to force a webhook upgrade. + Objects: []runtime.Object{ + rev("foo", "needs-upgrade", WithLogURL, AllUnknownConditions, func(rev *v1alpha1.Revision) { + // Start the revision in the old form. + rev.Spec.DeprecatedContainer = &rev.Spec.Containers[0] + rev.Spec.Containers = nil + }), + pa("foo", "needs-upgrade", WithReachability(asv1a1.ReachabilityUnknown)), + deploy(t, "foo", "needs-upgrade"), + image("foo", "needs-upgrade"), + }, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "needs-upgrade", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + Key: "foo/needs-upgrade", + }, { + Name: "update deployment containers", + // Test that we update a deployment with new containers when they disagree + // with our desired spec. 
+ Objects: []runtime.Object{ + rev("foo", "fix-containers", + WithLogURL, AllUnknownConditions), + pa("foo", "fix-containers", WithReachability(asv1a1.ReachabilityUnknown)), + changeContainers(deploy(t, "foo", "fix-containers")), + image("foo", "fix-containers"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: deploy(t, "foo", "fix-containers"), + }}, + Key: "foo/fix-containers", + }, { + Name: "failure updating deployment", + // Test that we handle an error updating the deployment properly. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "deployments"), + }, + Objects: []runtime.Object{ + rev("foo", "failure-update-deploy", + withK8sServiceName("whateves"), WithLogURL, AllUnknownConditions), + pa("foo", "failure-update-deploy"), + changeContainers(deploy(t, "foo", "failure-update-deploy")), + image("foo", "failure-update-deploy"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: deploy(t, "foo", "failure-update-deploy"), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + `failed to update deployment "failure-update-deploy-deployment": inducing failure for update deployments`), + }, + Key: "foo/failure-update-deploy", + }, { + Name: "deactivated revision is stable", + // Test a simple stable reconciliation of an inactive Revision. + // We feed in a Revision and the resources it controls in a steady + // state (port-Reserve), and verify that no changes are necessary. 
+ Objects: []runtime.Object{ + rev("foo", "stable-deactivation", + WithLogURL, MarkRevisionReady, + MarkInactive("NoTraffic", "This thing is inactive.")), + pa("foo", "stable-deactivation", + WithNoTraffic("NoTraffic", "This thing is inactive.")), + deploy(t, "foo", "stable-deactivation"), + image("foo", "stable-deactivation"), + }, + Key: "foo/stable-deactivation", + }, { + Name: "pa is ready", + Objects: []runtime.Object{ + rev("foo", "pa-ready", + withK8sServiceName("old-stuff"), WithLogURL, AllUnknownConditions), + pa("foo", "pa-ready", WithTraffic, WithPAStatusService("new-stuff"), WithReachability(asv1a1.ReachabilityUnknown)), + deploy(t, "foo", "pa-ready"), + image("foo", "pa-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pa-ready", withK8sServiceName("new-stuff"), + WithLogURL, + // When the endpoint and pa are ready, then we will see the + // Revision become ready. + MarkRevisionReady), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/pa-ready", + }, { + Name: "pa not ready", + // Test propagating the pa not ready status to the Revision. + Objects: []runtime.Object{ + rev("foo", "pa-not-ready", + withK8sServiceName("somebody-told-me"), WithLogURL, + MarkRevisionReady), + pa("foo", "pa-not-ready", + WithPAStatusService("its-not-confidential"), + WithBufferedTraffic("Something", "This is something longer")), + readyDeploy(deploy(t, "foo", "pa-not-ready")), + image("foo", "pa-not-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pa-not-ready", + WithLogURL, MarkRevisionReady, + withK8sServiceName("its-not-confidential"), + // When we reconcile a ready state and our pa is in an activating + // state, we should see the following mutation. 
+ MarkActivating("Something", "This is something longer"), + ), + }}, + Key: "foo/pa-not-ready", + }, { + Name: "pa inactive", + // Test propagating the inactivity signal from the pa to the Revision. + Objects: []runtime.Object{ + rev("foo", "pa-inactive", + withK8sServiceName("something-in-the-way"), WithLogURL, MarkRevisionReady), + pa("foo", "pa-inactive", + WithNoTraffic("NoTraffic", "This thing is inactive.")), + readyDeploy(deploy(t, "foo", "pa-inactive")), + image("foo", "pa-inactive"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pa-inactive", + WithLogURL, MarkRevisionReady, + // When we reconcile an "all ready" revision when the PA + // is inactive, we should see the following change. + MarkInactive("NoTraffic", "This thing is inactive.")), + }}, + Key: "foo/pa-inactive", + }, { + Name: "pa inactive, but has service", + // Test propagating the inactivity signal from the pa to the Revision. + // But propagatethe service name. + Objects: []runtime.Object{ + rev("foo", "pa-inactive", + withK8sServiceName("here-comes-the-sun"), WithLogURL, MarkRevisionReady), + pa("foo", "pa-inactive", + WithNoTraffic("NoTraffic", "This thing is inactive."), + WithPAStatusService("pa-inactive-svc")), + readyDeploy(deploy(t, "foo", "pa-inactive")), + image("foo", "pa-inactive"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pa-inactive", + WithLogURL, MarkRevisionReady, + withK8sServiceName("pa-inactive-svc"), + // When we reconcile an "all ready" revision when the PA + // is inactive, we should see the following change. + MarkInactive("NoTraffic", "This thing is inactive.")), + }}, + Key: "foo/pa-inactive", + }, { + Name: "mutated pa gets fixed", + // This test validates, that when users mess with the pa directly + // we bring it back to the required shape. 
+ // Protocol type is the only thing that can be changed on PA + Objects: []runtime.Object{ + rev("foo", "fix-mutated-pa", + withK8sServiceName("ill-follow-the-sun"), WithLogURL, MarkRevisionReady), + pa("foo", "fix-mutated-pa", WithProtocolType(networking.ProtocolH2C), + WithTraffic, WithPAStatusService("fix-mutated-pa")), + deploy(t, "foo", "fix-mutated-pa"), + image("foo", "fix-mutated-pa"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "fix-mutated-pa", + WithLogURL, AllUnknownConditions, + // When our reconciliation has to change the service + // we should see the following mutations to status. + withK8sServiceName("fix-mutated-pa"), WithLogURL, MarkRevisionReady), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "fix-mutated-pa", WithTraffic, + WithPAStatusService("fix-mutated-pa")), + }}, + Key: "foo/fix-mutated-pa", + }, { + Name: "mutated pa gets error during the fix", + // Same as above, but will fail during the update. + Objects: []runtime.Object{ + rev("foo", "fix-mutated-pa-fail", + withK8sServiceName("some-old-stuff"), + WithLogURL, AllUnknownConditions), + pa("foo", "fix-mutated-pa-fail", WithProtocolType(networking.ProtocolH2C), WithReachability(asv1a1.ReachabilityUnknown)), + deploy(t, "foo", "fix-mutated-pa-fail"), + image("foo", "fix-mutated-pa-fail"), + }, + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "podautoscalers"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "fix-mutated-pa-fail", WithReachability(asv1a1.ReachabilityUnknown)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `failed to update PA "fix-mutated-pa-fail": inducing failure for update podautoscalers`), + }, + Key: "foo/fix-mutated-pa-fail", + }, { + Name: "surface deployment timeout", + // Test the propagation of ProgressDeadlineExceeded from Deployment. 
+ // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a ProgressDeadlineExceeded + // condition. It then verifies that Reconcile propagates this into the + // status of the Revision. + Objects: []runtime.Object{ + rev("foo", "deploy-timeout", + withK8sServiceName("the-taxman"), WithLogURL, MarkActive), + pa("foo", "deploy-timeout"), // pa can't be ready since deployment times out. + timeoutDeploy(deploy(t, "foo", "deploy-timeout"), "I timed out!"), + image("foo", "deploy-timeout"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "deploy-timeout", + WithLogURL, AllUnknownConditions, + // When the revision is reconciled after a Deployment has + // timed out, we should see it marked with the PDE state. + MarkProgressDeadlineExceeded("I timed out!")), + }}, + Key: "foo/deploy-timeout", + }, { + Name: "surface replica failure", + // Test the propagation of FailedCreate from Deployment. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a FailedCreate condition. + // It then verifies that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "deploy-replica-failure", + withK8sServiceName("the-taxman"), WithLogURL, MarkActive), + pa("foo", "deploy-replica-failure"), + replicaFailureDeploy(deploy(t, "foo", "deploy-replica-failure"), "I replica failed!"), + image("foo", "deploy-replica-failure"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "deploy-replica-failure", + WithLogURL, AllUnknownConditions, + // When the revision is reconciled after a Deployment has + // timed out, we should see it marked with the FailedCreate state. 
+ MarkResourcesUnavailable("FailedCreate", "I replica failed!")), + }}, + Key: "foo/deploy-replica-failure", + }, { + Name: "surface ImagePullBackoff", + // Test the propagation of ImagePullBackoff from user container. + Objects: []runtime.Object{ + rev("foo", "pull-backoff", + withK8sServiceName("the-taxman"), WithLogURL, MarkActivating("Deploying", "")), + pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnknown)), // pa can't be ready since deployment times out. + pod(t, "foo", "pull-backoff", WithWaitingContainer("user-container", "ImagePullBackoff", "can't pull it")), + timeoutDeploy(deploy(t, "foo", "pull-backoff"), "Timed out!"), + image("foo", "pull-backoff"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pull-backoff", + WithLogURL, AllUnknownConditions, + MarkResourcesUnavailable("ImagePullBackoff", "can't pull it")), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: pa("foo", "pull-backoff", WithReachability(asv1a1.ReachabilityUnreachable)), + }}, + Key: "foo/pull-backoff", + }, { + Name: "surface pod errors", + // Test the propagation of the termination state of a Pod into the revision. + // This initializes the world to the stable state after its first reconcile, + // but changes the user deployment to have a failing pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-error", + withK8sServiceName("a-pod-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-error"), // PA can't be ready, since no traffic. 
+ pod(t, "foo", "pod-error", WithFailingContainer("user-container", 5, "I failed man!")), + deploy(t, "foo", "pod-error"), + image("foo", "pod-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-error", + WithLogURL, AllUnknownConditions, MarkContainerExiting(5, v1alpha1.RevisionContainerExitingMessage("I failed man!"))), + }}, + Key: "foo/pod-error", + }, { + Name: "surface pod schedule errors", + // Test the propagation of the scheduling errors of Pod into the revision. + // This initializes the world to unschedule pod. It then verifies + // that Reconcile propagates this into the status of the Revision. + Objects: []runtime.Object{ + rev("foo", "pod-schedule-error", + withK8sServiceName("a-pod-schedule-error"), WithLogURL, AllUnknownConditions, MarkActive), + pa("foo", "pod-schedule-error"), // PA can't be ready, since no traffic. + pod(t, "foo", "pod-schedule-error", WithUnschedulableContainer("Insufficient energy", "Unschedulable")), + deploy(t, "foo", "pod-schedule-error"), + image("foo", "pod-schedule-error"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "pod-schedule-error", + WithLogURL, AllUnknownConditions, MarkResourcesUnavailable("Insufficient energy", "Unschedulable")), + }}, + Key: "foo/pod-schedule-error", + }, { + Name: "ready steady state", + // Test the transition that Reconcile makes when Endpoints become ready on the + // SKS owned services, which is signalled by pa having servince name. + // This puts the world into the stable post-reconcile state for an Active + // Revision. It then creates an Endpoints resource with active subsets. + // This signal should make our Reconcile mark the Revision as Ready. 
+ Objects: []runtime.Object{ + rev("foo", "steady-ready", withK8sServiceName("very-steady"), WithLogURL), + pa("foo", "steady-ready", WithTraffic, WithPAStatusService("steadier-even")), + deploy(t, "foo", "steady-ready"), + image("foo", "steady-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "steady-ready", withK8sServiceName("steadier-even"), WithLogURL, + // All resources are ready to go, we should see the revision being + // marked ready + MarkRevisionReady), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "RevisionReady", "Revision becomes ready upon all resources being ready"), + }, + Key: "foo/steady-ready", + }, { + Name: "lost pa owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic, WithPodAutoscalerOwnersRemoved), + deploy(t, "foo", "missing-owners"), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("lesser-revision"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for PodAutoscaler we see this update. 
+ MarkResourceNotOwned("PodAutoscaler", "missing-owners")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own PodAutoscaler: "missing-owners"`), + }, + Key: "foo/missing-owners", + }, { + Name: "lost deployment owner ref", + WantErr: true, + Objects: []runtime.Object{ + rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady), + pa("foo", "missing-owners", WithTraffic), + noOwner(deploy(t, "foo", "missing-owners")), + image("foo", "missing-owners"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "missing-owners", withK8sServiceName("youre-gonna-lose"), WithLogURL, + MarkRevisionReady, + // When we're missing the OwnerRef for Deployment we see this update. + MarkResourceNotOwned("Deployment", "missing-owners-deployment")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `revision: "missing-owners" does not own Deployment: "missing-owners-deployment"`), + }, + Key: "foo/missing-owners", + }, { + Name: "image pull secrets", + // This test case tests that the image pull secrets from revision propagate to deployment and image + Objects: []runtime.Object{ + rev("foo", "image-pull-secrets", WithImagePullSecrets("foo-secret")), + }, + WantCreates: []runtime.Object{ + pa("foo", "image-pull-secrets"), + deployImagePullSecrets(deploy(t, "foo", "image-pull-secrets"), "foo-secret"), + imagePullSecrets(image("foo", "image-pull-secrets"), "foo-secret"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: rev("foo", "image-pull-secrets", + WithImagePullSecrets("foo-secret"), + WithLogURL, AllUnknownConditions, MarkDeploying("Deploying")), + }}, + Key: "foo/image-pull-secrets", + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, 
cmw), + revisionLister: listers.GetRevisionLister(), + podAutoscalerLister: listers.GetPodAutoscalerLister(), + imageLister: listers.GetImageLister(), + deploymentLister: listers.GetDeploymentLister(), + serviceLister: listers.GetK8sServiceLister(), + configMapLister: listers.GetConfigMapLister(), + resolver: &nopResolver{}, + configStore: &testConfigStore{config: ReconcilerTestConfig()}, + } + })) +} + +func readyDeploy(deploy *appsv1.Deployment) *appsv1.Deployment { + deploy.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }} + return deploy +} + +func timeoutDeploy(deploy *appsv1.Deployment, message string) *appsv1.Deployment { + deploy.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + Reason: "ProgressDeadlineExceeded", + Message: message, + }} + return deploy +} + +func replicaFailureDeploy(deploy *appsv1.Deployment, message string) *appsv1.Deployment { + deploy.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentReplicaFailure, + Status: corev1.ConditionTrue, + Reason: "FailedCreate", + Message: message, + }} + return deploy +} + +func noOwner(deploy *appsv1.Deployment) *appsv1.Deployment { + deploy.OwnerReferences = nil + return deploy +} + +func deployImagePullSecrets(deploy *appsv1.Deployment, secretName string) *appsv1.Deployment { + deploy.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{ + Name: secretName, + }} + return deploy +} + +func imagePullSecrets(image *caching.Image, secretName string) *caching.Image { + image.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{ + Name: secretName, + }} + return image +} + +func changeContainers(deploy *appsv1.Deployment) *appsv1.Deployment { + podSpec := deploy.Spec.Template.Spec + for i := range podSpec.Containers { + podSpec.Containers[i].Image = "asdf" + } + return deploy +} + +func rev(namespace, name string, ro 
...RevisionOption) *v1alpha1.Revision { + r := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: "test-uid", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + } + r.SetDefaults(context.Background()) + + for _, opt := range ro { + opt(r) + } + return r +} + +func withK8sServiceName(sn string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.ServiceName = sn + } +} + +// TODO(mattmoor): Come up with a better name for this. +func AllUnknownConditions(r *v1alpha1.Revision) { + WithInitRevConditions(r) + MarkDeploying("")(r) + MarkActivating("Deploying", "")(r) +} + +type configOption func(*config.Config) + +func deploy(t *testing.T, namespace, name string, opts ...interface{}) *appsv1.Deployment { + t.Helper() + cfg := ReconcilerTestConfig() + + for _, opt := range opts { + if configOpt, ok := opt.(configOption); ok { + configOpt(cfg) + } + } + + rev := rev(namespace, name) + + for _, opt := range opts { + if revOpt, ok := opt.(RevisionOption); ok { + revOpt(rev) + } + } + + // Do this here instead of in `rev` itself to ensure that we populate defaults + // before calling MakeDeployment within Reconcile. 
+ rev.SetDefaults(context.Background()) + deployment, err := resources.MakeDeployment(rev, cfg.Logging, cfg.Tracing, cfg.Network, + cfg.Observability, cfg.Deployment, + ) + + if err != nil { + t.Fatal("failed to create deployment") + } + return deployment +} + +func image(namespace, name string, co ...configOption) *caching.Image { + config := ReconcilerTestConfig() + for _, opt := range co { + opt(config) + } + + return resources.MakeImageCache(rev(namespace, name)) +} + +func pa(namespace, name string, ko ...PodAutoscalerOption) *asv1a1.PodAutoscaler { + rev := rev(namespace, name) + k := resources.MakePA(rev) + + for _, opt := range ko { + opt(k) + } + return k +} + +func pod(t *testing.T, namespace, name string, po ...PodOption) *corev1.Pod { + t.Helper() + deploy := deploy(t, namespace, name) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: deploy.Labels, + }, + } + + for _, opt := range po { + opt(pod) + } + return pod +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ reconciler.ConfigStore = (*testConfigStore)(nil) + +func ReconcilerTestConfig() *config.Config { + return &config.Config{ + Deployment: getTestDeploymentConfig(), + Network: &network.Config{IstioOutboundIPRanges: "*"}, + Observability: &metrics.ObservabilityConfig{ + LoggingURLTemplate: "http://logger.io/${REVISION_UID}", + }, + Logging: &logging.Config{}, + Tracing: &tracingconfig.Config{}, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/OWNERS b/test/vendor/knative.dev/serving/pkg/reconciler/route/OWNERS new file mode 100644 index 0000000000..8787cec263 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- networking-approvers + +reviewers: +- networking-reviewers + +labels: +- area/networking diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/README.md b/test/vendor/knative.dev/serving/pkg/reconciler/route/README.md new file mode 100644 index 0000000000..55bb30cecb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/README.md @@ -0,0 +1,76 @@ +### Route CRDs + +All routing in Knative Serving is done through the Route CRD, which provides a +network endpoint for a user's service/app (which consists of a series of +software and configuration Revisions over time). The Route provides a +long-lived, stable, named, HTTP-addressable endpoint that is backed by one or +more +[Revisions](https://github.com/knative/serving/blob/master/docs/spec/overview.md#revision). +The default configuration is for the `Route` to automatically direct traffic to +the latest revision created by a Configuration. For more about Routes, read +[this doc](https://github.com/knative/serving/blob/master/docs/spec/overview.md#route). + +Currently we use Istio to program the network for Routes, but we don't exclude +other implementations if they can provide similar functionality. + +### Underlying implementation using Istio + +#### Shared Gateway for all Knative Routes + +Currently all Routes can receive external traffic through a shared Istio +Gateway. Many of our users may already be Istio users. In order to avoid +conflict with users' Gateway settings, we use a different Gateway than the +default `istio-ingressgateway`. In the future we should probably provide a way +for the users to select what the Gateway they use -- and how Knative would +expect such Gateway to look like. 
+ +#### For each Route, a VirtualService and Service + +A valid Route object, when reconciled by Knative Route controller, will generate +the following objects: + +- A VirtualService to realize the routing from the Gateway + `knative-ingress-gateway` to the traffic target referenced in the Route. +- A Service with the same name as the Route, so that we can access the Route + using `..svc.`. This Service + has no Pod, we use it solely to have a domain name and a cluster IP to be used + in the VirtualService. The value of `` depends on a + domain name specified during the installation of the cluster. If no custom + domain name was specified, then `cluster.local` should be used as in the + following example: + + `..svc.cluster.local` + + otherwise cluster's custom domain name should be used: + + `..svc.real-domain-name.com` + +For example, if we have two Knative Revisions `hello-world-01` and +`hello-world-02`, and one Route `hello-world` that directs traffic to both +Revisions, the resources would look like: + +![Istio resources generated by a Route are shown in the dotted box](doc/images/active_revisions.svg) + +#### Routing in the presence of Inactive Revisions (aka 0→1) + +In the case of inactive Revisions, a Route would direct requests through the +Service `activator-service`, with enough information in the headers so that the +Service `activator-service` Service can activate a Revision before relaying the +traffic to it. + +From the same scenario of the previous example, if the Revision `hello-world-01` +becomes inactive due to lack of traffic, the resources would look like: + +![Revision `hello-world-01` is deactivated](doc/images/inactive_revision.svg) + +Note that while we still see a `hello-world-01` Service in this case, it does +not have any Pod until activated by the activator. 
+ +After Revision `hello-world-02` also becomes inactive due to lack of traffic, +the resources would look like: + +![Both Revisions are deactivated](doc/images/inactive_revisions.svg) + +If any activation happens, Revisions becomes active again and traffic will be +adjusted to route directly to the Revision, without going through the Service +`activator-service`. diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/doc.go new file mode 100644 index 0000000000..18fe4f381f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// Package config holds the typed objects that define the schemas for +// assorted ConfigMap objects on which the Route controller depends. +package config diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go new file mode 100644 index 0000000000..3863a1311f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "strings" + + "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/network" +) + +const ( + DomainConfigName = "config-domain" + // VisibilityLabelKey is the label to indicate visibility of Route + // and KServices. It can be an annotation too but since users are + // already using labels for domain, it probably best to keep this + // consistent. + VisibilityLabelKey = "serving.knative.dev/visibility" + // VisibilityClusterLocal is the label value for VisibilityLabelKey + // that will result to the Route/KService getting a cluster local + // domain suffix. + VisibilityClusterLocal = "cluster-local" + // DefaultDomain holds the domain that Route's live under by default + // when no label selector-based options apply. + DefaultDomain = "example.com" +) + +// LabelSelector represents map of {key,value} pairs. A single {key,value} in the +// map is equivalent to a requirement key == value. The requirements are ANDed. +type LabelSelector struct { + Selector map[string]string `json:"selector,omitempty"` +} + +func (s *LabelSelector) specificity() int { + return len(s.Selector) +} + +// Matches returns whether the given labels meet the requirement of the selector. +func (s *LabelSelector) Matches(labels map[string]string) bool { + for label, expectedValue := range s.Selector { + value, ok := labels[label] + if !ok || expectedValue != value { + return false + } + } + return true +} + +// Domain maps domains to routes by matching the domain's +// label selectors to the route's labels. 
+type Domain struct { + // Domains map from domain to label selector. If a route has + // labels matching a particular selector, it will use the + // corresponding domain. If multiple selectors match, we choose + // the most specific selector. + Domains map[string]*LabelSelector +} + +// NewDomainFromConfigMap creates a Domain from the supplied ConfigMap +func NewDomainFromConfigMap(configMap *corev1.ConfigMap) (*Domain, error) { + c := Domain{Domains: map[string]*LabelSelector{}} + hasDefault := false + for k, v := range configMap.Data { + if k == configmap.ExampleKey { + continue + } + labelSelector := LabelSelector{} + err := yaml.Unmarshal([]byte(v), &labelSelector) + if err != nil { + return nil, err + } + c.Domains[k] = &labelSelector + if len(labelSelector.Selector) == 0 { + hasDefault = true + } + } + if !hasDefault { + c.Domains[DefaultDomain] = &LabelSelector{} + } + return &c, nil +} + +// LookupDomainForLabels returns a domain given a set of labels. +// Since we reject configuration without a default domain, this should +// always return a value. +func (c *Domain) LookupDomainForLabels(labels map[string]string) string { + domain := "" + specificity := -1 + // If we see VisibilityLabelKey sets with VisibilityClusterLocal, that + // will take precedence and the route will get a Cluster's Domain Name. + if labels[VisibilityLabelKey] == VisibilityClusterLocal { + return "svc." + network.GetClusterDomainName() + } + for k, selector := range c.Domains { + // Ignore if selector doesn't match, or decrease the specificity. 
+ if !selector.Matches(labels) || selector.specificity() < specificity { + continue + } + if selector.specificity() > specificity || strings.Compare(k, domain) < 0 { + domain = k + specificity = selector.specificity() + } + } + + return domain +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain_test.go new file mode 100644 index 0000000000..b0d0d59c02 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/domain_test.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + . 
"knative.dev/pkg/configmap/testing" + "knative.dev/pkg/network" + "knative.dev/pkg/system" + + _ "knative.dev/pkg/system/testing" +) + +func TestSelectorMatches(t *testing.T) { + selector := LabelSelector{ + Selector: map[string]string{ + "app": "bar", + "version": "beta", + }, + } + nonMatchingLabels := []map[string]string{ + {"app": "bar"}, + {"version": "beta"}, + {"app": "foo"}, + {}, + } + matchingLabels := []map[string]string{ + {"app": "bar", "version": "beta"}, + {"app": "bar", "version": "beta", "last_updated": "yesterday"}, + {"app": "bar", "version": "beta", "deployer": "Felicity Smoak"}, + } + for _, labels := range nonMatchingLabels { + if selector.Matches(labels) { + t.Errorf("Expect selector %v not to match labels %v", selector, labels) + } + } + for _, labels := range matchingLabels { + if !selector.Matches(labels) { + t.Errorf("Expect selector %v to match labels %v", selector, labels) + } + } +} + +func TestNewConfigNoEntry(t *testing.T) { + d, err := NewDomainFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DomainConfigName, + }, + }) + if err != nil { + t.Errorf("Unexpected error when config file has no entry: %v", err) + } + got := d.LookupDomainForLabels(nil) + if got != DefaultDomain { + t.Errorf("LookupDomainForLabels() = %s, wanted %s", got, DefaultDomain) + } +} + +func TestNewConfigBadYaml(t *testing.T) { + c, err := NewDomainFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DomainConfigName, + }, + Data: map[string]string{ + "default.com": "bad: yaml: all: day", + }, + }) + if err == nil { + t.Errorf("NewDomainFromConfigMap() = %v, wanted error", c) + } +} + +func TestNewConfig(t *testing.T) { + expectedConfig := Domain{ + Domains: map[string]*LabelSelector{ + "test-domain.foo.com": { + Selector: map[string]string{ + "app": "foo", + }, + }, + "bar.com": { + Selector: map[string]string{ + "app": "bar", + "version": "beta", + 
}, + }, + "default.com": {}, + }, + } + c, err := NewDomainFromConfigMap(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: DomainConfigName, + }, + Data: map[string]string{ + "test-domain.foo.com": "selector:\n app: foo", + "bar.com": "selector:\n app: bar\n version: beta", + "default.com": "", + }, + }) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if diff := cmp.Diff(&expectedConfig, c); diff != "" { + t.Errorf("Unexpected config diff (-want +got): %s", diff) + } +} + +func TestLookupDomainForLabels(t *testing.T) { + config := Domain{ + Domains: map[string]*LabelSelector{ + "test-domain.foo.com": { + Selector: map[string]string{ + "app": "foo", + }, + }, + "foo.com": { + Selector: map[string]string{ + "app": "foo", + "version": "prod", + }, + }, + "bar.com": { + Selector: map[string]string{ + "app": "bar", + }, + }, + "default.com": {}, + }, + } + + expectations := []struct { + labels map[string]string + domain string + }{{ + labels: map[string]string{"app": "foo"}, + domain: "test-domain.foo.com", + }, { + // This should match two selector, but the one with version=prod is more specific. + labels: map[string]string{"app": "foo", "version": "prod"}, + domain: "foo.com", + }, { + labels: map[string]string{"app": "bar"}, + domain: "bar.com", + }, { + labels: map[string]string{"app": "bar", "version": "whatever"}, + domain: "bar.com", + }, { + labels: map[string]string{"app": "whatever"}, + domain: "default.com", + }, { + labels: map[string]string{}, + domain: "default.com", + }, { + labels: map[string]string{"serving.knative.dev/visibility": "cluster-local"}, + domain: "svc." 
+ network.GetClusterDomainName(), + }} + + for _, expected := range expectations { + domain := config.LookupDomainForLabels(expected.labels) + if expected.domain != domain { + t.Errorf("Expected domain %q got %q", expected.domain, domain) + } + } +} + +func TestOurDomain(t *testing.T) { + cm, example := ConfigMapsFromTestFile(t, DomainConfigName) + if _, err := NewDomainFromConfigMap(cm); err != nil { + t.Errorf("NewDomainFromConfigMap(actual) = %v", err) + } + if _, err := NewDomainFromConfigMap(example); err != nil { + t.Errorf("NewDomainFromConfigMap(example) = %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go new file mode 100644 index 0000000000..4867d7e019 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "context" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/logging" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" +) + +type cfgKey struct{} + +// +k8s:deepcopy-gen=false +type Config struct { + Domain *Domain + GC *gc.Config + Network *network.Config +} + +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is based on configmap.UntypedStore and is used to store and watch for +// updates to configuration related to routes (currently only config-domain). +// +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a configmap.UntypedStore based config store. +// +// logger must be non-nil implementation of configmap.Logger (commonly used +// loggers conform) +// +// onAfterStore is a variadic list of callbacks to run +// after the ConfigMap has been processed and stored. +// +// See also: configmap.NewUntypedStore(). 
+func NewStore(ctx context.Context, onAfterStore ...func(name string, value interface{})) *Store { + logger := logging.FromContext(ctx) + + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "route", + logger, + configmap.Constructors{ + DomainConfigName: NewDomainFromConfigMap, + gc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx), + network.ConfigName: network.NewConfigFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +func (s *Store) Load() *Config { + return &Config{ + Domain: s.UntypedLoad(DomainConfigName).(*Domain).DeepCopy(), + GC: s.UntypedLoad(gc.ConfigName).(*gc.Config).DeepCopy(), + Network: s.UntypedLoad(network.ConfigName).(*network.Config).DeepCopy(), + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store_test.go new file mode 100644 index 0000000000..537c9fe6e0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/store_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + logtesting "knative.dev/pkg/logging/testing" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + + . 
"knative.dev/pkg/configmap/testing" +) + +func TestStoreLoadWithContext(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + store := NewStore(ctx) + + domainConfig := ConfigMapFromTestFile(t, DomainConfigName) + gcConfig := ConfigMapFromTestFile(t, gc.ConfigName) + networkConfig := ConfigMapFromTestFile(t, network.ConfigName) + + store.OnConfigChanged(domainConfig) + store.OnConfigChanged(gcConfig) + store.OnConfigChanged(networkConfig) + + config := FromContext(store.ToContext(context.Background())) + + t.Run("domain", func(t *testing.T) { + expected, _ := NewDomainFromConfigMap(domainConfig) + if diff := cmp.Diff(expected, config.Domain); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %v", diff) + } + }) + + t.Run("gc", func(t *testing.T) { + expected, err := gc.NewConfigFromConfigMapFunc(ctx)(gcConfig) + if err != nil { + t.Errorf("Parsing configmap: %v", err) + } + if diff := cmp.Diff(expected, config.GC); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %v", diff) + } + }) + + t.Run("gc invalid timeout", func(t *testing.T) { + gcConfig.Data["stale-revision-timeout"] = "1h" + expected, err := gc.NewConfigFromConfigMapFunc(ctx)(gcConfig) + + if err != nil { + t.Errorf("Got error parsing gc config with invalid timeout: %v", err) + } + + if expected.StaleRevisionTimeout != 15*time.Hour { + t.Errorf("Expected revision timeout of %v, got %v", 15*time.Hour, expected.StaleRevisionTimeout) + } + }) +} + +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestContextWithLogger(t)) + store.OnConfigChanged(ConfigMapFromTestFile(t, DomainConfigName)) + store.OnConfigChanged(ConfigMapFromTestFile(t, network.ConfigName)) + store.OnConfigChanged(ConfigMapFromTestFile(t, gc.ConfigName)) + + config := store.Load() + + config.Domain.Domains = map[string]*LabelSelector{ + "mutated": nil, + } + + newConfig := store.Load() + + if _, ok := newConfig.Domain.Domains["mutated"]; ok { + t.Error("Domain config 
is not immutable") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-domain.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-domain.yaml new file mode 120000 index 0000000000..39a7f1839a --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-domain.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/domain.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-gc.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-gc.yaml new file mode 120000 index 0000000000..71c3f7d74d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-gc.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/gc.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-network.yaml b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-network.yaml new file mode 120000 index 0000000000..56cb332a04 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/testdata/config-network.yaml @@ -0,0 +1 @@ +../../../../../config/core/configmaps/network.yaml \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/config/zz_generated.deepcopy.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..86b30c4995 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/config/zz_generated.deepcopy.go @@ -0,0 +1,75 @@ +// +build !ignore_autogenerated + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Domain) DeepCopyInto(out *Domain) { + *out = *in + if in.Domains != nil { + in, out := &in.Domains, &out.Domains + *out = make(map[string]*LabelSelector, len(*in)) + for key, val := range *in { + var outVal *LabelSelector + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(LabelSelector) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Domain. +func (in *Domain) DeepCopy() *Domain { + if in == nil { + return nil + } + out := new(Domain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector. 
+func (in *LabelSelector) DeepCopy() *LabelSelector { + if in == nil { + return nil + } + out := new(LabelSelector) + in.DeepCopyInto(out) + return out +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/controller.go new file mode 100644 index 0000000000..7dcea81c90 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/controller.go @@ -0,0 +1,131 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package route + +import ( + "context" + + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + certificateinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate" + ingressinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress" + configurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + routeinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/system" + "knative.dev/pkg/tracker" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/route/config" +) + +const ( + controllerAgentName = "route-controller" +) + +// NewController initializes the controller and is called by the generated code +// Registers eventhandlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + return NewControllerWithClock(ctx, cmw, system.RealClock{}) +} + +func NewControllerWithClock( + ctx context.Context, + cmw configmap.Watcher, + clock system.Clock, +) *controller.Impl { + + serviceInformer := serviceinformer.Get(ctx) + routeInformer := routeinformer.Get(ctx) + configInformer := configurationinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + ingressInformer := ingressinformer.Get(ctx) + certificateInformer := certificateinformer.Get(ctx) + + // No need to lock domainConfigMutex yet since the informers that can modify + // domainConfig haven't started yet. 
+ c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: routeInformer.Lister(), + configurationLister: configInformer.Lister(), + revisionLister: revisionInformer.Lister(), + serviceLister: serviceInformer.Lister(), + ingressLister: ingressInformer.Lister(), + certificateLister: certificateInformer.Lister(), + clock: clock, + } + impl := controller.NewImpl(c, c.Logger, "Routes") + + c.Logger.Info("Setting up event handlers") + routeInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Route")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + ingressInformer.Informer().AddEventHandler(controller.HandleAll(impl.EnqueueControllerOf)) + + c.tracker = tracker.New(impl.EnqueueKey, controller.GetTrackerLease(ctx)) + + configInformer.Informer().AddEventHandler(controller.HandleAll( + // Call the tracker's OnChanged method, but we've seen the objects + // coming through this path missing TypeMeta, so ensure it is properly + // populated. + controller.EnsureTypeMeta( + c.tracker.OnChanged, + v1alpha1.SchemeGroupVersion.WithKind("Configuration"), + ), + )) + + revisionInformer.Informer().AddEventHandler(controller.HandleAll( + // Call the tracker's OnChanged method, but we've seen the objects + // coming through this path missing TypeMeta, so ensure it is properly + // populated. 
+ controller.EnsureTypeMeta( + c.tracker.OnChanged, + v1alpha1.SchemeGroupVersion.WithKind("Revision"), + ), + )) + + certificateInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Route")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + c.Logger.Info("Setting up ConfigMap receivers") + configsToResync := []interface{}{ + &network.Config{}, + &config.Domain{}, + } + resync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) { + impl.GlobalResync(routeInformer.Informer()) + }) + configStore := config.NewStore(logging.WithLogger(ctx, c.Logger.Named("config-store")), resync) + configStore.WatchConfigs(cmw) + c.configStore = configStore + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc.go new file mode 100644 index 0000000000..d8e8d44f62 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +Package route implements a kubernetes controller which tracks Route resource +and reconcile Ingress as its child resource. 
+ +*/ +package route diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/active_revisions.svg b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/active_revisions.svg new file mode 100644 index 0000000000..dcd35b5934 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/active_revisions.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revision.svg b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revision.svg new file mode 100644 index 0000000000..8f14e29167 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revision.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revisions.svg b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revisions.svg new file mode 100644 index 0000000000..2a09577242 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/doc/images/inactive_revisions.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/doc.go new file mode 100644 index 0000000000..c53b4648b6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package domains holds simple functions for generating domains. +package domains diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go new file mode 100644 index 0000000000..14dd9e3f47 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package domains + +import ( + "bytes" + "context" + "fmt" + "strings" + "text/template" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + pkgnet "knative.dev/pkg/network" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/resources/labels" + + "knative.dev/pkg/apis" +) + +// HTTPScheme is the string representation of http. 
+const HTTPScheme string = "http" + +// GetAllDomainsAndTags returns all of the domains and tags(including subdomains) associated with a Route +func GetAllDomainsAndTags(ctx context.Context, r *v1alpha1.Route, names []string, localServiceNames sets.String) (map[string]string, error) { + domainTagMap := make(map[string]string) + + for _, name := range names { + meta := r.ObjectMeta.DeepCopy() + + hostname, err := HostnameFromTemplate(ctx, meta.Name, name) + if err != nil { + return nil, err + } + + labels.SetVisibility(meta, localServiceNames.Has(hostname)) + + subDomain, err := DomainNameFromTemplate(ctx, *meta, hostname) + if err != nil { + return nil, err + } + domainTagMap[subDomain] = name + } + return domainTagMap, nil +} + +// DomainNameFromTemplate generates domain name base on the template specified in the `config-network` ConfigMap. +// name is the "subdomain" which will be referred as the "name" in the template +func DomainNameFromTemplate(ctx context.Context, r v1.ObjectMeta, name string) (string, error) { + domainConfig := config.FromContext(ctx).Domain + rLabels := r.Labels + domain := domainConfig.LookupDomainForLabels(rLabels) + annotations := r.Annotations + // These are the available properties they can choose from. + // We could add more over time - e.g. RevisionName if we thought that + // might be of interest to people. 
+ data := network.DomainTemplateValues{ + Name: name, + Namespace: r.Namespace, + Domain: domain, + Annotations: annotations, + } + + networkConfig := config.FromContext(ctx).Network + buf := bytes.Buffer{} + + var templ *template.Template + // If the route is "cluster local" then don't use the user-defined + // domain template, use the default one + if rLabels[config.VisibilityLabelKey] == config.VisibilityClusterLocal { + templ = template.Must(template.New("domain-template").Parse( + network.DefaultDomainTemplate)) + } else { + templ = networkConfig.GetDomainTemplate() + } + + if err := templ.Execute(&buf, data); err != nil { + return "", fmt.Errorf("error executing the DomainTemplate: %w", err) + } + return buf.String(), nil +} + +// HostnameFromTemplate generates domain name base on the template specified in the `config-network` ConfigMap. +// name is the "subdomain" which will be referred as the "name" in the template +func HostnameFromTemplate(ctx context.Context, name string, tag string) (string, error) { + if tag == "" { + return name, nil + } + // These are the available properties they can choose from. + // We could add more over time - e.g. RevisionName if we thought that + // might be of interest to people. + data := network.TagTemplateValues{ + Name: name, + Tag: tag, + } + + networkConfig := config.FromContext(ctx).Network + buf := bytes.Buffer{} + if err := networkConfig.GetTagTemplate().Execute(&buf, data); err != nil { + return "", fmt.Errorf("error executing the TagTemplate: %w", err) + } + return buf.String(), nil +} + +// URL generates the a string representation of a URL. +func URL(scheme, fqdn string) *apis.URL { + return &apis.URL{ + Scheme: scheme, + Host: fqdn, + } +} + +// IsClusterLocal checks if a domain is only visible with cluster. 
+func IsClusterLocal(domain string) bool { + return strings.HasSuffix(domain, pkgnet.GetClusterDomainName()) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains_test.go new file mode 100644 index 0000000000..9591b59e8e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains_test.go @@ -0,0 +1,282 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package domains + +import ( + "context" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/google/go-cmp/cmp" + "knative.dev/pkg/apis" + + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" +) + +func testConfig() *config.Config { + return &config.Config{ + Domain: &config.Domain{ + Domains: map[string]*config.LabelSelector{ + "example.com": {}, + "another-example.com": { + Selector: map[string]string{"app": "prod"}, + }, + }, + }, + Network: &network.Config{ + DefaultIngressClass: "ingress-class-foo", + DomainTemplate: network.DefaultDomainTemplate, + }, + GC: &gc.Config{ + StaleRevisionLastpinnedDebounce: time.Duration(1 * time.Minute), + }, + } +} + +func TestDomainNameFromTemplate(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + template string + args args + want string + wantErr bool + local bool + }{{ + name: "Default", + template: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + args: args{name: "test-name"}, + want: "test-name.default.example.com", + local: false, + }, { + name: "Dash", + template: "{{.Name}}-{{.Namespace}}.{{.Domain}}", + args: args{name: "test-name"}, + want: "test-name-default.example.com", + local: false, + }, { + name: "LocalDash", + template: "{{.Name}}-{{.Namespace}}.{{.Domain}}", + args: args{name: "test-name"}, + want: "test-name.default.svc.cluster.local", + local: true, + }, { + name: "Short", + template: "{{.Name}}.{{.Domain}}", + args: args{name: "test-name"}, + want: "test-name.example.com", + local: false, + }, { + name: "SuperShort", + template: "{{.Name}}", + args: args{name: "test-name"}, + want: "test-name", + local: false, + }, { + name: "Annotations", + template: `{{.Name}}.{{ index .Annotations "sub"}}.{{.Domain}}`, + args: args{name: "test-name"}, + want: "test-name.mysub.example.com", + 
local: false, + }, { + // This cannot get through our validation, but verify we handle errors. + name: "BadVarName", + template: "{{.Name}}.{{.NNNamespace}}.{{.Domain}}", + args: args{name: "test-name"}, + wantErr: true, + local: false, + }} + + meta := metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/Routes/myapp", + Name: "myroute", + Namespace: "default", + Labels: map[string]string{ + "route": "myapp", + }, + Annotations: map[string]string{ + "sub": "mysub", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + cfg := testConfig() + cfg.Network.DomainTemplate = tt.template + ctx = config.ToContext(ctx, cfg) + + if tt.local { + meta.Labels[config.VisibilityLabelKey] = config.VisibilityClusterLocal + } else { + delete(meta.Labels, config.VisibilityLabelKey) + } + + got, err := DomainNameFromTemplate(ctx, meta, tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("DomainNameFromTemplate() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("DomainNameFromTemplate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestURL(t *testing.T) { + tests := []struct { + name string + scheme string + domain string + Expected apis.URL + }{{ + name: "subdomain", + scheme: HTTPScheme, + domain: "current.svc.local.com", + Expected: apis.URL{ + Scheme: "http", + Host: "current.svc.local.com", + }, + }, { + name: "default target", + scheme: HTTPScheme, + domain: "example.com", + Expected: apis.URL{ + Scheme: "http", + Host: "example.com", + }, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, want := *URL(tt.scheme, tt.domain), tt.Expected; !cmp.Equal(want, got) { + t.Errorf("URL = %v, want: %v", got, want) + } + }) + } +} + +func TestGetAllDomainsAndTags(t *testing.T) { + tests := []struct { + name string + domainTemplate string + tagTemplate string + want map[string]string + wantErr bool + }{{ + name: "happy case", + 
domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + tagTemplate: "{{.Name}}-{{.Tag}}", + want: map[string]string{ + "myroute-target-1.default.example.com": "target-1", + "myroute-target-2.default.example.com": "target-2", + "myroute.default.example.com": "", + }, + }, { + name: "another happy case", + domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + tagTemplate: "{{.Tag}}-{{.Name}}", + want: map[string]string{ + "target-1-myroute.default.example.com": "target-1", + "target-2-myroute.default.example.com": "target-2", + "myroute.default.example.com": "", + }, + }, { + name: "or appengine style", + domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + tagTemplate: "{{.Tag}}-dot-{{.Name}}", + want: map[string]string{ + "target-1-dot-myroute.default.example.com": "target-1", + "target-2-dot-myroute.default.example.com": "target-2", + "myroute.default.example.com": "", + }, + }, { + name: "bad template", + domainTemplate: "{{.NNName}}.{{.Namespace}}.{{.Domain}}", + tagTemplate: "{{.Name}}-{{.Tag}}", + wantErr: true, + }, { + name: "bad template", + domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + tagTemplate: "{{.NNName}}-{{.Tag}}", + wantErr: true, + }} + + route := &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/Routes/myapp", + Name: "myroute", + Namespace: "default", + Labels: map[string]string{ + "route": "myapp", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + cfg := testConfig() + cfg.Network.DomainTemplate = tt.domainTemplate + cfg.Network.TagTemplate = tt.tagTemplate + ctx = config.ToContext(ctx, cfg) + + // here, a tag-less major domain will have empty string as the input + got, err := GetAllDomainsAndTags(ctx, route, []string{"", "target-1", "target-2"}, sets.String{}) + if (err != nil) != tt.wantErr { + t.Errorf("GetAllDomains() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, 
got); diff != "" { + t.Errorf("GetAllDomains() diff (-want +got): %v", diff) + } + }) + } +} +func TestIsClusterLocal(t *testing.T) { + tests := []struct { + name string + domain string + want bool + }{ + { + name: "domain is public", + domain: "k8s.io", + want: false, + }, + { + name: "domain is cluster local", + domain: "my-app.cluster.local", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsClusterLocal(tt.domain); got != tt.want { + t.Errorf("IsClusterLocal() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/queueing_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/queueing_test.go new file mode 100644 index 0000000000..d870bcc989 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/queueing_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package route + +import ( + "testing" + "time" + + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +func TestNewRouteCallsSyncHandler(t *testing.T) { + ctx, cancel, informers := SetupFakeContextWithCancel(t) + + // A standalone revision + rev := getTestRevision("test-rev") + // A route targeting the revision + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "test-rev", + Percent: ptr.Int64(100), + }, + })) + + // Create fake clients + configMapWatcher := configmap.NewStaticWatcher(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + prodDomainSuffix: "selector:\n app: prod", + }, + }, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: gc.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }) + + ctrl := NewController(ctx, configMapWatcher) + + servingClient := fakeservingclient.Get(ctx) + + h := NewHooks() + + // Check for Ingress created as a signal that syncHandler ran + 
h.OnCreate(&servingClient.Fake, "ingresses", func(obj runtime.Object) HookResult { + ci := obj.(*netv1alpha1.Ingress) + t.Logf("ingress created: %q", ci.Name) + + return HookComplete + }) + + eg := errgroup.Group{} + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cancel() + if err := eg.Wait(); err != nil { + t.Fatalf("Error running controller: %v", err) + } + waitInformers() + }() + + // Run the controller. + eg.Go(func() error { + return ctrl.Run(2, ctx.Done()) + }) + + if _, err := servingClient.ServingV1alpha1().Revisions(rev.Namespace).Create(rev); err != nil { + t.Errorf("Unexpected error creating revision: %v", err) + } + + for i, informer := range informers { + if ok := cache.WaitForCacheSync(ctx.Done(), informer.HasSynced); !ok { + t.Fatalf("failed to wait for cache at index %d to sync", i) + } + } + + if _, err := servingClient.ServingV1alpha1().Routes(route.Namespace).Create(route); err != nil { + t.Errorf("Unexpected error creating route: %v", err) + } + + if err := h.WaitForHooks(time.Second * 3); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go new file mode 100644 index 0000000000..36e288f6be --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go @@ -0,0 +1,300 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "context" + "fmt" + "reflect" + + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/logging" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/resources" + "knative.dev/serving/pkg/reconciler/route/traffic" +) + +func routeOwnerLabelSelector(route *v1alpha1.Route) labels.Selector { + return labels.Set(map[string]string{ + serving.RouteLabelKey: route.Name, + serving.RouteNamespaceLabelKey: route.Namespace, + }).AsSelector() +} + +func (c *Reconciler) deleteIngressForRoute(route *v1alpha1.Route) error { + + // We always use DeleteCollection because even with a fixed name, we apply the labels. + selector := routeOwnerLabelSelector(route).String() + + // Delete Ingresses owned by this route. 
+ return c.ServingClientSet.NetworkingV1alpha1().Ingresses(route.Namespace).DeleteCollection( + nil, metav1.ListOptions{LabelSelector: selector}) +} + +func (c *Reconciler) reconcileIngress(ctx context.Context, r *v1alpha1.Route, desired *netv1alpha1.Ingress) (*netv1alpha1.Ingress, error) { + ingress, err := c.ingressLister.Ingresses(desired.Namespace).Get(desired.Name) + if apierrs.IsNotFound(err) { + ingress, err = c.ServingClientSet.NetworkingV1alpha1().Ingresses(desired.Namespace).Create(desired) + if err != nil { + c.Recorder.Eventf(r, corev1.EventTypeWarning, "CreationFailed", "Failed to create Ingress: %v", err) + return nil, fmt.Errorf("failed to create Ingress: %w", err) + } + + c.Recorder.Eventf(r, corev1.EventTypeNormal, "Created", "Created Ingress %q", ingress.GetName()) + return ingress, nil + } else if err != nil { + return nil, err + } else { + // It is notable that one reason for differences here may be defaulting. + // When that is the case, the Update will end up being a nop because the + // webhook will bring them into alignment and no new reconciliation will occur. + // Also, compare annotation in case ingress.Class is updated. 
+ if !equality.Semantic.DeepEqual(ingress.Spec, desired.Spec) || + !equality.Semantic.DeepEqual(ingress.Annotations, desired.Annotations) { + // Don't modify the informers copy + origin := ingress.DeepCopy() + origin.Spec = desired.Spec + origin.Annotations = desired.Annotations + updated, err := c.ServingClientSet.NetworkingV1alpha1().Ingresses(origin.Namespace).Update(origin) + if err != nil { + return nil, fmt.Errorf("failed to update Ingress: %w", err) + } + return updated, nil + } + } + + return ingress, err +} + +func (c *Reconciler) deleteServices(namespace string, serviceNames sets.String) error { + for _, serviceName := range serviceNames.List() { + if err := c.KubeClientSet.CoreV1().Services(namespace).Delete(serviceName, nil); err != nil { + return fmt.Errorf("failed to delete Service: %w", err) + } + } + + return nil +} + +func (c *Reconciler) reconcilePlaceholderServices(ctx context.Context, route *v1alpha1.Route, targets map[string]traffic.RevisionTargets, existingServiceNames sets.String) ([]*corev1.Service, error) { + logger := logging.FromContext(ctx) + ns := route.Namespace + + names := sets.NewString() + for name := range targets { + names.Insert(name) + } + + createdServiceNames := sets.String{} + + var services []*corev1.Service + for _, name := range names.List() { + desiredService, err := resources.MakeK8sPlaceholderService(ctx, route, name) + if err != nil { + logger.Warnw("Failed to construct placeholder k8s service", zap.Error(err)) + return nil, err + } + + service, err := c.serviceLister.Services(ns).Get(desiredService.Name) + if apierrs.IsNotFound(err) { + // Doesn't exist, create it. 
+ service, err = c.KubeClientSet.CoreV1().Services(ns).Create(desiredService) + if err != nil { + c.Recorder.Eventf(route, corev1.EventTypeWarning, "CreationFailed", + "Failed to create placeholder service %q: %v", desiredService.Name, err) + return nil, fmt.Errorf("failed to create placeholder service: %w", err) + } + logger.Infof("Created service %s", desiredService.Name) + c.Recorder.Eventf(route, corev1.EventTypeNormal, "Created", "Created placeholder service %q", desiredService.Name) + } else if err != nil { + return nil, err + } else if !metav1.IsControlledBy(service, route) { + // Surface an error in the route's status, and return an error. + route.Status.MarkServiceNotOwned(desiredService.Name) + return nil, fmt.Errorf("route: %q does not own Service: %q", route.Name, desiredService.Name) + } + + services = append(services, service) + createdServiceNames.Insert(desiredService.Name) + } + + // Delete any current services that was no longer desired. + if err := c.deleteServices(ns, existingServiceNames.Difference(createdServiceNames)); err != nil { + return nil, err + } + + // TODO(mattmoor): This is where we'd look at the state of the Service and + // reflect any necessary state into the Route. + return services, nil +} + +func (c *Reconciler) updatePlaceholderServices(ctx context.Context, route *v1alpha1.Route, services []*corev1.Service, ingress *netv1alpha1.Ingress) error { + logger := logging.FromContext(ctx) + ns := route.Namespace + + eg, _ := errgroup.WithContext(ctx) + for _, service := range services { + service := service + eg.Go(func() error { + desiredService, err := resources.MakeK8sService(ctx, route, service.Name, ingress, resources.IsClusterLocalService(service)) + if err != nil { + // Loadbalancer not ready, no need to update. + logger.Warnf("Failed to update k8s service: %v", err) + return nil + } + + // Make sure that the service has the proper specification. 
+ if !equality.Semantic.DeepEqual(service.Spec, desiredService.Spec) { + // Don't modify the informers copy + existing := service.DeepCopy() + existing.Spec = desiredService.Spec + _, err = c.KubeClientSet.CoreV1().Services(ns).Update(existing) + if err != nil { + return err + } + } + return nil + }) + } + + // TODO(mattmoor): This is where we'd look at the state of the Service and + // reflect any necessary state into the Route. + return eg.Wait() +} + +func (c *Reconciler) updateStatus(existing *v1alpha1.Route, desired *v1alpha1.Route) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.ServingV1alpha1().Routes(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = c.ServingClientSet.ServingV1alpha1().Routes(desired.Namespace).UpdateStatus(existing) + return err + }) +} + +// Update the lastPinned annotation on revisions we target so they don't get GC'd. 
+func (c *Reconciler) reconcileTargetRevisions(ctx context.Context, t *traffic.Config, route *v1alpha1.Route) error { + gcConfig := config.FromContext(ctx).GC + logger := logging.FromContext(ctx) + lpDebounce := gcConfig.StaleRevisionLastpinnedDebounce + + eg, _ := errgroup.WithContext(ctx) + for _, target := range t.Targets { + for _, rt := range target { + tt := rt.TrafficTarget + eg.Go(func() error { + rev, err := c.revisionLister.Revisions(route.Namespace).Get(tt.RevisionName) + if apierrs.IsNotFound(err) { + logger.Infof("Unable to update lastPinned for missing revision %q", tt.RevisionName) + return nil + } else if err != nil { + return err + } + + newRev := rev.DeepCopy() + + lastPin, err := newRev.GetLastPinned() + if err != nil { + // Missing is an expected error case for a not yet pinned revision. + if err.(v1alpha1.LastPinnedParseError).Type != v1alpha1.AnnotationParseErrorTypeMissing { + return err + } + } else { + // Enforce a delay before performing an update on lastPinned to avoid excess churn. 
+ if lastPin.Add(lpDebounce).After(c.clock.Now()) { + return nil + } + } + + newRev.SetLastPinned(c.clock.Now()) + + patch, err := duck.CreateMergePatch(rev, newRev) + if err != nil { + return err + } + + if _, err := c.ServingClientSet.ServingV1alpha1().Revisions(route.Namespace).Patch(rev.Name, types.MergePatchType, patch); err != nil { + return fmt.Errorf("failed to set revision annotation: %w", err) + } + return nil + }) + } + } + return eg.Wait() +} + +func (c *Reconciler) reconcileCertificate(ctx context.Context, r *v1alpha1.Route, desiredCert *netv1alpha1.Certificate) (*netv1alpha1.Certificate, error) { + cert, err := c.certificateLister.Certificates(desiredCert.Namespace).Get(desiredCert.Name) + if apierrs.IsNotFound(err) { + cert, err = c.ServingClientSet.NetworkingV1alpha1().Certificates(desiredCert.Namespace).Create(desiredCert) + if err != nil { + c.Recorder.Eventf(r, corev1.EventTypeWarning, "CreationFailed", "Failed to create Certificate: %v", err) + return nil, fmt.Errorf("failed to create Certificate: %w", err) + } + c.Recorder.Eventf(r, corev1.EventTypeNormal, "Created", + "Created Certificate %s/%s", cert.Namespace, cert.Name) + return cert, nil + } else if err != nil { + return nil, err + } else if !metav1.IsControlledBy(cert, r) { + // Surface an error in the route's status, and return an error. 
+ r.Status.MarkCertificateNotOwned(cert.Name) + return nil, fmt.Errorf("route: %s does not own certificate: %s", r.Name, cert.Name) + } else { + if !equality.Semantic.DeepEqual(cert.Spec, desiredCert.Spec) { + // Don't modify the informers copy + existing := cert.DeepCopy() + existing.Spec = desiredCert.Spec + cert, err := c.ServingClientSet.NetworkingV1alpha1().Certificates(existing.Namespace).Update(existing) + if err != nil { + c.Recorder.Eventf(r, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update Certificate %s/%s: %v", existing.Namespace, existing.Name, err) + return nil, err + } + c.Recorder.Eventf(existing, corev1.EventTypeNormal, "Updated", + "Updated Spec for Certificate %s/%s", existing.Namespace, existing.Name) + return cert, nil + } + } + return cert, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources_test.go new file mode 100644 index 0000000000..a88942659e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources_test.go @@ -0,0 +1,311 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package route + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakecertinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake" + fakeciinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/resources" + "knative.dev/serving/pkg/reconciler/route/traffic" + + . "knative.dev/pkg/logging/testing" + . 
"knative.dev/serving/pkg/testing/v1alpha1" +) + +func TestReconcileIngressInsert(t *testing.T) { + _, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route") + ci := newTestIngress(t, r) + + if _, err := reconciler.reconcileIngress(TestContextWithLogger(t), r, ci); err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestReconcileIngressUpdate(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route") + + ci := newTestIngress(t, r) + if _, err := reconciler.reconcileIngress(TestContextWithLogger(t), r, ci); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + updated := getRouteIngressFromClient(ctx, t, r) + fakeciinformer.Get(ctx).Informer().GetIndexer().Add(updated) + + ci2 := newTestIngress(t, r, func(tc *traffic.Config) { + tc.Targets[traffic.DefaultTarget][0].TrafficTarget.Percent = ptr.Int64(50) + tc.Targets[traffic.DefaultTarget] = append(tc.Targets[traffic.DefaultTarget], traffic.RevisionTarget{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(50), + RevisionName: "revision2", + }, + }) + }) + if _, err := reconciler.reconcileIngress(TestContextWithLogger(t), r, ci2); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + updated = getRouteIngressFromClient(ctx, t, r) + if diff := cmp.Diff(ci2, updated); diff != "" { + t.Errorf("Unexpected diff (-want +got): %v", diff) + } + if diff := cmp.Diff(ci, updated); diff == "" { + t.Error("Expected difference, but found none") + } +} + +func TestReconcileTargetValidRevision(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route", WithRouteLabel(map[string]string{"route": "test-route"})) + rev := newTestRevision(r.Namespace, "revision") + tc := traffic.Config{Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "revision", 
+ Percent: ptr.Int64(100), + }, + Active: true, + }}}} + + ctx = config.ToContext(ctx, &config.Config{ + GC: &gc.Config{ + StaleRevisionLastpinnedDebounce: time.Minute, + }, + }) + + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(r.Namespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // Get timestamp before reconciling, so that we can compare this to the last pinned timestamp + // after reconciliation + beforeTimestamp, err := getLastPinnedTimestamp(t, rev) + if err != nil { + t.Fatalf("Error getting last pinned: %v", err) + } + + if err = reconciler.reconcileTargetRevisions(ctx, &tc, r); err != nil { + t.Fatalf("Error reconciling target revisions: %v", err) + } + + // Verify last pinned annotation is updated correctly + newRev, err := fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(r.Namespace).Get(rev.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting revision: %v", err) + } + afterTimestamp, err := getLastPinnedTimestamp(t, newRev) + if err != nil { + t.Fatalf("Error getting last pinned timestamps: %v", err) + } + if beforeTimestamp == afterTimestamp { + t.Fatal("The last pinned timestamp is not updated") + } +} + +func TestReconcileRevisionTargetDoesNotExist(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route", WithRouteLabel(map[string]string{"route": "test-route"})) + rev := newTestRevision(r.Namespace, "revision") + tcInvalidRev := traffic.Config{Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "invalid-revision", + Percent: ptr.Int64(100), + }, + Active: true, + }}}} + ctx = config.ToContext(ctx, &config.Config{ + GC: &gc.Config{ + StaleRevisionLastpinnedDebounce: time.Minute, + }, + }) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(r.Namespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // 
Try reconciling target revisions for a revision that does not exist. No err should be returned + if err := reconciler.reconcileTargetRevisions(ctx, &tcInvalidRev, r); err != nil { + t.Fatalf("Error reconciling target revisions: %v", err) + } +} + +func newTestRevision(namespace string, name string) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{ + serving.RevisionLastPinnedAnnotationKey: v1alpha1.RevisionLastPinnedString(time.Now().Add(-1 * time.Hour)), + }, + }, + Spec: v1alpha1.RevisionSpec{}, + } +} + +func getLastPinnedTimestamp(t *testing.T, rev *v1alpha1.Revision) (string, error) { + lastPinnedTime, ok := rev.ObjectMeta.Annotations[serving.RevisionLastPinnedAnnotationKey] + if !ok { + return "", errors.New("last pinned annotation not found") + } + return lastPinnedTime, nil +} + +func newTestIngress(t *testing.T, r *v1alpha1.Route, trafficOpts ...func(tc *traffic.Config)) *netv1alpha1.Ingress { + tc := &traffic.Config{Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + Active: true, + }}}} + + for _, opt := range trafficOpts { + opt(tc) + } + + tls := []netv1alpha1.IngressTLS{{ + Hosts: []string{"test-route.test-ns.example.com"}, + SecretName: "test-secret", + SecretNamespace: "test-ns", + }} + ingress, err := resources.MakeIngress(getContext(), r, tc, tls, sets.NewString(), "foo-ingress") + if err != nil { + t.Errorf("Unexpected error %v", err) + } + return ingress +} + +func TestReconcileCertificatesInsert(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route") + certificate := newCerts([]string{"*.default.example.com"}, r) + if _, err := reconciler.reconcileCertificate(TestContextWithLogger(t), r, certificate); err != nil { + t.Errorf("Unexpected error: %v", err) 
+ } + created := getCertificateFromClient(t, ctx, certificate) + if diff := cmp.Diff(certificate, created); diff != "" { + t.Errorf("Unexpected diff (-want +got): %s", diff) + } +} + +func TestReconcileCertificateUpdate(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + r := Route("test-ns", "test-route") + certificate := newCerts([]string{"old.example.com"}, r) + if _, err := reconciler.reconcileCertificate(TestContextWithLogger(t), r, certificate); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + storedCert := getCertificateFromClient(t, ctx, certificate) + fakecertinformer.Get(ctx).Informer().GetIndexer().Add(storedCert) + + newCertificate := newCerts([]string{"new.example.com"}, r) + if _, err := reconciler.reconcileCertificate(TestContextWithLogger(t), r, newCertificate); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + updated := getCertificateFromClient(t, ctx, newCertificate) + if diff := cmp.Diff(newCertificate, updated); diff != "" { + t.Errorf("Unexpected diff (-want +got): %s", diff) + } + if diff := cmp.Diff(certificate, updated); diff == "" { + t.Error("Expected difference, but found none") + } +} + +func TestReconcileIngressClassAnnotation(t *testing.T) { + ctx, _, reconciler, _, cancel := newTestReconciler(t) + defer cancel() + + const expClass = "foo.ingress.networking.knative.dev" + + r := Route("test-ns", "test-route") + ci := newTestIngress(t, r) + if _, err := reconciler.reconcileIngress(TestContextWithLogger(t), r, ci); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + updated := getRouteIngressFromClient(ctx, t, r) + fakeciinformer.Get(ctx).Informer().GetIndexer().Add(updated) + + ci2 := newTestIngress(t, r) + // Add ingress.class annotation. 
+ ci2.ObjectMeta.Annotations[networking.IngressClassAnnotationKey] = expClass + + if _, err := reconciler.reconcileIngress(TestContextWithLogger(t), r, ci2); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + updated = getRouteIngressFromClient(ctx, t, r) + updatedClass := updated.ObjectMeta.Annotations[networking.IngressClassAnnotationKey] + if expClass != updatedClass { + t.Errorf("Unexpected annotation got %q want %q", updatedClass, expClass) + } +} + +func newCerts(dnsNames []string, r *v1alpha1.Route) *netv1alpha1.Certificate { + return &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cert", + Namespace: system.Namespace(), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(r)}, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: dnsNames, + SecretName: "test-secret", + }, + } +} + +func getContext() context.Context { + cfg := ReconcilerTestConfig(false) + return config.ToContext(context.Background(), cfg) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate.go new file mode 100644 index 0000000000..8bdc9466d5 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "hash/adler32" + "sort" + + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/resources" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/kmeta" + networkingv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/resources/names" +) + +// MakeCertificates creates an array of Certificate for the Route to request TLS certificates. +// domainTagMap is an one-to-one mapping between domain and tag, for major domain (tag-less), +// the value is an empty string +// Returns one certificate for each domain +func MakeCertificates(route *v1alpha1.Route, domainTagMap map[string]string, certClass string) []*networkingv1alpha1.Certificate { + order := make(sort.StringSlice, 0, len(domainTagMap)) + for dnsName := range domainTagMap { + order = append(order, dnsName) + } + order.Sort() + + var certs []*networkingv1alpha1.Certificate + for _, dnsName := range order { + tag := domainTagMap[dnsName] + + // k8s supports cert name only up to 63 chars and so is constructed as route-[UID]-[tag digest] + // where route-[UID] will take 42 characters and leaves 20 characters for tag digest (need to include `-`). + // We use https://golang.org/pkg/hash/adler32/#Checksum to compute the digest which returns a uint32. + // We represent the digest in unsigned integer format with maximum value of 4,294,967,295 which are 10 digits. 
+ // The "-[tag digest]" is computed only if there's a tag + certName := names.Certificate(route) + if tag != "" { + certName = fmt.Sprintf("%s-%d", certName, adler32.Checksum([]byte(tag))) + } + + certs = append(certs, &networkingv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: certName, + Namespace: route.Namespace, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(route)}, + Annotations: resources.FilterMap(resources.UnionMaps(map[string]string{ + networking.CertificateClassAnnotationKey: certClass, + }, route.ObjectMeta.Annotations), func(key string) bool { + return key == corev1.LastAppliedConfigAnnotation + }), + Labels: map[string]string{ + serving.RouteLabelKey: route.Name, + }, + }, + Spec: networkingv1alpha1.CertificateSpec{ + DNSNames: []string{dnsName}, + SecretName: certName, + }, + }) + } + return certs +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate_test.go new file mode 100644 index 0000000000..42aca5b4b1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/certificate_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package resources + +import ( + "testing" + + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + + "knative.dev/pkg/kmeta" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +var ( + dnsNameTagMap = map[string]string{ + "v1.default.example.com": "", + "v1-current.default.example.com": "current", + } + route = Route("default", "route", WithRouteUID("12345")) +) + +func TestMakeCertificates(t *testing.T) { + want := []*netv1alpha1.Certificate{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12345-200999684", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(route)}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: "foo-cert", + }, + Labels: map[string]string{ + serving.RouteLabelKey: "route", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"v1-current.default.example.com"}, + SecretName: "route-12345-200999684", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12345", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(route)}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: "foo-cert", + }, + Labels: map[string]string{ + serving.RouteLabelKey: "route", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"v1.default.example.com"}, + SecretName: "route-12345", + }, + }, + } + got := MakeCertificates(route, dnsNameTagMap, "foo-cert") + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MakeCertificate (-want, +got) = %v", diff) + } +} + +func TestMakeCertificates_FilterLastAppliedAnno(t *testing.T) { + var orgRoute = Route("default", "route", WithRouteUID("12345"), WithRouteLabel(map[string]string{"label-from-route": "foo", serving.RouteLabelKey: "foo"}), + 
WithRouteAnnotation(map[string]string{corev1.LastAppliedConfigAnnotation: "something-last-applied", networking.CertificateClassAnnotationKey: "passdown-cert"})) + want := []*netv1alpha1.Certificate{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12345-200999684", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(orgRoute)}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: "passdown-cert", + }, + Labels: map[string]string{ + serving.RouteLabelKey: "route", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"v1-current.default.example.com"}, + SecretName: "route-12345-200999684", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12345", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(orgRoute)}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: "passdown-cert", + }, + Labels: map[string]string{ + serving.RouteLabelKey: "route", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"v1.default.example.com"}, + SecretName: "route-12345", + }, + }, + } + got := MakeCertificates(orgRoute, dnsNameTagMap, "default-cert") + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MakeCertificate (-want, +got) = %v", diff) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/doc.go new file mode 100644 index 0000000000..7441d63b8e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources holds simple functions for synthesizing child resources +// from a Route resource and any relevant Route controller configuration. +package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters.go new file mode 100644 index 0000000000..9c20e36900 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/reconciler/route/config" +) + +// Filter is used for applying a function to a service +type Filter func(service *corev1.Service) bool + +// FilterService applies a filter to the list of services and return the services that are accepted +func FilterService(services []*corev1.Service, acceptFilter Filter) []*corev1.Service { + var filteredServices []*corev1.Service + + for i := range services { + service := services[i] + if acceptFilter(service) { + filteredServices = append(filteredServices, service) + } + } + + return filteredServices +} + +// Filter functions + +// IsClusterLocalService returns whether a service is cluster local. +func IsClusterLocalService(svc *corev1.Service) bool { + if visibility, ok := svc.GetLabels()[config.VisibilityLabelKey]; ok { + return config.VisibilityClusterLocal == visibility + } + + return false +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters_test.go new file mode 100644 index 0000000000..8605c4512e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/filters_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package resources + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "knative.dev/serving/pkg/reconciler/route/config" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestFilterService(t *testing.T) { + tests := []struct { + name string + services []*corev1.Service + acceptFilter Filter + want []*corev1.Service + }{ + { + name: "no services", + }, + { + name: "matches services", + services: []*corev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bar-2", + }, + }, + }, + acceptFilter: func(service *corev1.Service) bool { + return strings.HasPrefix(service.Name, "foo") + }, + want: []*corev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-1", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + if got := FilterService(tt.services, tt.acceptFilter); !cmp.Equal(got, tt.want) { + t.Errorf("FilterService() (-want, +got) = %v", cmp.Diff(tt.want, got)) + } + }) + } +} + +func TestIsClusterLocalService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + want bool + }{ + { + name: "Service does NOT have visibility label set", + svc: &corev1.Service{}, + want: false, + }, + { + name: "Service has visibility label set to anything but ClusterLocal", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + config.VisibilityLabelKey: "something-unknown", + }, + }, + }, + want: false, + }, + { + name: "Service has visibility label set to cluster local", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + config.VisibilityLabelKey: config.VisibilityClusterLocal, + }, + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsClusterLocalService(tt.svc); got != tt.want { + t.Errorf("IsClusterLocalService() = %v, want %v", got, tt.want) + } + }) 
+ } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go new file mode 100644 index 0000000000..00941d1f27 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go @@ -0,0 +1,242 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "sort" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/activator" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/domains" + "knative.dev/serving/pkg/reconciler/route/resources/labels" + "knative.dev/serving/pkg/reconciler/route/resources/names" + "knative.dev/serving/pkg/reconciler/route/traffic" + "knative.dev/serving/pkg/resources" +) + +// MakeIngressTLS creates IngressTLS to configure the ingress TLS. 
+func MakeIngressTLS(cert *v1alpha1.Certificate, hostNames []string) v1alpha1.IngressTLS { + return v1alpha1.IngressTLS{ + Hosts: hostNames, + SecretName: cert.Spec.SecretName, + SecretNamespace: cert.Namespace, + } +} + +// MakeIngress creates Ingress to set up routing rules. Such Ingress specifies +// which Hosts that it applies to, as well as the routing rules. +func MakeIngress( + ctx context.Context, + r *servingv1alpha1.Route, + tc *traffic.Config, + tls []v1alpha1.IngressTLS, + clusterLocalServices sets.String, + ingressClass string, + acmeChallenges ...v1alpha1.HTTP01Challenge, +) (*v1alpha1.Ingress, error) { + spec, err := MakeIngressSpec(ctx, r, tls, clusterLocalServices, tc.Targets, acmeChallenges...) + if err != nil { + return nil, err + } + return &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Ingress(r), + Namespace: r.Namespace, + Labels: resources.UnionMaps(r.ObjectMeta.Labels, map[string]string{ + serving.RouteLabelKey: r.Name, + serving.RouteNamespaceLabelKey: r.Namespace, + }), + Annotations: resources.FilterMap(resources.UnionMaps(map[string]string{ + networking.IngressClassAnnotationKey: ingressClass, + }, r.GetAnnotations()), func(key string) bool { + return key == corev1.LastAppliedConfigAnnotation + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(r)}, + }, + Spec: spec, + }, nil +} + +// MakeIngressSpec creates a new IngressSpec +func MakeIngressSpec( + ctx context.Context, + r *servingv1alpha1.Route, + tls []v1alpha1.IngressTLS, + clusterLocalServices sets.String, + targets map[string]traffic.RevisionTargets, + acmeChallenges ...v1alpha1.HTTP01Challenge, +) (v1alpha1.IngressSpec, error) { + // Domain should have been specified in route status + // before calling this func. + names := make([]string, 0, len(targets)) + for name := range targets { + names = append(names, name) + } + // Sort the names to give things a deterministic ordering. 
+ sort.Strings(names) + // The routes are matching rule based on domain name to traffic split targets. + rules := make([]v1alpha1.IngressRule, 0, len(names)) + challengeHosts := getChallengeHosts(acmeChallenges) + + for _, name := range names { + serviceDomain, err := domains.HostnameFromTemplate(ctx, r.Name, name) + if err != nil { + return v1alpha1.IngressSpec{}, err + } + + isClusterLocal := clusterLocalServices.Has(serviceDomain) + + routeDomains, err := routeDomains(ctx, name, r, isClusterLocal) + if err != nil { + return v1alpha1.IngressSpec{}, err + } + + rule := *makeIngressRule(routeDomains, r.Namespace, isClusterLocal, targets[name]) + rule.HTTP.Paths = append(makeACMEIngressPaths(challengeHosts, routeDomains), rule.HTTP.Paths...) + + rules = append(rules, rule) + } + + defaultDomain, err := domains.HostnameFromTemplate(ctx, r.Name, "") + if err != nil { + return v1alpha1.IngressSpec{}, err + } + + visibility := v1alpha1.IngressVisibilityExternalIP + if clusterLocalServices.Has(defaultDomain) { + visibility = v1alpha1.IngressVisibilityClusterLocal + } + + return v1alpha1.IngressSpec{ + Rules: rules, + Visibility: visibility, + TLS: tls, + }, nil +} + +func getChallengeHosts(challenges []v1alpha1.HTTP01Challenge) map[string]v1alpha1.HTTP01Challenge { + c := make(map[string]v1alpha1.HTTP01Challenge, len(challenges)) + + for _, challenge := range challenges { + c[challenge.URL.Host] = challenge + } + + return c +} + +func routeDomains(ctx context.Context, targetName string, r *servingv1alpha1.Route, isClusterLocal bool) ([]string, error) { + hostname, err := domains.HostnameFromTemplate(ctx, r.Name, targetName) + if err != nil { + return nil, err + } + + meta := r.ObjectMeta.DeepCopy() + labels.SetVisibility(meta, true) + clusterLocalName, err := domains.DomainNameFromTemplate(ctx, *meta, hostname) + if err != nil { + return nil, err + } + ruleDomains := []string{clusterLocalName} + + if !isClusterLocal { + labels.SetVisibility(meta, false) + fullName, err 
:= domains.DomainNameFromTemplate(ctx, *meta, hostname) + if err != nil { + return nil, err + } + + if fullName != clusterLocalName { + ruleDomains = append(ruleDomains, fullName) + } + } + + return ruleDomains, nil +} + +func makeACMEIngressPaths(challenges map[string]v1alpha1.HTTP01Challenge, domains []string) []v1alpha1.HTTPIngressPath { + paths := make([]v1alpha1.HTTPIngressPath, 0, len(challenges)) + for _, domain := range domains { + challenge, ok := challenges[domain] + if !ok { + continue + } + + paths = append(paths, v1alpha1.HTTPIngressPath{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: challenge.ServiceNamespace, + ServiceName: challenge.ServiceName, + ServicePort: challenge.ServicePort, + }, + Percent: 100, + }}, + Path: challenge.URL.Path, + }) + } + return paths +} + +func makeIngressRule(domains []string, ns string, isClusterLocal bool, targets traffic.RevisionTargets) *v1alpha1.IngressRule { + // Optimistically allocate |targets| elements. + splits := make([]v1alpha1.IngressBackendSplit, 0, len(targets)) + for _, t := range targets { + if t.Percent == nil || *t.Percent == 0 { + continue + } + + splits = append(splits, v1alpha1.IngressBackendSplit{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: t.ServiceName, + // Port on the public service must match port on the activator. + // Otherwise, the serverless services can't guarantee seamless positive handoff. 
+ ServicePort: intstr.FromInt(int(networking.ServicePort(t.Protocol))), + }, + Percent: int(*t.Percent), + AppendHeaders: map[string]string{ + activator.RevisionHeaderName: t.TrafficTarget.RevisionName, + activator.RevisionHeaderNamespace: ns, + }, + }) + } + + visibility := v1alpha1.IngressVisibilityExternalIP + if isClusterLocal { + visibility = v1alpha1.IngressVisibilityClusterLocal + } + + return &v1alpha1.IngressRule{ + Hosts: domains, + Visibility: visibility, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: splits, + // TODO(lichuqiang): #2201, plumbing to config timeout and retries. + }}, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress_test.go new file mode 100644 index 0000000000..d35ae23d23 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress_test.go @@ -0,0 +1,870 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/google/go-cmp/cmp" + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/traffic" + + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + + _ "knative.dev/pkg/system/testing" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const ( + ns = "test-ns" + + testRouteName = "test-route" + testAnnotationValue = "test-annotation-value" + testIngressClass = "test-ingress" +) + +func getServiceVisibility() sets.String { + return sets.NewString() +} + +func TestMakeIngress_CorrectMetadata(t *testing.T) { + targets := map[string]traffic.RevisionTargets{} + ingressClass := "ng-ingress" + passdownIngressClass := "ok-ingress" + r := Route(ns, "test-route", WithRouteLabel(map[string]string{ + serving.RouteLabelKey: "try-to-override", + serving.RouteNamespaceLabelKey: "try-to-override", + "test-label": "foo", + }), WithRouteAnnotation(map[string]string{ + networking.IngressClassAnnotationKey: passdownIngressClass, + "test-annotation": "bar", + }), WithRouteUID("1234-5678"), WithURL) + expected := metav1.ObjectMeta{ + Name: "test-route", + Namespace: ns, + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: ns, + "test-label": "foo", + }, + Annotations: map[string]string{ + // Make sure to get passdownIngressClass instead of ingressClass + networking.IngressClassAnnotationKey: passdownIngressClass, + "test-annotation": "bar", + }, + OwnerReferences: 
[]metav1.OwnerReference{*kmeta.NewControllerRef(r)}, + } + ia, err := MakeIngress(getContext(), r, &traffic.Config{Targets: targets}, nil, getServiceVisibility(), ingressClass) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, ia.ObjectMeta) { + t.Errorf("Unexpected metadata (-want, +got): %s", cmp.Diff(expected, ia.ObjectMeta)) + } +} + +func TestIngress_NoKubectlAnnotation(t *testing.T) { + targets := map[string]traffic.RevisionTargets{} + r := Route(ns, testRouteName, WithRouteAnnotation(map[string]string{ + networking.IngressClassAnnotationKey: testIngressClass, + corev1.LastAppliedConfigAnnotation: testAnnotationValue, + }), WithRouteUID("1234-5678"), WithURL) + ia, err := MakeIngress(getContext(), r, &traffic.Config{Targets: targets}, nil, getServiceVisibility(), testIngressClass) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if v, ok := ia.Annotations[corev1.LastAppliedConfigAnnotation]; ok { + t.Errorf("Annotation %s = %q, want empty", corev1.LastAppliedConfigAnnotation, v) + } +} + +func TestMakeIngressSpec_CorrectRules(t *testing.T) { + targets := map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v2", + Percent: ptr.Int64(100), + }, + ServiceName: "gilberto", + Active: true, + }}, + "v1": {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v1", + Percent: ptr.Int64(100), + }, + ServiceName: "jobim", + Active: true, + }}, + } + + r := Route(ns, "test-route", WithURL) + + expected := []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route." + ns + ".svc.cluster.local", + "test-route." 
+ ns + ".example.com", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "gilberto", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "v2", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "v1-test-route." + ns + ".svc.cluster.local", + "v1-test-route." + ns + ".example.com", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "jobim", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "v1", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }} + + ci, err := MakeIngressSpec(getContext(), r, nil, getServiceVisibility(), targets) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, ci.Rules) { + t.Errorf("Unexpected rules (-want, +got): %s", cmp.Diff(expected, ci.Rules)) + } +} + +func TestMakeIngressSpec_CorrectVisibility(t *testing.T) { + cases := []struct { + name string + route *v1alpha1.Route + serviceVisibility sets.String + expectedVisibility netv1alpha1.IngressVisibility + }{{ + name: "public route", + route: Route("", "", WithURL), + + expectedVisibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + name: "private route", + route: Route("", "", WithAddress), + serviceVisibility: sets.NewString(""), + expectedVisibility: netv1alpha1.IngressVisibilityClusterLocal, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ci, err := MakeIngressSpec(getContext(), c.route, nil, 
c.serviceVisibility, nil) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(c.expectedVisibility, ci.Visibility) { + t.Errorf("Unexpected visibility (-want, +got): %s", cmp.Diff(c.expectedVisibility, ci.Visibility)) + } + }) + } +} + +func TestMakeIngressSpec_CorrectRuleVisibility(t *testing.T) { + cases := []struct { + name string + route *v1alpha1.Route + targets map[string]traffic.RevisionTargets + serviceVisibility sets.String + expectedVisibility netv1alpha1.IngressVisibility + }{{ + name: "public route", + route: Route("default", "myroute", WithURL), + targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v2", + Percent: ptr.Int64(100), + }, + ServiceName: "gilberto", + Active: true, + }}, + }, + expectedVisibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + name: "private route", + route: Route("default", "myroute", WithLocalDomain), + targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v2", + Percent: ptr.Int64(100), + }, + ServiceName: "gilberto", + Active: true, + }}, + }, + serviceVisibility: sets.NewString("myroute"), + expectedVisibility: netv1alpha1.IngressVisibilityClusterLocal, + }, { + name: "unspecified route", + route: Route("default", "myroute", WithLocalDomain), + targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v2", + Percent: ptr.Int64(100), + }, + ServiceName: "gilberto", + Active: true, + }}, + }, + serviceVisibility: getServiceVisibility(), + expectedVisibility: netv1alpha1.IngressVisibilityExternalIP, + }} + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + ci, err := MakeIngressSpec(getContext(), c.route, nil, c.serviceVisibility, c.targets) + if err != nil { + 
t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(c.expectedVisibility, ci.Rules[0].Visibility) { + t.Errorf("Unexpected visibility (-want, +got): %s", cmp.Diff(c.expectedVisibility, ci.Rules[0].Visibility)) + } + }) + } +} + +func TestGetRouteDomains_NamelessTargetDup(t *testing.T) { + r := Route("test-ns", "test-route", WithURL) + expected := []string{ + "test-route." + ns + ".svc.cluster.local", + "test-route." + ns + ".example.com", + } + domains, err := routeDomains(getContext(), "", r, false) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, domains) { + t.Errorf("Unexpected domains (-want, +got): %s", cmp.Diff(expected, domains)) + } +} +func TestGetRouteDomains_NamelessTarget(t *testing.T) { + r := Route("test-ns", "test-route", WithURL) + expected := []string{ + "test-route." + ns + ".svc.cluster.local", + "test-route." + ns + ".example.com", + } + domains, err := routeDomains(getContext(), "", r, false) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, domains) { + t.Errorf("Unexpected domains (-want, +got): %s", cmp.Diff(expected, domains)) + } +} + +func TestGetRouteDomains_NamedTarget(t *testing.T) { + const ( + name = "v1" + ) + r := Route("test-ns", "test-route", WithURL) + expected := []string{ + + "v1-test-route." + ns + ".svc.cluster.local", + "v1-test-route." + ns + ".example.com", + } + domains, err := routeDomains(getContext(), name, r, false) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, domains) { + t.Errorf("Unexpected domains (-want, +got): %s", cmp.Diff(expected, domains)) + } +} + +// One active target. 
+func TestMakeIngressRule_Vanilla(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "chocolate", + Active: true, + }} + domains := []string{"a.com", "b.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{ + "a.com", + "b.org", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "chocolate", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +// One active target and a target of zero percent. 
+func TestMakeIngressRule_ZeroPercentTarget(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "active-target", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: ptr.Int64(0), + }, + Active: true, + }} + domains := []string{"test.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{"test.org"}, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "active-target", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +// One active target and a target of nil (implied zero) percent. 
+func TestMakeIngressRule_NilPercentTarget(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "active-target", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: nil, + }, + Active: true, + }} + domains := []string{"test.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{"test.org"}, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "active-target", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +// Two active targets. 
+func TestMakeIngressRule_TwoTargets(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(80), + }, + ServiceName: "nigh", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: ptr.Int64(20), + }, + ServiceName: "death", + Active: true, + }} + domains := []string{"test.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{"test.org"}, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "nigh", + ServicePort: intstr.FromInt(80), + }, + Percent: 80, + AppendHeaders: map[string]string{ + "Knative-Serving-Namespace": ns, + "Knative-Serving-Revision": "revision", + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "death", + ServicePort: intstr.FromInt(80), + }, + Percent: 20, + AppendHeaders: map[string]string{ + "Knative-Serving-Namespace": ns, + "Knative-Serving-Revision": "new-revision", + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +// Inactive target. 
+func TestMakeIngressRule_InactiveTarget(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "strange-quark", + Active: false, + }} + domains := []string{"a.com", "b.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{ + "a.com", + "b.org", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: targets[0].ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +// Two inactive targets. 
+func TestMakeIngressRule_TwoInactiveTargets(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(80), + }, + ServiceName: "up-quark", + Active: false, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: ptr.Int64(20), + }, + ServiceName: "down-quark", + Active: false, + }} + domains := []string{"a.com", "b.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{ + "a.com", + "b.org", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: targets[0].ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 80, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: targets[1].ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 20, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "new-revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +func TestMakeIngressRule_ZeroPercentTargetInactive(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "apathy-sets-in", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: ptr.Int64(0), + }, + Active: false, + }} + domains := 
[]string{"test.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{"test.org"}, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "apathy-sets-in", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +func TestMakeIngressRule_NilPercentTargetInactive(t *testing.T) { + targets := []traffic.RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "revision", + Percent: ptr.Int64(100), + }, + ServiceName: "apathy-sets-in", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "new-config", + RevisionName: "new-revision", + Percent: nil, + }, + Active: false, + }} + domains := []string{"test.org"} + rule := makeIngressRule(domains, ns, false, targets) + expected := netv1alpha1.IngressRule{ + Hosts: []string{"test.org"}, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: ns, + ServiceName: "apathy-sets-in", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "revision", + "Knative-Serving-Namespace": ns, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + } + + if !cmp.Equal(&expected, rule) { + t.Errorf("Unexpected rule (-want, +got): %s", cmp.Diff(&expected, rule)) + } +} + +func TestMakeIngress_WithTLS(t *testing.T) { + 
targets := map[string]traffic.RevisionTargets{} + ingressClass := "foo-ingress" + r := Route(ns, "test-route", WithRouteUID("1234-5678"), WithURL) + tls := []netv1alpha1.IngressTLS{{ + Hosts: []string{"*.default.domain.com"}, + SecretName: "secret", + }} + expected := &netv1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-route", + Namespace: ns, + Annotations: map[string]string{ + networking.IngressClassAnnotationKey: ingressClass, + }, + Labels: map[string]string{ + serving.RouteLabelKey: "test-route", + serving.RouteNamespaceLabelKey: ns, + }, + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(r)}, + }, + Spec: netv1alpha1.IngressSpec{ + Rules: []netv1alpha1.IngressRule{}, + TLS: tls, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, + } + got, err := MakeIngress(getContext(), r, &traffic.Config{Targets: targets}, tls, getServiceVisibility(), ingressClass) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if diff := cmp.Diff(expected, got); diff != "" { + t.Errorf("Unexpected metadata (-want, +got): %v", diff) + } +} + +func TestMakeIngressTLS(t *testing.T) { + cert := &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-1234", + Namespace: system.Namespace(), + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"test.default.example.com", "v1.test.default.example.com"}, + SecretName: "route-1234", + }, + } + want := netv1alpha1.IngressTLS{ + Hosts: []string{"test.default.example.com", "v1.test.default.example.com"}, + SecretName: "route-1234", + SecretNamespace: system.Namespace(), + } + hostNames := []string{"test.default.example.com", "v1.test.default.example.com"} + got := MakeIngressTLS(cert, hostNames) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected IngressTLS (-want, +got): %v", diff) + } +} + +func TestMakeClusterIngress_ACMEChallenges(t *testing.T) { + targets := map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: 
v1.TrafficTarget{ + ConfigurationName: "config", + RevisionName: "v2", + Percent: ptr.Int64(100), + }, + ServiceName: "gilberto", + Active: true, + }}, + } + + r := &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-route", + Namespace: "test-ns", + }, + Status: v1alpha1.RouteStatus{ + RouteStatusFields: v1alpha1.RouteStatusFields{ + URL: &apis.URL{ + Scheme: "http", + Host: "domain.com", + }, + }, + }, + } + + acmeChallenge := netv1alpha1.HTTP01Challenge{ + ServiceNamespace: "test-ns", + ServiceName: "cm-solver", + ServicePort: intstr.FromInt(8090), + URL: &apis.URL{ + Scheme: "http", + Path: "/.well-known/acme-challenge/challenge-token", + Host: "test-route.test-ns.example.com", + }, + } + + expected := []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test-ns.svc.cluster.local", + "test-route.test-ns.example.com", + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Path: "/.well-known/acme-challenge/challenge-token", + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "cm-solver", + ServicePort: intstr.FromInt(8090), + }, + Percent: 100, + }}, + }, { + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: "test-ns", + ServiceName: "gilberto", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "v2", + "Knative-Serving-Namespace": "test-ns", + }, + }}, + }}}}} + + ci, err := MakeIngressSpec(getContext(), r, nil, getServiceVisibility(), targets, acmeChallenge) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if !cmp.Equal(expected, ci.Rules) { + t.Errorf("Unexpected rules (-want, +got): %s", cmp.Diff(expected, ci.Rules)) + } + +} + +func getContext() context.Context { + ctx := context.Background() + cfg := testConfig() + 
return config.ToContext(ctx, cfg) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/doc.go new file mode 100644 index 0000000000..49e42e5bba --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package labels holds simple functions for working with ObjectMeta labels. +package labels diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go new file mode 100644 index 0000000000..c823777904 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labels + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/reconciler/route/config" +) + +// IsObjectLocalVisibility returns whether an ObjectMeta is of cluster-local visibility +func IsObjectLocalVisibility(meta v1.ObjectMeta) bool { + return meta.Labels != nil && meta.Labels[config.VisibilityLabelKey] != "" +} + +// SetVisibility sets the visibility on an ObjectMeta +func SetVisibility(meta *v1.ObjectMeta, isClusterLocal bool) { + if isClusterLocal { + SetLabel(meta, config.VisibilityLabelKey, config.VisibilityClusterLocal) + } else { + DeleteLabel(meta, config.VisibilityLabelKey) + } +} + +// SetLabel sets/update the label of the an ObjectMeta +func SetLabel(meta *v1.ObjectMeta, key string, value string) { + if meta.Labels == nil { + meta.Labels = make(map[string]string) + } + + meta.Labels[key] = value +} + +// DeleteLabel removes a label from the ObjectMeta +func DeleteLabel(meta *v1.ObjectMeta, key string) { + if meta.Labels != nil { + delete(meta.Labels, key) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels_test.go new file mode 100644 index 0000000000..994d6e70f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/labels/labels_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labels + +import ( + "testing" + + "knative.dev/serving/pkg/reconciler/route/config" + + "github.com/google/go-cmp/cmp" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestDeleteLabel(t *testing.T) { + tests := []struct { + name string + meta *v1.ObjectMeta + key string + expected v1.ObjectMeta + }{ + { + name: "No labels in object meta", + meta: &v1.ObjectMeta{}, + key: "key", + expected: v1.ObjectMeta{}, + }, + { + name: "No matching key", + meta: &v1.ObjectMeta{ + Labels: map[string]string{"some label": "some value"}, + }, + key: "unknown", + expected: v1.ObjectMeta{ + Labels: map[string]string{"some label": "some value"}, + }, + }, + { + name: "Has matching key", + meta: &v1.ObjectMeta{ + Labels: map[string]string{"some label": "some value"}, + }, + key: "some label", + expected: v1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + DeleteLabel(tt.meta, tt.key) + + if !cmp.Equal(tt.expected, *tt.meta) { + t.Errorf("DeleteLabel (-want, +got) = %v", + cmp.Diff(tt.expected, *tt.meta)) + } + }) + } +} + +func TestSetLabel(t *testing.T) { + tests := []struct { + name string + meta *v1.ObjectMeta + key string + value string + + expected v1.ObjectMeta + }{ + { + name: "No labels in object meta", + meta: &v1.ObjectMeta{}, + key: "key", + value: "value", + expected: v1.ObjectMeta{ + Labels: map[string]string{"key": "value"}, + }, + }, + { + name: "Empty labels", + meta: &v1.ObjectMeta{ + Labels: map[string]string{}, + }, + key: "key", + value: "value", + expected: v1.ObjectMeta{ + Labels: map[string]string{"key": "value"}, + }, + }, + { + name: "Conflicting labels", + meta: &v1.ObjectMeta{ + Labels: map[string]string{"key": "old value"}, + }, + key: "key", + value: "new value", + expected: v1.ObjectMeta{ + Labels: map[string]string{"key": "new value"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + SetLabel(tt.meta, tt.key, tt.value) + + 
if !cmp.Equal(tt.expected, *tt.meta) { + t.Errorf("DeleteLabel (-want, +got) = %v", + cmp.Diff(tt.expected, *tt.meta)) + } + }) + } +} + +func TestSetVisibility(t *testing.T) { + tests := []struct { + name string + meta *v1.ObjectMeta + isClusterLocal bool + expected v1.ObjectMeta + }{ + { + name: "Set cluster local true", + meta: &v1.ObjectMeta{}, + isClusterLocal: true, + expected: v1.ObjectMeta{Labels: map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal}}, + }, + { + name: "Set cluster local false", + meta: &v1.ObjectMeta{Labels: map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal}}, + isClusterLocal: false, + expected: v1.ObjectMeta{Labels: map[string]string{}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + SetVisibility(tt.meta, tt.isClusterLocal) + }) + } +} diff --git a/test/vendor/github.com/knative/pkg/kmp/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/doc.go similarity index 78% rename from test/vendor/github.com/knative/pkg/kmp/doc.go rename to test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/doc.go index 4e54ae1e30..aa96d4baa3 100644 --- a/test/vendor/github.com/knative/pkg/kmp/doc.go +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/doc.go @@ -14,6 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package kmp wraps github.com/google/go-cmp with custom Comparers for -// frequently used kubernetes resources that have unexported fields. -package kmp +// Package names holds simple functions for synthesizing resource names. 
+package names diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names.go new file mode 100644 index 0000000000..6d413e8525 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package names + +import ( + "fmt" + + "knative.dev/pkg/kmeta" + "knative.dev/pkg/network" +) + +func K8sService(route kmeta.Accessor) string { + return route.GetName() +} + +func K8sServiceFullname(route kmeta.Accessor) string { + return network.GetServiceHostname(K8sService(route), route.GetNamespace()) +} + +// Ingress returns the name for the Ingress +// child resource for the given Route. +func Ingress(route kmeta.Accessor) string { + return kmeta.ChildName(route.GetName(), "") +} + +// Certificate returns the name for the Certificate +// child resource for the given Route. 
+func Certificate(route kmeta.Accessor) string { + return fmt.Sprintf("route-%s", route.GetUID()) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names_test.go new file mode 100644 index 0000000000..2ab5b5268d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/names/names_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestNamer(t *testing.T) { + tests := []struct { + name string + route *v1alpha1.Route + f func(kmeta.Accessor) string + want string + }{{ + name: "K8sService", + route: getRoute("blah", "default", ""), + f: K8sService, + want: "blah", + }, { + name: "K8sServiceFullname", + route: getRoute("bar", "default", ""), + f: K8sServiceFullname, + want: "bar.default.svc.cluster.local", + }, { + name: "IngressPrefix", + route: getRoute("bar", "default", "1234-5678-910"), + f: Ingress, + want: "bar", + }, { + name: "Certificate", + route: getRoute("bar", "default", "1234-5678-910"), + f: Certificate, + want: "route-1234-5678-910", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.f(test.route) + if got != test.want { + t.Errorf("%s() = %v, wanted %v", test.name, got, test.want) + } + }) + } +} + +func getRoute(name, ns string, uid types.UID) *v1alpha1.Route { + return &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + UID: uid, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service.go new file mode 100644 index 0000000000..9e3a357324 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service.go @@ -0,0 +1,201 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/domains" +) + +var errLoadBalancerNotFound = errors.New("failed to fetch loadbalancer domain/IP from ingress status") + +// GetNames returns a set of service names. +func GetNames(services []*corev1.Service) sets.String { + names := sets.NewString() + + for i := range services { + names.Insert(services[i].Name) + } + + return names +} + +// SelectorFromRoute creates a label selector given a specific route. +func SelectorFromRoute(route *v1alpha1.Route) labels.Selector { + return labels.SelectorFromSet( + labels.Set{ + serving.RouteLabelKey: route.Name, + }, + ) +} + +// MakeK8sPlaceholderService creates a placeholder Service to prevent naming collisions. It's owned by the +// provided v1alpha1.Route. The purpose of this service is to provide a placeholder domain name for Istio routing. 
+func MakeK8sPlaceholderService(ctx context.Context, route *v1alpha1.Route, targetName string) (*corev1.Service, error) { + hostname, err := domains.HostnameFromTemplate(ctx, route.Name, targetName) + if err != nil { + return nil, err + } + fullName, err := domains.DomainNameFromTemplate(ctx, route.ObjectMeta, hostname) + if err != nil { + return nil, err + } + + service, err := makeK8sService(ctx, route, targetName) + if err != nil { + return nil, err + } + service.Spec = corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: fullName, + SessionAffinity: corev1.ServiceAffinityNone, + } + + return service, nil +} + +// MakeK8sService creates a Service that redirect to the loadbalancer specified +// in Ingress status. It's owned by the provided v1alpha1.Route. +// The purpose of this service is to provide a domain name for Istio routing. +func MakeK8sService(ctx context.Context, route *v1alpha1.Route, targetName string, ingress *netv1alpha1.Ingress, isPrivate bool) (*corev1.Service, error) { + svcSpec, err := makeServiceSpec(ingress, isPrivate) + if err != nil { + return nil, err + } + + service, err := makeK8sService(ctx, route, targetName) + if err != nil { + return nil, err + } + service.Spec = *svcSpec + return service, nil +} + +func makeK8sService(ctx context.Context, route *v1alpha1.Route, targetName string) (*corev1.Service, error) { + hostname, err := domains.HostnameFromTemplate(ctx, route.Name, targetName) + if err != nil { + return nil, err + } + + svcLabels := map[string]string{ + serving.RouteLabelKey: route.Name, + } + + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: hostname, + Namespace: route.Namespace, + OwnerReferences: []metav1.OwnerReference{ + // This service is owned by the Route. 
+ *kmeta.NewControllerRef(route), + }, + Labels: svcLabels, + }, + }, nil +} + +func makeServiceSpec(ingress *netv1alpha1.Ingress, isPrivate bool) (*corev1.ServiceSpec, error) { + ingressStatus := ingress.Status + + var lbStatus *netv1alpha1.LoadBalancerStatus + + if isPrivate || ingressStatus.PrivateLoadBalancer != nil { + // Always use private load balancer if it exists, + // because k8s service is only useful for inter-cluster communication. + // External communication will be handle via ingress gateway, which won't be affected by what is configured here. + lbStatus = ingressStatus.PrivateLoadBalancer + } else { + lbStatus = ingressStatus.PublicLoadBalancer + } + + if lbStatus == nil || len(lbStatus.Ingress) == 0 { + return nil, errLoadBalancerNotFound + } + if len(lbStatus.Ingress) > 1 { + // Return error as we only support one LoadBalancer currently. + return nil, fmt.Errorf("more than one ingress are specified in status(LoadBalancer) of Ingress %s", ingress.GetName()) + } + balancer := lbStatus.Ingress[0] + + // Here we decide LoadBalancer information in the order of + // DomainInternal > Domain > LoadBalancedIP to prioritize cluster-local, + // and domain (since it would change less than IP). + switch { + case len(balancer.DomainInternal) != 0: + return &corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: balancer.DomainInternal, + SessionAffinity: corev1.ServiceAffinityNone, + }, nil + case len(balancer.Domain) != 0: + return &corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: balancer.Domain, + }, nil + case balancer.MeshOnly: + // The Ingress is loadbalanced through a Service mesh. + // We won't have a specific LB endpoint to route traffic to, + // but we still need to create a ClusterIP service to make + // sure the domain name is available for access within the + // mesh. 
+ return &corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Port: networking.ServiceHTTPPort, + }}, + }, nil + case len(balancer.IP) != 0: + // TODO(lichuqiang): deal with LoadBalancer IP. + // We'll also need ports info to make it take effect. + } + return nil, errLoadBalancerNotFound +} + +// GetDesiredServiceNames returns a list of service names that we expect to create +func GetDesiredServiceNames(ctx context.Context, route *v1alpha1.Route) (sets.String, error) { + traffic := route.Spec.Traffic + + // We always want create the route with the service name. + // If the traffic stanza only contains revision targets, then + // this will not be added below, and as a consequence we'll create + // a public route to it. + names := sets.NewString(route.Name) + + for _, t := range traffic { + serviceName, err := domains.HostnameFromTemplate(ctx, route.Name, t.Tag) + if err != nil { + return sets.String{}, err + } + names.Insert(serviceName) + } + + return names, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service_test.go new file mode 100644 index 0000000000..1b0d9931f9 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/resources/service_test.go @@ -0,0 +1,481 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/traffic" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/kmeta" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +var ( + r = Route("test-ns", "test-route") + expectedMeta = metav1.ObjectMeta{ + Name: "test-route", + Namespace: "test-ns", + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(r), + }, + Labels: map[string]string{ + serving.RouteLabelKey: r.Name, + }, + } +) + +func TestNewMakeK8SService(t *testing.T) { + scenarios := map[string]struct { + // Inputs + route *v1alpha1.Route + ingress *netv1alpha1.Ingress + targetName string + expectedSpec corev1.ServiceSpec + expectedMeta metav1.ObjectMeta + shouldFail bool + }{ + "no-loadbalancer": { + route: r, + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{}, + }, + expectedMeta: expectedMeta, + shouldFail: true, + }, + "empty-loadbalancer": { + route: r, + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{}}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{}}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{}}, + }, + }, + }, + expectedMeta: expectedMeta, + shouldFail: true, + }, + "multi-loadbalancer": { + route: r, + ingress: 
&netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{ + Domain: "domain.com", + }, { + DomainInternal: "domain.com", + }}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{ + Domain: "domain.com", + }, { + DomainInternal: "domain.com", + }}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{ + Domain: "domain.com", + }, { + DomainInternal: "domain.com", + }}, + }, + }, + }, + expectedMeta: expectedMeta, + shouldFail: true, + }, + "ingress-with-domain": { + route: r, + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{Domain: "domain.com"}}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{Domain: "domain.com"}}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{Domain: "domain.com"}}, + }, + }, + }, + expectedMeta: expectedMeta, + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "domain.com", + }, + }, + "ingress-with-domaininternal": { + route: r, + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{DomainInternal: "istio-ingressgateway.istio-system.svc.cluster.local"}}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{DomainInternal: "istio-ingressgateway.istio-system.svc.cluster.local"}}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{DomainInternal: 
"private-istio-ingressgateway.istio-system.svc.cluster.local"}}, + }, + }, + }, + expectedMeta: expectedMeta, + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "private-istio-ingressgateway.istio-system.svc.cluster.local", + SessionAffinity: corev1.ServiceAffinityNone, + }, + }, + "ingress-with-only-mesh": { + route: r, + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + }, + }, + expectedMeta: expectedMeta, + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{ + Name: "http", + Port: 80, + }}, + }, + }, + "with-target-name-specified": { + route: r, + targetName: "my-target-name", + ingress: &netv1alpha1.Ingress{ + Status: netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + PublicLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + PrivateLoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{MeshOnly: true}}, + }, + }, + }, + expectedMeta: metav1.ObjectMeta{ + Name: "my-target-name-test-route", + Namespace: r.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(r), + }, + Labels: map[string]string{ + serving.RouteLabelKey: r.Name, + }, + }, + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Ports: []corev1.ServicePort{{ + Name: "http", + Port: 80, + }}, + }, + }, + } + + for name, scenario := range scenarios { + 
t.Run(name, func(t *testing.T) { + cfg := testConfig() + ctx := config.ToContext(context.Background(), cfg) + service, err := MakeK8sService(ctx, scenario.route, scenario.targetName, scenario.ingress, false) + // Validate + if scenario.shouldFail && err == nil { + t.Errorf("Test %q failed: returned success but expected error", name) + } + if !scenario.shouldFail { + if err != nil { + t.Errorf("Test %q failed: returned error: %v", name, err) + } + + if !cmp.Equal(scenario.expectedMeta, service.ObjectMeta) { + t.Errorf("Unexpected Metadata (-want +got): %s", cmp.Diff(scenario.expectedMeta, service.ObjectMeta)) + } + if !cmp.Equal(scenario.expectedSpec, service.Spec) { + t.Errorf("Unexpected ServiceSpec (-want +got): %s", cmp.Diff(scenario.expectedSpec, service.Spec)) + } + } + }) + } +} + +func TestMakeK8sPlaceholderService(t *testing.T) { + tests := []struct { + name string + expectedSpec corev1.ServiceSpec + expectedLabels map[string]string + wantErr bool + route *v1alpha1.Route + }{{ + name: "default public domain route", + route: r, + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "foo-test-route.test-ns.example.com", + SessionAffinity: corev1.ServiceAffinityNone, + }, + expectedLabels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + wantErr: false, + }, { + name: "cluster local route", + route: Route("test-ns", "test-route", WithRouteLabel(map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal})), + expectedSpec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "foo-test-route.test-ns.svc.cluster.local", + SessionAffinity: corev1.ServiceAffinityNone, + }, + expectedLabels: map[string]string{ + serving.RouteLabelKey: "test-route", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := testConfig() + ctx := config.ToContext(context.Background(), cfg) + target := traffic.RevisionTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: 
"foo", + }, + } + + got, err := MakeK8sPlaceholderService(ctx, tt.route, target.Tag) + if (err != nil) != tt.wantErr { + t.Errorf("MakeK8sPlaceholderService() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if got == nil { + t.Fatal("Unexpected nil service") + } + + if !cmp.Equal(tt.expectedLabels, got.ObjectMeta.Labels) { + t.Errorf("Unexpected Labels (-want +got): %s", cmp.Diff(tt.expectedLabels, got.ObjectMeta.Labels)) + } + if !cmp.Equal(tt.expectedSpec, got.Spec) { + t.Errorf("Unexpected ServiceSpec (-want +got): %s", cmp.Diff(tt.expectedSpec, got.Spec)) + } + }) + } +} + +func TestSelectorFromRoute(t *testing.T) { + selector := SelectorFromRoute(r) + if !selector.Matches(labels.Set{serving.RouteLabelKey: r.Name}) { + t.Errorf("Unexpected labels in selector") + } +} + +func testConfig() *config.Config { + return &config.Config{ + Domain: &config.Domain{ + Domains: map[string]*config.LabelSelector{ + "example.com": {}, + "another-example.com": { + Selector: map[string]string{"app": "prod"}, + }, + }, + }, + Network: &network.Config{ + DefaultIngressClass: "test-ingress-class", + DomainTemplate: network.DefaultDomainTemplate, + TagTemplate: network.DefaultTagTemplate, + }, + GC: &gc.Config{ + StaleRevisionLastpinnedDebounce: time.Duration(1 * time.Minute), + }, + } +} + +func TestGetNames(t *testing.T) { + tests := []struct { + name string + services []*corev1.Service + want sets.String + }{ + { + name: "nil services", + want: sets.String{}, + }, + { + name: "multiple services", + services: []*corev1.Service{ + {ObjectMeta: metav1.ObjectMeta{Name: "svc1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "svc2"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "svc3"}}, + }, + want: sets.NewString("svc1", "svc2", "svc3"), + }, + { + name: "duplicate services", + services: []*corev1.Service{ + {ObjectMeta: metav1.ObjectMeta{Name: "svc1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "svc1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "svc1"}}, + }, + want: 
sets.NewString("svc1"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetNames(tt.services); !cmp.Equal(got, tt.want) { + t.Errorf("GetNames() (-want, +got) = %v", cmp.Diff(tt.want, got)) + } + }) + } +} + +func TestGetDesiredServiceNames(t *testing.T) { + var route *v1alpha1.Route + tests := []struct { + name string + traffic RouteOption + want sets.String + tmpl string + wantErr bool + }{{ + name: "no traffic defined", + want: sets.NewString("myroute"), + }, { + name: "only default traffic", + traffic: WithSpecTraffic(v1alpha1.TrafficTarget{TrafficTarget: v1.TrafficTarget{}}), + want: sets.NewString("myroute"), + }, { + name: "traffic targets with default and tags", + traffic: WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{}, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bye", + }, + }, + ), + want: sets.NewString("myroute", "hello-myroute", "bye-myroute"), + }, { + name: "traffic targets with default and tags custom template", + tmpl: "{{.Name}}<=>{{.Tag}}", + traffic: WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{}, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bye", + }, + }, + ), + want: sets.NewString("myroute", "myroute<=>hello", "myroute<=>bye"), + }, { + name: "bad tag template", + tmpl: "{{.Bullet}}<=>{{.WithButterflyWings}}", + traffic: WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bye", + }, + }), + wantErr: true, + }, { + name: "traffic targets with NO default and tags", + traffic: WithSpecTraffic(v1alpha1.TrafficTarget{ + 
TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "hello", + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bye", + }, + }, + ), + want: sets.NewString("myroute", "hello-myroute", "bye-myroute"), + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := testConfig() + if tt.tmpl != "" { + cfg.Network.TagTemplate = tt.tmpl + } + ctx := config.ToContext(context.Background(), cfg) + + if tt.traffic != nil { + route = Route("default", "myroute", tt.traffic) + } else { + route = Route("default", "myroute") + } + got, err := GetDesiredServiceNames(ctx, route) + if (err != nil) != tt.wantErr { + t.Errorf("GetDesiredServiceNames() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !cmp.Equal(got, tt.want) { + t.Errorf("GetDesiredServiceNames() (-want, +got) = %v", cmp.Diff(tt.want, got)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/route.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/route.go new file mode 100644 index 0000000000..7dde261a4d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/route.go @@ -0,0 +1,612 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package route + +import ( + "context" + "sort" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + kubelabels "k8s.io/apimachinery/pkg/labels" + "knative.dev/pkg/apis" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/system" + "knative.dev/pkg/tracker" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + clientset "knative.dev/serving/pkg/client/clientset/versioned" + networkinglisters "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" + networkaccessor "knative.dev/serving/pkg/reconciler/accessor/networking" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/domains" + "knative.dev/serving/pkg/reconciler/route/resources" + "knative.dev/serving/pkg/reconciler/route/resources/labels" + resourcenames "knative.dev/serving/pkg/reconciler/route/resources/names" + "knative.dev/serving/pkg/reconciler/route/traffic" +) + +// routeFinalizer is the name that we put into the resource finalizer list, e.g. 
+// metadata: +// finalizers: +// - routes.serving.knative.dev +var ( + routeResource = v1alpha1.Resource("routes") + routeFinalizer = routeResource.String() +) + +// Reconciler implements controller.Reconciler for Route resources. +type Reconciler struct { + *reconciler.Base + + // Listers index properties about resources + routeLister listers.RouteLister + configurationLister listers.ConfigurationLister + revisionLister listers.RevisionLister + serviceLister corev1listers.ServiceLister + ingressLister networkinglisters.IngressLister + certificateLister networkinglisters.CertificateLister + configStore reconciler.ConfigStore + tracker tracker.Interface + + clock system.Clock +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Route resource +// with the current status of the resource. +func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + ctx = c.configStore.ToContext(ctx) + ctx = controller.WithEventRecorder(ctx, c.Recorder) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + // Get the Route resource with this namespace/name. + original, err := c.routeLister.Routes(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logger.Info("Route in work queue no longer exists") + return nil + } else if err != nil { + return err + } + // Don't modify the informers copy. + route := original.DeepCopy() + + // Reconcile this copy of the route and then write back any status + // updates regardless of whether the reconciliation errored out. 
+ reconcileErr := c.reconcile(ctx, route) + if equality.Semantic.DeepEqual(original.Status, route.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + } else if err = c.updateStatus(original, route); err != nil { + logger.Warnw("Failed to update route status", zap.Error(err)) + c.Recorder.Eventf(route, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for Route %q: %v", route.Name, err) + return err + } + if reconcileErr != nil { + c.Recorder.Event(route, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + return reconcileErr + } + // TODO(mattmoor): Remove this after 0.7 cuts. + // If the spec has changed, then assume we need an upgrade and issue a patch to trigger + // the webhook to upgrade via defaulting. Status updates do not trigger this due to the + // use of the /status resource. 
+ if !equality.Semantic.DeepEqual(original.Spec, route.Spec) { + routes := v1alpha1.SchemeGroupVersion.WithResource("routes") + if err := c.MarkNeedsUpgrade(routes, route.Namespace, route.Name); err != nil { + return err + } + } + return nil +} + +func ingressClassForRoute(ctx context.Context, r *v1alpha1.Route) string { + if ingressClass := r.Annotations[networking.IngressClassAnnotationKey]; ingressClass != "" { + return ingressClass + } + return config.FromContext(ctx).Network.DefaultIngressClass +} + +func certClass(ctx context.Context, r *v1alpha1.Route) string { + if class := r.Annotations[networking.CertificateClassAnnotationKey]; class != "" { + return class + } + return config.FromContext(ctx).Network.DefaultCertificateClass +} + +func (c *Reconciler) getServices(route *v1alpha1.Route) ([]*corev1.Service, error) { + currentServices, err := c.serviceLister.Services(route.Namespace).List(resources.SelectorFromRoute(route)) + if err != nil { + return nil, err + } + + serviceCopy := make([]*corev1.Service, len(currentServices)) + for i, svc := range currentServices { + serviceCopy[i] = svc.DeepCopy() + } + + return serviceCopy, err +} + +func (c *Reconciler) reconcile(ctx context.Context, r *v1alpha1.Route) error { + logger := logging.FromContext(ctx) + if r.GetDeletionTimestamp() != nil { + // Check for a DeletionTimestamp. If present, elide the normal reconcile logic. + return c.reconcileDeletion(ctx, r) + } + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. 
+ r.SetDefaults(v1.WithUpgradeViaDefaulting(ctx)) + r.Status.InitializeConditions() + + if err := r.ConvertUp(ctx, &v1beta1.Route{}); err != nil { + return err + } + + logger.Infof("Reconciling route: %#v", r) + + serviceNames, err := c.getServiceNames(ctx, r) + if err != nil { + return err + } + + if err := c.updateRouteStatusURL(ctx, r, serviceNames.clusterLocal()); err != nil { + return err + } + + // Configure traffic based on the RouteSpec. + traffic, err := c.configureTraffic(ctx, r, serviceNames.desiredClusterLocalServiceNames) + if traffic == nil || err != nil { + // Traffic targets aren't ready, no need to configure child resources. + // Need to update ObservedGeneration, otherwise Route's Ready state won't + // be propagated to Service and the Service's RoutesReady will stay in + // 'Unknown'. + r.Status.ObservedGeneration = r.Generation + return err + } + + logger.Info("Updating targeted revisions.") + // In all cases we will add annotations to the referred targets. This is so that when they become + // routable we can know (through a listener) and attempt traffic configuration again. + if err := c.reconcileTargetRevisions(ctx, traffic, r); err != nil { + return err + } + + r.Status.Address = &duckv1alpha1.Addressable{ + Addressable: duckv1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: resourcenames.K8sServiceFullname(r), + }, + }, + } + + logger.Info("Creating placeholder k8s services") + services, err := c.reconcilePlaceholderServices(ctx, r, traffic.Targets, serviceNames.existing()) + if err != nil { + return err + } + + clusterLocalServiceNames := serviceNames.clusterLocal() + tls, acmeChallenges, err := c.tls(ctx, r.Status.URL.Host, r, traffic, clusterLocalServiceNames) + if err != nil { + return err + } + + // Reconcile ingress and its children resources. + ingress, err := c.reconcileIngressResources(ctx, r, traffic, tls, clusterLocalServiceNames, ingressClassForRoute(ctx, r), acmeChallenges...) 
+ + if err != nil { + return err + } + + if ingress.GetObjectMeta().GetGeneration() != ingress.Status.ObservedGeneration || !ingress.Status.IsReady() { + r.Status.MarkIngressNotConfigured() + } else { + r.Status.PropagateIngressStatus(ingress.Status) + } + + logger.Info("Updating placeholder k8s services with ingress information") + if err := c.updatePlaceholderServices(ctx, r, services, ingress); err != nil { + return err + } + + r.Status.ObservedGeneration = r.Generation + logger.Info("Route successfully synced") + return nil +} + +func (c *Reconciler) reconcileIngressResources(ctx context.Context, r *v1alpha1.Route, tc *traffic.Config, tls []netv1alpha1.IngressTLS, + clusterLocalServices sets.String, ingressClass string, acmeChallenges ...netv1alpha1.HTTP01Challenge) (*netv1alpha1.Ingress, error) { + + desired, err := resources.MakeIngress(ctx, r, tc, tls, clusterLocalServices, ingressClass, acmeChallenges...) + if err != nil { + return nil, err + } + + ingress, err := c.reconcileIngress(ctx, r, desired) + if err != nil { + return nil, err + } + + return ingress, nil +} + +func (c *Reconciler) tls(ctx context.Context, host string, r *v1alpha1.Route, traffic *traffic.Config, clusterLocalServiceNames sets.String) ([]netv1alpha1.IngressTLS, []netv1alpha1.HTTP01Challenge, error) { + tls := []netv1alpha1.IngressTLS{} + if !config.FromContext(ctx).Network.AutoTLS { + return tls, nil, nil + } + domainToTagMap, err := domains.GetAllDomainsAndTags(ctx, r, getTrafficNames(traffic.Targets), clusterLocalServiceNames) + if err != nil { + return nil, nil, err + } + + for domain := range domainToTagMap { + if domains.IsClusterLocal(domain) { + delete(domainToTagMap, domain) + } + } + + routeDomain := config.FromContext(ctx).Domain.LookupDomainForLabels(r.Labels) + labelSelector := kubelabels.SelectorFromSet( + kubelabels.Set{ + networking.WildcardCertDomainLabelKey: routeDomain, + }, + ) + + allWildcardCerts, err := 
c.certificateLister.Certificates(r.Namespace).List(labelSelector) + if err != nil { + return nil, nil, err + } + + acmeChallenges := []netv1alpha1.HTTP01Challenge{} + desiredCerts := resources.MakeCertificates(r, domainToTagMap, certClass(ctx, r)) + for _, desiredCert := range desiredCerts { + dnsNames := sets.NewString(desiredCert.Spec.DNSNames...) + // Look for a matching wildcard cert before provisioning a new one. This saves the + // the time required to provision a new cert and reduces the chances of hitting the + // Let's Encrypt API rate limits. + cert := findMatchingWildcardCert(ctx, desiredCert.Spec.DNSNames, allWildcardCerts) + + if cert == nil { + cert, err = networkaccessor.ReconcileCertificate(ctx, r, desiredCert, c) + if err != nil { + if kaccessor.IsNotOwned(err) { + r.Status.MarkCertificateNotOwned(desiredCert.Name) + } else { + r.Status.MarkCertificateProvisionFailed(desiredCert.Name) + } + return nil, nil, err + } + dnsNames = sets.NewString(cert.Spec.DNSNames...) + } + + // r.Status.URL is for the major domain, so only change if the cert is for + // the major domain + if dnsNames.Has(host) { + r.Status.URL.Scheme = "https" + } + // TODO: we should only mark https for the public visible targets when + // we are able to configure visibility per target. + setTargetsScheme(&r.Status, dnsNames.List(), "https") + if cert.Status.IsReady() { + r.Status.MarkCertificateReady(cert.Name) + } else { + acmeChallenges = append(acmeChallenges, cert.Status.HTTP01Challenges...) + r.Status.MarkCertificateNotReady(cert.Name) + // When httpProtocol is enabled, downward http scheme. 
+ if config.FromContext(ctx).Network.HTTPProtocol == network.HTTPEnabled { + if dnsNames.Has(host) { + r.Status.URL = &apis.URL{ + Scheme: "http", + Host: host, + } + } + setTargetsScheme(&r.Status, dnsNames.List(), "http") + } + } + tls = append(tls, resources.MakeIngressTLS(cert, dnsNames.List())) + } + sort.Slice(acmeChallenges, func(i, j int) bool { + return acmeChallenges[i].URL.String() < acmeChallenges[j].URL.String() + }) + return tls, acmeChallenges, nil +} + +func (c *Reconciler) reconcileDeletion(ctx context.Context, r *v1alpha1.Route) error { + logger := logging.FromContext(ctx) + + // If our Finalizer is first, delete the Ingress for this Route + // and remove the finalizer. + if len(r.Finalizers) == 0 || r.Finalizers[0] != routeFinalizer { + return nil + } + + // Delete the Ingress resources for this Route. + logger.Info("Cleaning up Ingress") + if err := c.deleteIngressForRoute(r); err != nil { + return err + } + + // Update the Route to remove the Finalizer. + logger.Info("Removing Finalizer") + r.Finalizers = r.Finalizers[1:] + _, err := c.ServingClientSet.ServingV1alpha1().Routes(r.Namespace).Update(r) + return err +} + +// configureTraffic attempts to configure traffic based on the RouteSpec. If there are missing +// targets (e.g. Configurations without a Ready Revision, or Revision that isn't Ready or Inactive), +// no traffic will be configured. +// +// If traffic is configured we update the RouteStatus with AllTrafficAssigned = True. Otherwise we +// mark AllTrafficAssigned = False, with a message referring to one of the missing target. 
+func (c *Reconciler) configureTraffic(ctx context.Context, r *v1alpha1.Route, clusterLocalServices sets.String) (*traffic.Config, error) { + logger := logging.FromContext(ctx) + t, err := traffic.BuildTrafficConfiguration(c.configurationLister, c.revisionLister, r) + + if t == nil { + return nil, err + } + + // Tell our trackers to reconcile Route whenever the things referred to by our + // traffic stanza change. We also track missing targets since there may be + // race conditions were routes are reconciled before their targets appear + // in the informer cache + for _, obj := range t.MissingTargets { + if err := c.tracker.Track(obj, r); err != nil { + return nil, err + } + } + for _, configuration := range t.Configurations { + if err := c.tracker.Track(objectRef(configuration), r); err != nil { + return nil, err + } + } + for _, revision := range t.Revisions { + if revision.Status.IsActivationRequired() { + logger.Infof("Revision %s/%s is inactive", revision.Namespace, revision.Name) + } + if err := c.tracker.Track(objectRef(revision), r); err != nil { + return nil, err + } + } + + badTarget, isTargetError := err.(traffic.TargetError) + if err != nil && !isTargetError { + // An error that's not due to missing traffic target should + // make us fail fast. + r.Status.MarkUnknownTrafficError(err.Error()) + return nil, err + } + if badTarget != nil && isTargetError { + logger.Infof("Marking bad traffic target: %v", badTarget) + badTarget.MarkBadTrafficTarget(&r.Status) + + // Traffic targets aren't ready, no need to configure Route. 
+ return nil, nil + } + + logger.Info("All referred targets are routable, marking AllTrafficAssigned with traffic information.") + // Domain should already be present + r.Status.Traffic, err = t.GetRevisionTrafficTargets(ctx, r, clusterLocalServices) + if err != nil { + return nil, err + } + + r.Status.MarkTrafficAssigned() + + return t, nil +} + +func (c *Reconciler) updateRouteStatusURL(ctx context.Context, route *v1alpha1.Route, clusterLocalServices sets.String) error { + mainRouteServiceName, err := domains.HostnameFromTemplate(ctx, route.Name, "") + if err != nil { + return err + } + + mainRouteMeta := route.ObjectMeta.DeepCopy() + isClusterLocal := clusterLocalServices.Has(mainRouteServiceName) || labels.IsObjectLocalVisibility(route.ObjectMeta) + labels.SetVisibility(mainRouteMeta, isClusterLocal) + + host, err := domains.DomainNameFromTemplate(ctx, *mainRouteMeta, route.Name) + if err != nil { + return err + } + + route.Status.URL = &apis.URL{ + Scheme: "http", + Host: host, + } + + return nil +} + +func (c *Reconciler) getServiceNames(ctx context.Context, route *v1alpha1.Route) (*serviceNames, error) { + // Populate existing service name sets + existingServices, err := c.getServices(route) + if err != nil { + return nil, err + } + existingServiceNames := resources.GetNames(existingServices) + existingClusterLocalServices := resources.FilterService(existingServices, resources.IsClusterLocalService) + existingClusterLocalServiceNames := resources.GetNames(existingClusterLocalServices) + existingPublicServiceNames := existingServiceNames.Difference(existingClusterLocalServiceNames) + + // Populate desired service name sets + desiredServiceNames, err := resources.GetDesiredServiceNames(ctx, route) + if err != nil { + return nil, err + } + if labels.IsObjectLocalVisibility(route.ObjectMeta) { + return &serviceNames{ + existingPublicServiceNames: existingPublicServiceNames, + existingClusterLocalServiceNames: existingClusterLocalServiceNames, + 
desiredPublicServiceNames: sets.NewString(), + desiredClusterLocalServiceNames: desiredServiceNames, + }, nil + } + desiredPublicServiceNames := desiredServiceNames.Intersection(existingPublicServiceNames) + desiredClusterLocalServiceNames := desiredServiceNames.Intersection(existingClusterLocalServiceNames) + + // Any new desired services will follow the default route visibility, which is public. + serviceWithDefaultVisibility := desiredServiceNames.Difference(existingServiceNames) + desiredPublicServiceNames = desiredPublicServiceNames.Union(serviceWithDefaultVisibility) + + return &serviceNames{ + existingPublicServiceNames: existingPublicServiceNames, + existingClusterLocalServiceNames: existingClusterLocalServiceNames, + desiredPublicServiceNames: desiredPublicServiceNames, + desiredClusterLocalServiceNames: desiredClusterLocalServiceNames, + }, nil +} + +// GetServingClient returns the client to access Knative serving resources. +func (c *Reconciler) GetServingClient() clientset.Interface { + return c.ServingClientSet +} + +// GetCertificateLister returns the lister for Knative Certificate. +func (c *Reconciler) GetCertificateLister() networkinglisters.CertificateLister { + return c.certificateLister +} + +///////////////////////////////////////// +// Misc helpers. 
+///////////////////////////////////////// + +type serviceNames struct { + existingPublicServiceNames sets.String + existingClusterLocalServiceNames sets.String + desiredPublicServiceNames sets.String + desiredClusterLocalServiceNames sets.String +} + +func (sn serviceNames) existing() sets.String { + return sn.existingPublicServiceNames.Union(sn.existingClusterLocalServiceNames) +} + +func (sn serviceNames) clusterLocal() sets.String { + return sn.existingClusterLocalServiceNames.Union(sn.desiredClusterLocalServiceNames) +} + +type accessor interface { + GetGroupVersionKind() schema.GroupVersionKind + GetNamespace() string + GetName() string +} + +func objectRef(a accessor) corev1.ObjectReference { + gvk := a.GetGroupVersionKind() + apiVersion, kind := gvk.ToAPIVersionAndKind() + return corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Namespace: a.GetNamespace(), + Name: a.GetName(), + } +} + +func getTrafficNames(targets map[string]traffic.RevisionTargets) []string { + names := []string{} + for name := range targets { + names = append(names, name) + } + return names +} + +// Sets the traffic URL scheme to scheme if the URL matches the dnsNames. 
+// dnsNames are DNS names under a certificate for a particular domain, and so only change +// the corresponding traffic under the route, rather than all traffic +func setTargetsScheme(rs *v1alpha1.RouteStatus, dnsNames []string, scheme string) { + for i := range rs.Traffic { + if rs.Traffic[i].URL == nil { + continue + } + for _, dnsName := range dnsNames { + if rs.Traffic[i].URL.Host == dnsName { + rs.Traffic[i].URL.Scheme = scheme + break + } + } + } +} + +func findMatchingWildcardCert(ctx context.Context, domains []string, certs []*netv1alpha1.Certificate) *netv1alpha1.Certificate { + for _, cert := range certs { + if wildcardCertMatches(ctx, domains, cert) { + return cert + } + } + return nil +} + +func wildcardCertMatches(ctx context.Context, domains []string, cert *netv1alpha1.Certificate) bool { + dnsNames := sets.NewString() + logger := logging.FromContext(ctx) + + for _, dns := range cert.Spec.DNSNames { + dnsParts := strings.SplitAfterN(dns, ".", 2) + if len(dnsParts) < 2 { + logger.Infof("got non-FQDN DNSName %s in certificate %s", dns, cert.Name) + continue + } + dnsNames.Insert(dnsParts[1]) + } + for _, domain := range domains { + domainParts := strings.SplitAfterN(domain, ".", 2) + if len(domainParts) < 2 || !dnsNames.Has(domainParts[1]) { + return false + } + } + + return true +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/route_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/route_test.go new file mode 100644 index 0000000000..41925416f0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/route_test.go @@ -0,0 +1,1070 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + // Inject the informers this controller depends on. + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + _ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/certificate/fake" + fakeingressinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/ingress/fake" + fakecfginformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + fakerevisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + fakerouteinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake" + + "github.com/google/go-cmp/cmp" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" + 
"knative.dev/serving/pkg/reconciler/route/domains" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const ( + testNamespace = "test" + defaultDomainSuffix = "test-domain.dev" + prodDomainSuffix = "prod-domain.com" +) + +func getTestRouteWithTrafficTargets(trafficTarget RouteOption) *v1alpha1.Route { + return Route(testNamespace, "test-route", WithRouteLabel(map[string]string{"route": "test-route"}), trafficTarget) +} + +func getTestRevision(name string) *v1alpha1.Revision { + return getTestRevisionWithCondition(name, apis.Condition{ + Type: v1alpha1.RevisionConditionReady, + Status: corev1.ConditionTrue, + Reason: "ServiceReady", + }) +} + +func getTestRevisionWithCondition(name string, cond apis.Condition) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: fmt.Sprintf("/apis/serving/v1alpha1/namespaces/test/revisions/%s", name), + Name: name, + Namespace: testNamespace, + }, + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "test-image", + }, + }, + Status: v1alpha1.RevisionStatus{ + ServiceName: name, + Status: duckv1.Status{ + Conditions: duckv1.Conditions{cond}, + }, + }, + } +} + +func getTestConfiguration() *v1alpha1.Configuration { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: "/apis/serving/v1alpha1/namespaces/test/revisiontemplates/test-config", + Name: "test-config", + Namespace: testNamespace, + }, + Spec: v1alpha1.ConfigurationSpec{ + // This is a workaround for generation initialization + DeprecatedGeneration: 1, + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "test-image", + }, + }, + }, + }, + } +} + +func getTestRevisionForConfig(config *v1alpha1.Configuration) *v1alpha1.Revision { + rev := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + SelfLink: 
"/apis/serving/v1alpha1/namespaces/test/revisions/p-deadbeef", + Name: "p-deadbeef", + Namespace: testNamespace, + Labels: map[string]string{ + serving.ConfigurationLabelKey: config.Name, + }, + }, + Spec: *config.Spec.GetTemplate().Spec.DeepCopy(), + Status: v1alpha1.RevisionStatus{ + ServiceName: "p-deadbeef", + }, + } + rev.Status.MarkResourcesAvailableTrue() + rev.Status.MarkContainerHealthyTrue() + return rev +} + +func newTestReconciler(t *testing.T, configs ...*corev1.ConfigMap) ( + ctx context.Context, + informers []controller.Informer, + reconciler *Reconciler, + configMapWatcher *configmap.ManualWatcher, + cf context.CancelFunc) { + ctx, informers, _, reconciler, configMapWatcher, cf = newTestSetup(t) + return +} + +func newTestSetup(t *testing.T, configs ...*corev1.ConfigMap) ( + ctx context.Context, + informers []controller.Informer, + ctrl *controller.Impl, + reconciler *Reconciler, + configMapWatcher *configmap.ManualWatcher, + cf context.CancelFunc) { + + ctx, cf, informers = SetupFakeContextWithCancel(t) + configMapWatcher = &configmap.ManualWatcher{Namespace: system.Namespace()} + ctrl = NewController(ctx, configMapWatcher) + reconciler = ctrl.Reconciler.(*Reconciler) + + cms := append([]*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + prodDomainSuffix: "selector:\n app: prod", + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: network.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: gc.ConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{}, + }}, configs...) 
+ + for _, cfg := range cms { + configMapWatcher.OnChange(cfg) + } + return +} + +func getRouteIngressFromClient(ctx context.Context, t *testing.T, route *v1alpha1.Route) *netv1alpha1.Ingress { + opts := metav1.ListOptions{ + LabelSelector: labels.Set(map[string]string{ + serving.RouteLabelKey: route.Name, + serving.RouteNamespaceLabelKey: route.Namespace, + }).AsSelector().String(), + } + ingresses, err := fakeservingclient.Get(ctx).NetworkingV1alpha1().Ingresses(route.Namespace).List(opts) + if err != nil { + t.Errorf("Ingress.Get(%v) = %v", opts, err) + } + + if len(ingresses.Items) != 1 { + t.Errorf("Ingress.Get(%v), expect 1 instance, but got %d", opts, len(ingresses.Items)) + } + + return &ingresses.Items[0] +} +func getCertificateFromClient(t *testing.T, ctx context.Context, desired *netv1alpha1.Certificate) *netv1alpha1.Certificate { + created, err := fakeservingclient.Get(ctx).NetworkingV1alpha1().Certificates(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Certificates(%s).Get(%s) = %v", desired.Namespace, desired.Name, err) + } + return created +} + +func addResourcesToInformers(t *testing.T, ctx context.Context, route *v1alpha1.Route) { + t.Helper() + + ns := route.Namespace + + route, err := fakeservingclient.Get(ctx).ServingV1alpha1().Routes(ns).Get(route.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Route.Get(%v) = %v", route.Name, err) + } + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + if ci := getRouteIngressFromClient(ctx, t, route); ci != nil { + fakeingressinformer.Get(ctx).Informer().GetIndexer().Add(ci) + } + ingress := getRouteIngressFromClient(ctx, t, route) + fakeingressinformer.Get(ctx).Informer().GetIndexer().Add(ingress) +} + +// Test the only revision in the route is in Reserve (inactive) serving status. 
+func TestCreateRouteForOneReserveRevision(t *testing.T) { + ctx, _, reconciler, _, cf := newTestReconciler(t) + defer cf() + + fakeRecorder := reconciler.Base.Recorder.(*record.FakeRecorder) + + // An inactive revision + rev := getTestRevision("test-rev") + rev.Status.MarkActiveFalse("NoTraffic", "no message") + + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // A route targeting the revision + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "test-rev", + ConfigurationName: "test-config", + Percent: ptr.Int64(100), + }, + })) + fakeservingclient.Get(ctx).ServingV1alpha1().Routes(testNamespace).Create(route) + // Since Reconcile looks in the lister, we need to add it to the informer + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + ci := getRouteIngressFromClient(ctx, t, route) + + // Check labels + expectedLabels := map[string]string{ + serving.RouteLabelKey: route.Name, + serving.RouteNamespaceLabelKey: route.Namespace, + "route": "test-route", + } + if diff := cmp.Diff(expectedLabels, ci.Labels); diff != "" { + t.Errorf("Unexpected label diff (-want +got): %v", diff) + } + + domain := strings.Join([]string{route.Name, route.Namespace, defaultDomainSuffix}, ".") + expectedSpec := netv1alpha1.IngressSpec{ + Visibility: netv1alpha1.IngressVisibilityExternalIP, + TLS: []netv1alpha1.IngressTLS{}, + Rules: []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test.svc.cluster.local", + domain, + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Status.ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 
100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": "test-rev", + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }}, + } + if diff := cmp.Diff(expectedSpec, ci.Spec); diff != "" { + t.Errorf("Unexpected rule spec diff (-want +got): %s", diff) + } + + // Update ingress loadbalancer to trigger placeholder service creation. + ci.Status = netv1alpha1.IngressStatus{ + LoadBalancer: &netv1alpha1.LoadBalancerStatus{ + Ingress: []netv1alpha1.LoadBalancerIngressStatus{{ + DomainInternal: "test-domain", + }}, + }, + } + fakeingressinformer.Get(ctx).Informer().GetIndexer().Update(ci) + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + // Look for the events. Events are delivered asynchronously so we need to use + // hooks here. Each hook tests for a specific event. + select { + case got := <-fakeRecorder.Events: + const want = `Normal Created Created placeholder service "test-route"` + if got != want { + t.Errorf("<-Events = %s, wanted %s", got, want) + } + case <-time.After(3 * time.Second): + t.Error("timed out waiting for expected events.") + } + select { + case got := <-fakeRecorder.Events: + const wantPrefix = `Normal Created Created Ingress` + if !strings.HasPrefix(got, wantPrefix) { + t.Errorf("<-Events = %s, wanted prefix %s", got, wantPrefix) + } + case <-time.After(3 * time.Second): + t.Error("timed out waiting for expected events.") + } +} + +func TestCreateRouteWithMultipleTargets(t *testing.T) { + ctx, informers, reconciler, _, cf := newTestReconciler(t) + wicb, err := controller.RunInformers(ctx.Done(), informers...) 
+ if err != nil { + t.Fatalf("Error starting informers: %v", err) + } + defer func() { + cf() + wicb() + }() + // A standalone revision + rev := getTestRevision("test-rev") + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // A configuration and associated revision. Normally the revision would be + // created by the configuration reconciler. + config := getTestConfiguration() + cfgrev := getTestRevisionForConfig(config) + config.Status.SetLatestCreatedRevisionName(cfgrev.Name) + config.Status.SetLatestReadyRevisionName(cfgrev.Name) + fakeservingclient.Get(ctx).ServingV1alpha1().Configurations(testNamespace).Create(config) + // Since Reconcile looks in the lister, we need to add it to the informer + fakecfginformer.Get(ctx).Informer().GetIndexer().Add(config) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(cfgrev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(cfgrev) + + // A route targeting both the config and standalone revision. + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: config.Name, + Percent: ptr.Int64(90), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: rev.Name, + Percent: ptr.Int64(10), + }, + })) + fakeservingclient.Get(ctx).ServingV1alpha1().Routes(testNamespace).Create(route) + // Since Reconcile looks in the lister, we need to add it to the informer. 
+ fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + ci := getRouteIngressFromClient(ctx, t, route) + domain := strings.Join([]string{route.Name, route.Namespace, defaultDomainSuffix}, ".") + expectedSpec := netv1alpha1.IngressSpec{ + Visibility: netv1alpha1.IngressVisibilityExternalIP, + TLS: []netv1alpha1.IngressTLS{}, + Rules: []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test.svc.cluster.local", + domain, + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: cfgrev.Status.ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 90, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": cfgrev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Status.ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 10, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }}, + } + + if diff := cmp.Diff(expectedSpec, ci.Spec); diff != "" { + t.Errorf("Unexpected rule spec diff (-want +got): %v", diff) + } +} + +// Test one out of multiple target revisions is in Reserve serving state. +func TestCreateRouteWithOneTargetReserve(t *testing.T) { + ctx, _, reconciler, _, cf := newTestReconciler(t) + defer cf() + // A standalone inactive revision + rev := getTestRevision("test-rev") + rev.Status.MarkActiveFalse("NoTraffic", "no message") + + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // A configuration and associated revision. 
Normally the revision would be + // created by the configuration reconciler. + config := getTestConfiguration() + cfgrev := getTestRevisionForConfig(config) + config.Status.SetLatestCreatedRevisionName(cfgrev.Name) + config.Status.SetLatestReadyRevisionName(cfgrev.Name) + fakeservingclient.Get(ctx).ServingV1alpha1().Configurations(testNamespace).Create(config) + // Since Reconcile looks in the lister, we need to add it to the informer + fakecfginformer.Get(ctx).Informer().GetIndexer().Add(config) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(cfgrev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(cfgrev) + + // A route targeting both the config and standalone revision + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: config.Name, + Percent: ptr.Int64(90), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: rev.Name, + ConfigurationName: "test-config", + Percent: ptr.Int64(10), + }, + })) + fakeservingclient.Get(ctx).ServingV1alpha1().Routes(testNamespace).Create(route) + // Since Reconcile looks in the lister, we need to add it to the informer + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + ci := getRouteIngressFromClient(ctx, t, route) + domain := strings.Join([]string{route.Name, route.Namespace, defaultDomainSuffix}, ".") + expectedSpec := netv1alpha1.IngressSpec{ + Visibility: netv1alpha1.IngressVisibilityExternalIP, + TLS: []netv1alpha1.IngressTLS{}, + Rules: []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test.svc.cluster.local", + domain, + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: cfgrev.Status.ServiceName, + 
ServicePort: intstr.FromInt(80), + }, + Percent: 90, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": cfgrev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Status.ServiceName, + ServicePort: intstr.FromInt(80), + }, + Percent: 10, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }}, + } + if diff := cmp.Diff(expectedSpec, ci.Spec); diff != "" { + t.Errorf("Unexpected rule spec diff (-want +got): %v", diff) + } +} + +func TestCreateRouteWithDuplicateTargets(t *testing.T) { + ctx, _, reconciler, _, cf := newTestReconciler(t) + defer cf() + + // A standalone revision + rev := getTestRevision("test-rev") + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // A configuration and associated revision. Normally the revision would be + // created by the configuration reconciler. + config := getTestConfiguration() + cfgrev := getTestRevisionForConfig(config) + config.Status.SetLatestCreatedRevisionName(cfgrev.Name) + config.Status.SetLatestReadyRevisionName(cfgrev.Name) + fakeservingclient.Get(ctx).ServingV1alpha1().Configurations(testNamespace).Create(config) + // Since Reconcile looks in the lister, we need to add it to the informer + fakecfginformer.Get(ctx).Informer().GetIndexer().Add(config) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(cfgrev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(cfgrev) + + // A route with duplicate targets. These will be deduped. 
+ route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "test-config", + Percent: ptr.Int64(30), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "test-config", + Percent: ptr.Int64(20), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "test-rev", + Percent: ptr.Int64(10), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "test-rev", + Percent: ptr.Int64(5), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "test-revision-1", + RevisionName: "test-rev", + Percent: ptr.Int64(10), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "test-revision-1", + RevisionName: "test-rev", + Percent: ptr.Int64(10), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "test-revision-2", + RevisionName: "test-rev", + Percent: ptr.Int64(15), + }, + })) + fakeservingclient.Get(ctx).ServingV1alpha1().Routes(testNamespace).Create(route) + // Since Reconcile looks in the lister, we need to add it to the informer + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + ci := getRouteIngressFromClient(ctx, t, route) + domain := strings.Join([]string{route.Name, route.Namespace, defaultDomainSuffix}, ".") + expectedSpec := netv1alpha1.IngressSpec{ + Visibility: netv1alpha1.IngressVisibilityExternalIP, + TLS: []netv1alpha1.IngressTLS{}, + Rules: []netv1alpha1.IngressRule{{ + Hosts: []string{ + "test-route.test.svc.cluster.local", + domain, + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: cfgrev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 50, + AppendHeaders: 
map[string]string{ + "Knative-Serving-Revision": cfgrev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 50, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "test-revision-1-test-route.test.svc.cluster.local", + "test-revision-1-test-route.test.test-domain.dev", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: "test-rev", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "test-revision-2-test-route.test.svc.cluster.local", + "test-revision-2-test-route.test.test-domain.dev", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: "test-rev", + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }}, + } + + if diff := cmp.Diff(expectedSpec, ci.Spec); diff != "" { + fmt.Printf("%+v\n", ci.Spec) + t.Errorf("Unexpected rule spec diff (-want +got): %v", diff) + } +} + +func TestCreateRouteWithNamedTargets(t *testing.T) { + ctx, _, reconciler, _, cf 
:= newTestReconciler(t) + defer cf() + // A standalone revision + rev := getTestRevision("test-rev") + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(rev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(rev) + + // A configuration and associated revision. Normally the revision would be + // created by the configuration reconciler. + config := getTestConfiguration() + cfgrev := getTestRevisionForConfig(config) + config.Status.SetLatestCreatedRevisionName(cfgrev.Name) + config.Status.SetLatestReadyRevisionName(cfgrev.Name) + fakeservingclient.Get(ctx).ServingV1alpha1().Configurations(testNamespace).Create(config) + // Since Reconcile looks in the lister, we need to add it to the informer + fakecfginformer.Get(ctx).Informer().GetIndexer().Add(config) + fakeservingclient.Get(ctx).ServingV1alpha1().Revisions(testNamespace).Create(cfgrev) + fakerevisioninformer.Get(ctx).Informer().GetIndexer().Add(cfgrev) + + // A route targeting both the config and standalone revision with named + // targets + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "foo", + RevisionName: "test-rev", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "bar", + ConfigurationName: "test-config", + Percent: ptr.Int64(50), + }, + })) + + fakeservingclient.Get(ctx).ServingV1alpha1().Routes(testNamespace).Create(route) + // Since Reconcile looks in the lister, we need to add it to the informer + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + + ci := getRouteIngressFromClient(ctx, t, route) + domain := strings.Join([]string{route.Name, route.Namespace, defaultDomainSuffix}, ".") + expectedSpec := netv1alpha1.IngressSpec{ + Visibility: netv1alpha1.IngressVisibilityExternalIP, + TLS: []netv1alpha1.IngressTLS{}, + Rules: []netv1alpha1.IngressRule{{ + Hosts: 
[]string{ + "test-route.test.svc.cluster.local", + domain, + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 50, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }, { + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: cfgrev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 50, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": cfgrev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "bar-test-route.test.svc.cluster.local", + "bar-test-route.test.test-domain.dev", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: cfgrev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": cfgrev.Name, + "Knative-Serving-Namespace": testNamespace, + }, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }, { + Hosts: []string{ + "foo-test-route.test.svc.cluster.local", + "foo-test-route.test.test-domain.dev", + }, + HTTP: &netv1alpha1.HTTPIngressRuleValue{ + Paths: []netv1alpha1.HTTPIngressPath{{ + Splits: []netv1alpha1.IngressBackendSplit{{ + IngressBackend: netv1alpha1.IngressBackend{ + ServiceNamespace: testNamespace, + ServiceName: rev.Name, + ServicePort: intstr.FromInt(80), + }, + Percent: 100, + AppendHeaders: map[string]string{ + "Knative-Serving-Revision": rev.Name, + "Knative-Serving-Namespace": testNamespace, + 
}, + }}, + }}, + }, + Visibility: netv1alpha1.IngressVisibilityExternalIP, + }}, + } + + if diff := cmp.Diff(expectedSpec, ci.Spec); diff != "" { + fmt.Printf("%+v\n", ci.Spec) + t.Errorf("Unexpected rule spec diff (-want +got): %v", diff) + } +} + +func TestUpdateDomainConfigMap(t *testing.T) { + ctx, _, reconciler, watcher, cf := newTestReconciler(t) + defer cf() + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + routeClient := fakeservingclient.Get(ctx).ServingV1alpha1().Routes(route.Namespace) + + // Create a route. + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + routeClient.Create(route) + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + addResourcesToInformers(t, ctx, route) + + route.ObjectMeta.Labels = map[string]string{"app": "prod"} + + // Test changes in domain config map. Routes should get updated appropriately. + expectations := []struct { + apply func() + expectedDomainSuffix string + }{{ + expectedDomainSuffix: prodDomainSuffix, + apply: func() {}, + }, { + expectedDomainSuffix: "mytestdomain.com", + apply: func() { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + "mytestdomain.com": "selector:\n app: prod", + }, + } + watcher.OnChange(&domainConfig) + }, + }, { + expectedDomainSuffix: "newdefault.net", + apply: func() { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "newdefault.net": "", + "mytestdomain.com": "selector:\n app: prod", + }, + } + watcher.OnChange(&domainConfig) + route.Labels = make(map[string]string) + }, + }, { + // When no domain with an open selector is specified, we fallback + // on the default of example.com. 
+ expectedDomainSuffix: config.DefaultDomain, + apply: func() { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + "mytestdomain.com": "selector:\n app: prod", + }, + } + watcher.OnChange(&domainConfig) + route.Labels = make(map[string]string) + }, + }} + + for _, expectation := range expectations { + t.Run(expectation.expectedDomainSuffix, func(t *testing.T) { + expectation.apply() + fakerouteinformer.Get(ctx).Informer().GetIndexer().Add(route) + routeClient.Update(route) + reconciler.Reconcile(context.Background(), KeyOrDie(route)) + addResourcesToInformers(t, ctx, route) + + route, _ = routeClient.Get(route.Name, metav1.GetOptions{}) + expectedDomain := fmt.Sprintf("%s.%s.%s", route.Name, route.Namespace, expectation.expectedDomainSuffix) + if route.Status.URL.Host != expectedDomain { + t.Errorf("Expected domain %q but saw %q", expectedDomain, route.Status.URL.Host) + } + }) + } +} + +func TestGlobalResyncOnUpdateDomainConfigMap(t *testing.T) { + // Test changes in domain config map. Routes should get updated appropriately. + // We're expecting exactly one route modification per config-map change. 
+ tests := []struct { + doThings func(*configmap.ManualWatcher) + expectedDomainSuffix string + }{{ + expectedDomainSuffix: prodDomainSuffix, + doThings: func(*configmap.ManualWatcher) {}, // The update will still happen: status will be updated to match the route labels + }, { + expectedDomainSuffix: "mytestdomain.com", + doThings: func(watcher *configmap.ManualWatcher) { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + "mytestdomain.com": "selector:\n app: prod", + }, + } + watcher.OnChange(&domainConfig) + }, + }, { + expectedDomainSuffix: "newprod.net", + doThings: func(watcher *configmap.ManualWatcher) { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + "newprod.net": "selector:\n app: prod", + }, + } + watcher.OnChange(&domainConfig) + }, + }, { + expectedDomainSuffix: defaultDomainSuffix, + doThings: func(watcher *configmap.ManualWatcher) { + domainConfig := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.DomainConfigName, + Namespace: system.Namespace(), + }, + Data: map[string]string{ + defaultDomainSuffix: "", + }, + } + watcher.OnChange(&domainConfig) + }, + }} + + for _, test := range tests { + test := test + t.Run(test.expectedDomainSuffix, func(t *testing.T) { + ctx, informers, ctrl, _, watcher, cf := newTestSetup(t) + + grp := errgroup.Group{} + + servingClient := fakeservingclient.Get(ctx) + h := NewHooks() + + // Check for Ingress created as a signal that syncHandler ran + h.OnUpdate(&servingClient.Fake, "routes", func(obj runtime.Object) HookResult { + rt := obj.(*v1alpha1.Route) + t.Logf("route updated: %q", rt.Name) + + expectedDomain := fmt.Sprintf("%s.%s.%s", rt.Name, rt.Namespace, test.expectedDomainSuffix) + if rt.Status.URL.Host != 
expectedDomain { + t.Logf("Expected domain %q but saw %q", expectedDomain, rt.Status.URL.Host) + return HookIncomplete + } + + return HookComplete + }) + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("Failed to start informers: %v", err) + } + defer func() { + cf() + if err := grp.Wait(); err != nil { + t.Errorf("Wait() = %v", err) + } + waitInformers() + }() + + if err := watcher.Start(ctx.Done()); err != nil { + t.Fatalf("failed to start configuration manager: %v", err) + } + + grp.Go(func() error { return ctrl.Run(1, ctx.Done()) }) + + // Create a route. + route := getTestRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + route.Labels = map[string]string{"app": "prod"} + + servingClient.ServingV1alpha1().Routes(route.Namespace).Create(route) + + test.doThings(watcher) + + if err := h.WaitForHooks(3 * time.Second); err != nil { + t.Error(err) + } + }) + } +} + +func TestRouteDomain(t *testing.T) { + route := Route("default", "myapp", WithRouteLabel(map[string]string{"route": "myapp"}), WithRouteAnnotation(map[string]string{"sub": "mysub"})) + ctx := context.Background() + cfg := ReconcilerTestConfig(false) + ctx = config.ToContext(ctx, cfg) + + tests := []struct { + Name string + Template string + Pass bool + Expected string + }{{ + Name: "Default", + Template: "{{.Name}}.{{.Namespace}}.{{.Domain}}", + Pass: true, + Expected: "myapp.default.example.com", + }, { + Name: "Dash", + Template: "{{.Name}}-{{.Namespace}}.{{.Domain}}", + Pass: true, + Expected: "myapp-default.example.com", + }, { + Name: "Short", + Template: "{{.Name}}.{{.Domain}}", + Pass: true, + Expected: "myapp.example.com", + }, { + Name: "SuperShort", + Template: "{{.Name}}", + Pass: true, + Expected: "myapp", + }, { + Name: "Annotations", + Template: `{{.Name}}.{{ index .Annotations "sub"}}.{{.Domain}}`, + Pass: true, + Expected: "myapp.mysub.example.com", + }, { + // This cannot get through our validation, but verify we 
handle errors. + Name: "BadVarName", + Template: "{{.Name}}.{{.NNNamespace}}.{{.Domain}}", + Pass: false, + Expected: "", + }} + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + cfg.Network.DomainTemplate = test.Template + + res, err := domains.DomainNameFromTemplate(ctx, route.ObjectMeta, route.Name) + + if test.Pass != (err == nil) { + t.Fatal("DomainNameFromTemplate supposed to fail but didn't") + } + if got, want := res, test.Expected; got != want { + t.Errorf("DomainNameFromTemplate = %q, want: %q", res, test.Expected) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/table_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/table_test.go new file mode 100644 index 0000000000..84d4330c93 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/table_test.go @@ -0,0 +1,2760 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package route + +import ( + "context" + "fmt" + "testing" + "time" + + "knative.dev/serving/pkg/apis/networking" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + clientgotesting "k8s.io/client-go/testing" + + "knative.dev/pkg/apis" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmeta" + pkgnet "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + "knative.dev/pkg/tracker" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler" + kaccessor "knative.dev/serving/pkg/reconciler/accessor" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/resources" + "knative.dev/serving/pkg/reconciler/route/traffic" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const TestIngressClass = "ingress-class-foo" + +var fakeCurTime = time.Unix(1e9, 0) + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + // Make sure Reconcile handles bad keys. + Key: "too/many/parts", + }, { + Name: "key not found", + // Make sure Reconcile handles good keys that don't exist. 
+ Key: "foo/not-found", + }, { + Name: "configuration not yet ready", + Objects: []runtime.Object{ + Route("default", "first-reconcile", WithConfigTarget("not-ready")), + cfg("default", "not-ready", WithGeneration(1), WithLatestCreated("not-ready-00001")), + rev("default", "not-ready", 1, WithInitRevConditions, WithRevName("not-ready-00001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "first-reconcile", WithConfigTarget("not-ready"), WithURL, + // The first reconciliation initializes the conditions and reflects + // that the referenced configuration is not yet ready. + WithInitRouteConditions, MarkConfigurationNotReady("not-ready")), + }}, + Key: "default/first-reconcile", + }, { + Name: "configuration permanently failed", + Objects: []runtime.Object{ + Route("default", "first-reconcile", WithConfigTarget("permanently-failed")), + cfg("default", "permanently-failed", + WithGeneration(1), WithLatestCreated("permanently-failed-00001"), MarkLatestCreatedFailed("blah")), + rev("default", "permanently-failed", 1, + WithRevName("permanently-failed-00001"), + WithInitRevConditions, MarkContainerMissing), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "first-reconcile", WithConfigTarget("permanently-failed"), WithURL, + WithInitRouteConditions, MarkConfigurationFailed("permanently-failed")), + }}, + Key: "default/first-reconcile", + }, { + Name: "failure updating route status", + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "routes"), + }, + Objects: []runtime.Object{ + Route("default", "first-reconcile", WithConfigTarget("not-ready")), + cfg("default", "not-ready", WithGeneration(1), WithLatestCreated("not-ready-00001")), + rev("default", "not-ready", 1, WithInitRevConditions, WithRevName("not-ready-00001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "first-reconcile", 
WithConfigTarget("not-ready"), WithURL, + WithInitRouteConditions, MarkConfigurationNotReady("not-ready")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for Route %q: %v", + "first-reconcile", "inducing failure for update routes"), + }, + Key: "default/first-reconcile", + }, { + Name: "simple route becomes ready, ingress unknown", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + }, + WantCreates: []runtime.Object{ + simpleIngress( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + "", + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. 
+ WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "custom ingress route becomes ready, ingress unknown", + Objects: []runtime.Object{ + Route("default", "becomes-ready", + WithConfigTarget("config"), WithRouteUID("12-34"), WithIngressClass("custom-ingress-class")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("bk")), + }, + WantCreates: []runtime.Object{ + ingressWithClass( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "bk", + Active: true, + }}, + }, + }, + "custom-ingress-class", + sets.NewString(), + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "becomes-ready", + WithConfigTarget("config"), WithRouteUID("12-34"), WithIngressClass("custom-ingress-class")), + "", + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), WithIngressClass("custom-ingress-class"), + // Populated by reconciliation when all traffic has been assigned. 
+ WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "cluster local route becomes ready, ingress unknown", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithLocalDomain, + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + WithRouteUID("65-23")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("tb")), + }, + WantCreates: []runtime.Object{ + simpleIngressWithVisibility( + Route("default", "becomes-ready", WithConfigTarget("config"), + WithLocalDomain, WithRouteUID("65-23"), + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + sets.NewString("becomes-ready"), + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "becomes-ready", WithConfigTarget("config"), WithLocalDomain, + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + WithRouteUID("65-23")), + "", + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("65-23"), + // Populated by reconciliation when all traffic has been assigned. + WithLocalDomain, WithAddress, WithInitRouteConditions, + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "simple route becomes ready", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + }, + WantCreates: []runtime.Object{ + simplePlaceholderK8sService(getContext(), Route("default", "becomes-ready", WithConfigTarget("config")), ""), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{ + {Object: simpleK8sService(Route("default", "becomes-ready", WithConfigTarget("config")))}, + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + // Populated by reconciliation when the route becomes ready. + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "failure creating k8s placeholder service", + // We induce a failure creating the placeholder service + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "services"), + }, + Objects: []runtime.Object{ + Route("default", "create-svc-failure", WithConfigTarget("config"), WithRouteFinalizer), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + }, + WantCreates: []runtime.Object{ + simplePlaceholderK8sService(getContext(), Route("default", "create-svc-failure", WithConfigTarget("config")), ""), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "create-svc-failure", WithConfigTarget("config"), + WithRouteFinalizer, + // Populated by reconciliation when we've failed to create + // the K8s service. 
+ WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create placeholder service %q: %v", + "create-svc-failure", "inducing failure for create services"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create placeholder service: inducing failure for create services"), + }, + Key: "default/create-svc-failure", + }, { + Name: "failure creating ingress", + Objects: []runtime.Object{ + Route("default", "ingress-create-failure", WithConfigTarget("config"), WithRouteFinalizer), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("astrid")), + }, + // We induce a failure creating the Ingress. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "ingresses"), + }, + WantCreates: []runtime.Object{ + //This is the Create we see for the ingress, but we induce a failure. + simpleIngress( + Route("default", "ingress-create-failure", WithConfigTarget("config"), + WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "astrid", + Active: true, + }}, + }, + }, + ), + func() *corev1.Service { + result, _ := resources.MakeK8sPlaceholderService(getContext(), + Route("default", "ingress-create-failure", WithConfigTarget("config"), WithRouteFinalizer), + "") + return result + }(), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "ingress-create-failure", WithConfigTarget("config"), + WithRouteFinalizer, + // Populated by reconciliation when we fail to create + // the cluster ingress. + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "ingress-create-failure"), + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Ingress: inducing failure for create ingresses"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create Ingress: inducing failure for create ingresses"), + }, + Key: "default/ingress-create-failure", + }, { + Name: "steady state", + Objects: []runtime.Object{ + Route("default", "steady-state", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, + WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. 
+ WithConfigLabel("serving.knative.dev/route", "steady-state"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "steady-state", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "steady-state", WithConfigTarget("config"))), + }, + Key: "default/steady-state", + }, { + Name: "unhappy about ownership of placeholder service", + WantErr: true, + Objects: []runtime.Object{ + Route("default", "unhappy-owner", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "unhappy-owner"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleK8sService(Route("default", "unhappy-owner", WithConfigTarget("config")), + WithK8sSvcOwnersRemoved), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "unhappy-owner", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), + // The owner is not us, so we are unhappy. 
+ MarkServiceNotOwned), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `route: "unhappy-owner" does not own Service: "unhappy-owner"`), + }, + Key: "default/unhappy-owner", + }, { + // This tests that when the Route is labelled differently, it is configured with a + // different domain from config-domain.yaml. This is otherwise a copy of the steady + // state test above. + Name: "different labels, different domain - steady state", + Objects: []runtime.Object{ + Route("default", "different-domain", WithConfigTarget("config"), + WithAnotherDomain, WithAddress, + WithInitRouteConditions, MarkTrafficAssigned, MarkIngressReady, + WithRouteFinalizer, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), WithRouteLabel(map[string]string{"app": "prod"})), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "different-domain"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("my-service")), + simpleReadyIngress( + Route("default", "different-domain", WithConfigTarget("config"), + WithAnotherDomain), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + ServiceName: "my-service", + }}, + }, + }, + ), + simpleK8sService(Route("default", "different-domain", WithConfigTarget("config"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{ + { + Object: simpleReadyIngress( + Route("default", "different-domain", WithConfigTarget("config"), + WithAnotherDomain), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + ServiceName: "my-service", + }}, + }, + }, + WithHosts( + 0, + "different-domain.default.svc.cluster.local", + "different-domain.default.another-example.com", + ), + ), + }, + }, + Key: "default/different-domain", + }, { + Name: "new latest created revision", + Objects: []runtime.Object{ + Route("default", "new-latest-created", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(2), WithLatestReady("config-00001"), WithLatestCreated("config-00002"), + WithConfigLabel("serving.knative.dev/route", "new-latest-created"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("daisy")), + // This is the name of the new revision we're referencing above. 
+ rev("default", "config", 2, WithInitRevConditions, WithRevName("config-00002")), + simpleReadyIngress( + Route("default", "new-latest-created", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "daisy", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "new-latest-created", WithConfigTarget("config"))), + }, + // A new LatestCreatedRevisionName on the Configuration alone should result in no changes to the Route. + Key: "default/new-latest-created", + }, { + Name: "new latest ready revision", + Objects: []runtime.Object{ + Route("default", "new-latest-ready", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + })), + cfg("default", "config", + WithGeneration(2), WithLatestCreated("config-00002"), WithLatestReady("config-00002"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "new-latest-ready"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("magnolia")), + // This is the name of the new revision we're referencing above. + rev("default", "config", 2, MarkRevisionReady, WithRevName("config-00002"), WithServiceName("belltown")), + simpleReadyIngress( + Route("default", "new-latest-ready", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "magnolia", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "new-latest-ready", WithConfigTarget("config"))), + }, + // A new LatestReadyRevisionName on the Configuration should result in the new Revision being rolled out. + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleReadyIngress( + Route("default", "new-latest-ready", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // This is the new config we're making become ready. + RevisionName: "config-00002", + Percent: ptr.Int64(100), + }, + ServiceName: "belltown", + Active: true, + }}, + }, + }, + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "new-latest-ready", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00002", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/new-latest-ready", + }, { + Name: "public becomes cluster local", + Objects: []runtime.Object{ + Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + WithRouteUID("65-23")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("tb")), + simpleIngress( + Route("default", "becomes-local", WithConfigTarget("config"), WithRouteUID("65-23"), WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: 
map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + WithRouteUID("65-23"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleIngressWithVisibility( + Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteUID("65-23"), + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + sets.NewString("becomes-local"), + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteUID("65-23"), + MarkTrafficAssigned, MarkIngressNotConfigured, + WithLocalDomain, WithAddress, WithInitRouteConditions, + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"}), + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/becomes-local", + }, { + Name: "cluster local becomes public", + Objects: []runtime.Object{ + Route("default", "becomes-public", WithConfigTarget("config"), + WithRouteUID("65-23")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, 
WithRevName("config-00001"), WithServiceName("tb")), + simpleIngressWithVisibility( + Route("default", "becomes-public", WithConfigTarget("config"), WithRouteUID("65-23"), + WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + sets.NewString("becomes-public"), + ), + simpleK8sService(Route("default", "becomes-public", WithConfigTarget("config"), + WithRouteUID("65-23"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleIngress( + Route("default", "becomes-public", WithConfigTarget("config"), + WithRouteUID("65-23"), WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-public", WithConfigTarget("config"), + WithRouteUID("65-23"), + MarkTrafficAssigned, MarkIngressNotConfigured, + WithAddress, WithInitRouteConditions, WithURL, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/becomes-public", + }, { + Name: "failure updating cluster ingress", + // Starting from the new latest ready, induce a failure updating the cluster ingress. 
+ WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "ingresses"), + }, + Objects: []runtime.Object{ + Route("default", "update-ci-failure", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + })), + cfg("default", "config", + WithGeneration(2), WithLatestCreated("config-00002"), WithLatestReady("config-00002"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "update-ci-failure"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("fremont")), + // This is the name of the new revision we're referencing above. + rev("default", "config", 2, MarkRevisionReady, WithRevName("config-00002"), WithServiceName("wallingford")), + simpleReadyIngress( + Route("default", "update-ci-failure", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + // Use the Revision name from the config. + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "fremont", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "update-ci-failure", WithConfigTarget("config"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleReadyIngress( + Route("default", "update-ci-failure", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // This is the new config we're making become ready. 
+ RevisionName: "config-00002", + Percent: ptr.Int64(100), + }, + ServiceName: "wallingford", + Active: true, + }}, + }, + }, + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "update-ci-failure", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00002", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to update Ingress: inducing failure for update ingresses"), + }, + Key: "default/update-ci-failure", + }, { + Name: "reconcile service mutation", + Objects: []runtime.Object{ + Route("default", "svc-mutation", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "svc-mutation"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "svc-mutation", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "svc-mutation", + WithConfigTarget("config")), MutateK8sService), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleK8sService(Route("default", "svc-mutation", WithConfigTarget("config"))), + }}, + Key: "default/svc-mutation", + }, { + Name: "failure updating k8s service", + // We start from the service mutation test, but induce a failure updating the service resource. + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "services"), + }, + Objects: []runtime.Object{ + Route("default", "svc-mutation", WithConfigTarget("config"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "svc-mutation"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "svc-mutation", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "svc-mutation", + WithConfigTarget("config")), MutateK8sService), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleK8sService(Route("default", "svc-mutation", WithConfigTarget("config"))), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "inducing failure for update services"), + }, + Key: "default/svc-mutation", + }, { + // In #1789 we switched this to an ExternalName Service. Services created in + // 0.1 will still have ClusterIP set, which is Forbidden for ExternalName + // Services. Ensure that we drop the ClusterIP if it is set in the spec. + Name: "drop cluster ip", + Objects: []runtime.Object{ + Route("default", "cluster-ip", WithConfigTarget("config"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "cluster-ip"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "cluster-ip", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "cluster-ip", + WithConfigTarget("config")), WithClusterIP("127.0.0.1")), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleK8sService(Route("default", "cluster-ip", WithConfigTarget("config"))), + }}, + Key: "default/cluster-ip", + }, { + // Make sure we fix the external name if something messes with it. + Name: "fix external name", + Objects: []runtime.Object{ + Route("default", "external-name", WithConfigTarget("config"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "external-name"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "external-name", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "external-name", + WithConfigTarget("config")), WithExternalName("this-is-the-wrong-name")), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleK8sService(Route("default", "external-name", WithConfigTarget("config"))), + }}, + Key: "default/external-name", + }, { + Name: "reconcile cluster ingress mutation", + Objects: []runtime.Object{ + Route("default", "ingress-mutation", WithConfigTarget("config"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "ingress-mutation"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("windemere")), + mutateIngress(simpleReadyIngress( + Route("default", "ingress-mutation", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "magnusson-park", + Active: true, + }}, + }, + }, + )), + simpleK8sService(Route("default", "ingress-mutation", WithConfigTarget("config"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleReadyIngress( + Route("default", "ingress-mutation", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "windemere", + Active: true, + }}, + }, + }, + ), + }}, + Key: "default/ingress-mutation", + }, { + Name: "switch to a different config", + Objects: []runtime.Object{ + // The status reflects "oldconfig", but the spec "newconfig". + Route("default", "change-configs", WithConfigTarget("newconfig"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "oldconfig-00001", + Percent: ptr.Int64(100), + }, + })), + // Both configs exist, but only "oldconfig" is labelled. + cfg("default", "oldconfig", + WithGeneration(1), WithLatestCreated("oldconfig-00001"), WithLatestReady("oldconfig-00001"), + // The Route controller attaches our label to this Configuration. 
+ WithConfigLabel("serving.knative.dev/route", "change-configs"), + ), + cfg("default", "newconfig", + WithGeneration(1), WithLatestCreated("newconfig-00001"), WithLatestReady("newconfig-00001")), + rev("default", "oldconfig", 1, MarkRevisionReady, WithRevName("oldconfig-00001"), WithServiceName("greenwood")), + rev("default", "newconfig", 1, MarkRevisionReady, WithRevName("newconfig-00001"), WithServiceName("broadview")), + simpleReadyIngress( + Route("default", "change-configs", WithConfigTarget("oldconfig"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "oldconfig-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "greenwood", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "change-configs", WithConfigTarget("oldconfig"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + // Updated to point to "newconfig" things. + Object: simpleReadyIngress( + Route("default", "change-configs", WithConfigTarget("newconfig"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "newconfig-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "broadview", + Active: true, + }}, + }, + }, + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Status updated to "newconfig" + Object: Route("default", "change-configs", WithConfigTarget("newconfig"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "newconfig-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/change-configs", + }, { + Name: "configuration missing", + Objects: []runtime.Object{ + Route("default", "config-missing", WithConfigTarget("not-found")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "config-missing", WithConfigTarget("not-found"), WithURL, + WithInitRouteConditions, MarkMissingTrafficTarget("Configuration", "not-found")), + }}, + PostConditions: []func(*testing.T, *TableRow){ + AssertTrackingConfig("default", "not-found"), + }, + Key: "default/config-missing", + }, { + Name: "revision missing (direct)", + Objects: []runtime.Object{ + Route("default", "missing-revision-direct", WithRevTarget("not-found")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "missing-revision-direct", WithRevTarget("not-found"), WithURL, + WithInitRouteConditions, MarkMissingTrafficTarget("Revision", "not-found")), + }}, + PostConditions: []func(*testing.T, *TableRow){ + AssertTrackingRevision("default", "not-found"), + }, + Key: "default/missing-revision-direct", + }, { + Name: "revision missing (indirect)", + Objects: []runtime.Object{ + Route("default", "missing-revision-indirect", WithConfigTarget("config")), + cfg("default", "config", + 
WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "missing-revision-indirect", WithConfigTarget("config"), WithURL, + WithInitRouteConditions, MarkMissingTrafficTarget("Revision", "config-00001")), + }}, + Key: "default/missing-revision-indirect", + }, { + Name: "pinned route becomes ready", + Objects: []runtime.Object{ + Route("default", "pinned-becomes-ready", + // Use the Revision name from the config + WithRevTarget("config-00001"), WithRouteFinalizer, + ), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleK8sService(Route("default", "pinned-becomes-ready", WithConfigTarget("config"))), + simpleReadyIngress( + Route("default", "pinned-becomes-ready", WithConfigTarget("config"), + WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "pinned-becomes-ready", + // Use the Revision name from the config + WithRevTarget("config-00001"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + })), + }}, + Key: "default/pinned-becomes-ready", + }, { + Name: "traffic split becomes ready", + Objects: []runtime.Object{ + Route("default", "named-traffic-split", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("34-78"), WithRouteFinalizer), + cfg("default", "blue", + WithGeneration(1), WithLatestCreated("blue-00001"), WithLatestReady("blue-00001")), + cfg("default", "green", + WithGeneration(1), WithLatestCreated("green-00001"), WithLatestReady("green-00001")), + rev("default", "blue", 1, MarkRevisionReady, WithRevName("blue-00001"), WithServiceName("blue-ridge")), + rev("default", "green", 1, MarkRevisionReady, WithRevName("green-00001"), WithServiceName("green-lake")), + }, + WantCreates: []runtime.Object{ + simpleIngress( + Route("default", "named-traffic-split", WithURL, WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("34-78")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + 
TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "blue-00001", + Percent: ptr.Int64(50), + }, + ServiceName: "blue-ridge", + Active: true, + }, { + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "green-00001", + Percent: ptr.Int64(50), + }, + ServiceName: "green-lake", + Active: true, + }}, + }, + }, + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "named-traffic-split", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("34-78"), WithRouteFinalizer), + "", + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "named-traffic-split", WithRouteFinalizer, + WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("34-78"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "blue-00001", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "green-00001", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "named-traffic-split"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "named-traffic-split"), + }, + Key: "default/named-traffic-split", + }, { + Name: "same revision targets", + Objects: 
[]runtime.Object{ + Route("default", "same-revision-targets", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2"), WithRouteFinalizer), + cfg("default", "gray", + WithGeneration(1), WithLatestCreated("gray-00001"), WithLatestReady("gray-00001")), + rev("default", "gray", 1, MarkRevisionReady, WithRevName("gray-00001"), WithServiceName("shades")), + }, + WantCreates: []runtime.Object{ + simpleIngress( + Route("default", "same-revision-targets", WithURL, WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "gray-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "shades", + Active: true, + }}, + "gray": {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "gray-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "shades", + Active: true, + }}, + "also-gray": {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "gray-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "shades", + Active: true, + }}, + }, + }, + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "same-revision-targets", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2"), WithRouteFinalizer), + "", + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "same-revision-targets", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2"), WithRouteFinalizer), + "also-gray", + ), + simplePlaceholderK8sService( + getContext(), + Route("default", "same-revision-targets", WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2"), WithRouteFinalizer), + "gray", + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "same-revision-targets", + WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + ConfigurationName: "gray", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + }, + }), WithRouteUID("1-2"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + 
MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + URL: &apis.URL{ + Scheme: "http", + Host: "gray-same-revision-targets.default.example.com", + }, + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-gray", + RevisionName: "gray-00001", + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + URL: &apis.URL{ + Scheme: "http", + Host: "also-gray-same-revision-targets.default.example.com", + }, + }, + })), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "same-revision-targets"), + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "also-gray-same-revision-targets"), + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "gray-same-revision-targets"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "same-revision-targets"), + }, + Key: "default/same-revision-targets", + }, { + Name: "change route configuration", + // Start from a steady state referencing "blue", and modify the route spec to point to "green" instead. + Objects: []runtime.Object{ + Route("default", "switch-configs", WithConfigTarget("green"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "blue", + RevisionName: "blue-00001", + Percent: ptr.Int64(100), + }, + }), WithRouteFinalizer), + cfg("default", "blue", + WithGeneration(1), WithLatestCreated("blue-00001"), WithLatestReady("blue-00001"), + // The Route controller attaches our label to this Configuration. 
+ WithConfigLabel("serving.knative.dev/route", "switch-configs"), + ), + cfg("default", "green", + WithGeneration(1), WithLatestCreated("green-00001"), WithLatestReady("green-00001")), + rev("default", "blue", 1, MarkRevisionReady, WithRevName("blue-00001"), WithServiceName("alki-beach")), + rev("default", "green", 1, MarkRevisionReady, WithRevName("green-00001"), WithServiceName("rainier-beach")), + simpleReadyIngress( + Route("default", "switch-configs", WithConfigTarget("blue"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "blue-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "alki-beach", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "switch-configs", WithConfigTarget("blue"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleReadyIngress( + Route("default", "switch-configs", WithConfigTarget("green"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "green-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "rainier-beach", + Active: true, + }}, + }, + }, + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "switch-configs", WithConfigTarget("green"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "green-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), WithRouteFinalizer), + }}, + Key: "default/switch-configs", + }, { + Name: "update single target to traffic split with unready revision", + // Start from a steady state referencing "blue", and modify the route spec to point to both + // "blue" and "green" instead, while "green" is not ready. + Objects: []runtime.Object{ + Route("default", "split", WithURL, WithAddress, + WithInitRouteConditions, MarkTrafficAssigned, MarkIngressReady, + WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), + WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }, + )), + cfg("default", "blue", WithGeneration(1), + WithLatestCreated("blue-00001"), WithLatestReady("blue-00001")), + cfg("default", "green", WithGeneration(1), + WithLatestCreated("green-00001")), + rev("default", "blue", 1, MarkRevisionReady, WithRevName("blue-00001")), + rev("default", "green", 1, WithRevName("green-00001")), + simpleK8sService(Route("default", "split", WithConfigTarget("blue"))), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "split", WithURL, WithAddress, + WithInitRouteConditions, 
MarkTrafficAssigned, MarkIngressReady, + MarkConfigurationNotReady("green"), + WithSpecTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "green", + Percent: ptr.Int64(50), + }, + }), + WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: "blue", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/split", + }, { + Name: "Update stale lastPinned", + Objects: []runtime.Object{ + Route("default", "stale-lastpinned", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "stale-lastpinned"), + ), + rev("default", "config", 1, MarkRevisionReady, + WithRevName("config-00001"), + WithLastPinned(fakeCurTime.Add(-10*time.Minute))), + simpleReadyIngress( + Route("default", "stale-lastpinned", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "stale-lastpinned", WithConfigTarget("config"))), + }, + WantPatches: []clientgotesting.PatchActionImpl{ + patchLastPinned("default", "config-00001"), + }, + Key: "default/stale-lastpinned", + }, { + Name: "check that we can find the cluster ingress with old naming", + Objects: []runtime.Object{ + Route("default", "old-naming", WithConfigTarget("config"), WithRouteFinalizer, + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), + WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "old-naming"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "old-naming", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "old-naming", WithConfigTarget("config"))), + }, + Key: "default/old-naming", + }, { + Name: "deletes service when route no longer references service", + Objects: []runtime.Object{ + Route("default", "my-route", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, + WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. + WithConfigLabel("serving.knative.dev/route", "steady-state"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleReadyIngress( + Route("default", "my-route", WithConfigTarget("config"), WithURL), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "my-route", WithConfigTarget("config"))), + simpleK8sService(Route("default", "my-route"), OverrideServiceName("old-service-name")), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "default", + Verb: "delete", + Resource: schema.GroupVersionResource{ + Group: "core", + Version: "v1", + Resource: "services", + }, + }, + Name: "old-service-name", + }}, + Key: "default/my-route", + }, { + Name: "deletes service fails", + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("delete", "services"), + }, + Objects: []runtime.Object{ + Route("default", "my-route", WithConfigTarget("config"), + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressReady, + WithRouteFinalizer, WithStatusTraffic( + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001"), + // The Route controller attaches our label to this Configuration. 
+ WithConfigLabel("serving.knative.dev/route", "steady-state"), + ), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001")), + simpleK8sService(Route("default", "my-route", WithConfigTarget("config"))), + simpleK8sService(Route("default", "my-route"), OverrideServiceName("old-service-name")), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "default", + Verb: "delete", + Resource: schema.GroupVersionResource{ + Group: "core", + Version: "v1", + Resource: "services", + }, + }, + Name: "old-service-name", + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to delete Service: inducing failure for delete services"), + }, + Key: "default/my-route", + }} + + // TODO(mattmoor): Revision inactive (direct reference) + // TODO(mattmoor): Revision inactive (indirect reference) + // TODO(mattmoor): Multiple inactive Revisions + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: listers.GetRouteLister(), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + serviceLister: listers.GetK8sServiceLister(), + ingressLister: listers.GetIngressLister(), + tracker: ctx.Value(TrackerKey).(tracker.Interface), + configStore: &testConfigStore{ + config: ReconcilerTestConfig(false), + }, + clock: FakeClock{Time: fakeCurTime}, + } + })) +} + +func TestReconcile_EnableAutoTLS(t *testing.T) { + table := TableTest{{ + Name: "check that existing wildcard cert is used when creating a Route", + Objects: []runtime.Object{ + wildcardCert("default", "example.com"), + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + 
rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + }, + WantCreates: []runtime.Object{ + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithHTTPSDomain, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{ + { + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "default", + SecretNamespace: "default", + }, + }, + nil, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. 
+ WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), WithReadyCertificateName("default.example.com"), WithHTTPSDomain), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "check that Certificate and IngressTLS are correctly configured when creating a Route", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + }, + WantCreates: []runtime.Object{ + resources.MakeCertificates(Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), + map[string]string{"becomes-ready.default.example.com": ""}, network.CertManagerCertificateClassName)[0], + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{{ + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + SecretNamespace: "default", + }}, + nil, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. + WithURL, WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), MarkCertificateNotReady), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Certificate %s/%s", "default", "route-12-34"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "check that Certificate and IngressTLS are correctly updated when updating a Route", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + // MakeCertificates will create a certificate with DNS name "*.test-ns.example.com" which is not the host name + // needed by the input Route. 
+ &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12-34", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")))}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, + }, + Labels: map[string]string{ + serving.RouteLabelKey: "becomes-ready", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"abc.test.example.com"}, + }, + Status: readyCertStatus(), + }, + }, + WantCreates: []runtime.Object{ + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{ + { + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + SecretNamespace: "default", + }, + }, + nil, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: certificateWithStatus(resources.MakeCertificates(Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), + map[string]string{"becomes-ready.default.example.com": ""}, network.CertManagerCertificateClassName)[0], readyCertStatus()), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. 
+ WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), MarkCertificateReady, + // The certificate is ready. So we want to have HTTPS URL. + WithHTTPSDomain), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Spec for Certificate %s/%s", "default", "route-12-34"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + Name: "verify ingress rules created for http01 challenges", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12-34", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")))}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, + }, + Labels: map[string]string{ + serving.RouteLabelKey: "becomes-ready", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + }, + Status: netv1alpha1.CertificateStatus{ + HTTP01Challenges: []netv1alpha1.HTTP01Challenge{{ + URL: &apis.URL{ + Scheme: "http", + Host: "becomes-ready.default.example.com", + Path: "/.well-known/acme-challenge/challengeToken", + }, + ServiceName: "cm-solver", + 
ServicePort: intstr.FromInt(8090), + ServiceNamespace: "default", + }}, + }, + }, + }, + WantCreates: []runtime.Object{ + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{ + { + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + SecretNamespace: "default", + }, + }, + []netv1alpha1.HTTP01Challenge{{ + URL: &apis.URL{ + Scheme: "http", + Host: "becomes-ready.default.example.com", + Path: "/.well-known/acme-challenge/challengeToken", + }, + ServiceName: "cm-solver", + ServicePort: intstr.FromInt(8090), + ServiceNamespace: "default", + }}, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. + WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), + // The certificate has to be created in the not ready state for the ACME challenge + // ingress rules to be added. 
+ MarkCertificateNotReady, + // Which also means no HTTPS URL + WithURL, + ), + }}, + Key: "default/becomes-ready", + SkipNamespaceValidation: true, + }, { + Name: "check that Route updates status and produces event log when valid name but not owned certificate", + WantErr: true, + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12-34", + Namespace: "default", + // Mark OwnerReferences for this test. + OwnerReferences: nil, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, + }, + Labels: map[string]string{ + serving.RouteLabelKey: "becomes-ready", + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"becomes-ready.default.example.com"}, + }, + Status: readyCertStatus(), + }, + }, + WantDeleteCollections: []clientgotesting.DeleteCollectionActionImpl{}, + WantCreates: []runtime.Object{ + simplePlaceholderK8sService(getContext(), Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), ""), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + WithAddress, WithInitRouteConditions, WithURL, + MarkTrafficAssigned, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), MarkCertificateNotOwned), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeWarning, "InternalError", 
kaccessor.NewAccessorError(fmt.Errorf("owner: %s with Type %T does not own Certificate: %q", "becomes-ready", &v1alpha1.Route{}, "route-12-34"), kaccessor.NotOwnResource).Error()), + }, + Key: "default/becomes-ready", + }, { + Name: "check that Route is correctly updated when Certificate is not ready", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + // MakeCertificates will create a certificate with DNS name "*.test-ns.example.com" which is not the host name + // needed by the input Route. + &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12-34", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")))}, + Labels: labels.Set(map[string]string{ + serving.RouteLabelKey: "becomes-ready", + }), + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"abc.test.example.com"}, + }, + Status: notReadyCertStatus(), + }, + }, + WantCreates: []runtime.Object{ + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{ + { + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + SecretNamespace: "default", + }, + }, + nil, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: certificateWithStatus(resources.MakeCertificates(Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), + map[string]string{"becomes-ready.default.example.com": ""}, network.CertManagerCertificateClassName)[0], notReadyCertStatus()), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. + WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), MarkCertificateNotReady, MarkIngressNotConfigured, + // The certificate is not ready. So we want to have HTTP URL. + WithURL), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Spec for Certificate %s/%s", "default", "route-12-34"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }, { + // This test is a same with "public becomes cluster local" above, but confirm it does not create certs with autoTLS for cluster-local. 
+ Name: "public becomes cluster local w/ autoTLS", + Objects: []runtime.Object{ + Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteLabel(map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal}), + WithRouteUID("65-23")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("tb")), + simpleIngress( + Route("default", "becomes-local", WithConfigTarget("config"), WithRouteUID("65-23"), WithRouteLabel(map[string]string{"serving.knative.dev/visibility": "cluster-local"})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. + RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + ), + simpleK8sService(Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteLabel(map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal}), + WithRouteUID("65-23"))), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: simpleIngressWithVisibility( + Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteUID("65-23"), + WithRouteLabel(map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal})), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "tb", + Active: true, + }}, + }, + }, + sets.NewString("becomes-local"), + ), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-local", WithConfigTarget("config"), + WithRouteUID("65-23"), + MarkTrafficAssigned, MarkIngressNotConfigured, + WithLocalDomain, WithAddress, WithInitRouteConditions, + WithRouteLabel(map[string]string{config.VisibilityLabelKey: config.VisibilityClusterLocal}), + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + })), + }}, + Key: "default/becomes-local", + }} + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: listers.GetRouteLister(), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + serviceLister: listers.GetK8sServiceLister(), + ingressLister: listers.GetIngressLister(), + certificateLister: listers.GetCertificateLister(), + tracker: &NullTracker{}, + configStore: &testConfigStore{ + config: ReconcilerTestConfig(true), + }, + clock: FakeClock{Time: fakeCurTime}, + } + })) +} + +func wildcardCert(namespace string, domain string) *netv1alpha1.Certificate { + cert := &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("%s.%s", namespace, domain), + Labels: map[string]string{ + networking.WildcardCertDomainLabelKey: domain, + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{fmt.Sprintf("*.%s.%s", namespace, domain)}, + SecretName: namespace, + }, + Status: readyCertStatus(), + } + + return cert +} + +func TestReconcile_EnableAutoTLS_HTTPDisabled(t *testing.T) { + table := TableTest{{ + Name: "check that 
Route is correctly updated when Certificate is not ready", + Objects: []runtime.Object{ + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + cfg("default", "config", + WithGeneration(1), WithLatestCreated("config-00001"), WithLatestReady("config-00001")), + rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("mcd")), + // MakeCertificates will create a certificate with DNS name "*.test-ns.example.com" which is not the host name + // needed by the input Route. + &netv1alpha1.Certificate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "route-12-34", + Namespace: "default", + Labels: labels.Set(map[string]string{ + serving.RouteLabelKey: "becomes-ready", + }), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")))}, + Annotations: map[string]string{ + networking.CertificateClassAnnotationKey: network.CertManagerCertificateClassName, + }, + }, + Spec: netv1alpha1.CertificateSpec{ + DNSNames: []string{"abc.test.example.com"}, + }, + Status: notReadyCertStatus(), + }, + }, + WantCreates: []runtime.Object{ + ingressWithTLS( + Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, + WithRouteUID("12-34")), + &traffic.Config{ + Targets: map[string]traffic.RevisionTargets{ + traffic.DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + // Use the Revision name from the config. 
+ RevisionName: "config-00001", + Percent: ptr.Int64(100), + }, + ServiceName: "mcd", + Active: true, + }}, + }, + }, + []netv1alpha1.IngressTLS{ + { + Hosts: []string{"becomes-ready.default.example.com"}, + SecretName: "route-12-34", + SecretNamespace: "default", + }, + }, + nil, + ), + simpleK8sService( + Route("default", "becomes-ready", WithConfigTarget("config"), WithRouteUID("12-34")), + WithExternalName("becomes-ready.default.example.com"), + ), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: certificateWithStatus(resources.MakeCertificates(Route("default", "becomes-ready", WithConfigTarget("config"), WithURL, WithRouteUID("12-34")), + map[string]string{"becomes-ready.default.example.com": ""}, network.CertManagerCertificateClassName)[0], notReadyCertStatus()), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: Route("default", "becomes-ready", WithConfigTarget("config"), + WithRouteUID("12-34"), + // Populated by reconciliation when all traffic has been assigned. + WithAddress, WithInitRouteConditions, + MarkTrafficAssigned, MarkIngressNotConfigured, WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-00001", + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + }), MarkCertificateNotReady, MarkIngressNotConfigured, + // The certificate is not ready. But we still want to have HTTPS URL. 
+ WithHTTPSDomain), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created placeholder service %q", "becomes-ready"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Spec for Certificate %s/%s", "default", "route-12-34"), + Eventf(corev1.EventTypeNormal, "Created", "Created Ingress %q", "becomes-ready"), + }, + Key: "default/becomes-ready", + }} + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + cfg := ReconcilerTestConfig(true) + cfg.Network.HTTPProtocol = network.HTTPDisabled + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + routeLister: listers.GetRouteLister(), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + serviceLister: listers.GetK8sServiceLister(), + ingressLister: listers.GetIngressLister(), + certificateLister: listers.GetCertificateLister(), + tracker: &NullTracker{}, + configStore: &testConfigStore{ + config: cfg, + }, + clock: FakeClock{Time: fakeCurTime}, + } + })) +} + +func cfg(namespace, name string, co ...ConfigOption) *v1alpha1.Configuration { + cfg := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + ResourceVersion: "v1", + }, + Spec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + }, + }, + }, + }, + } + for _, opt := range co { + opt(cfg) + } + return cfg +} + +func simplePlaceholderK8sService(ctx context.Context, r *v1alpha1.Route, targetName string, so ...K8sServiceOption) *corev1.Service { + // omit the error here, as we are sure the loadbalancer info is porvided. + // return the service instance only, so that the result can be used in TableRow. 
+ svc, _ := resources.MakeK8sPlaceholderService(ctx, r, targetName) + + for _, opt := range so { + opt(svc) + } + + return svc +} + +func simpleK8sService(r *v1alpha1.Route, so ...K8sServiceOption) *corev1.Service { + cs := &testConfigStore{ + config: ReconcilerTestConfig(false), + } + ctx := cs.ToContext(context.Background()) + + // omit the error here, as we are sure the loadbalancer info is provided. + // return the service instance only, so that the result can be used in TableRow. + svc, _ := resources.MakeK8sService(ctx, r, "", &netv1alpha1.Ingress{Status: readyIngressStatus()}, false) + + for _, opt := range so { + opt(svc) + } + + return svc +} + +type ingressCtor func(ctx context.Context, + r *v1alpha1.Route, + tc *traffic.Config, + tls []netv1alpha1.IngressTLS, + clusterLocalServices sets.String, + ingressClass string, + acmeChallenges ...netv1alpha1.HTTP01Challenge, +) (*netv1alpha1.Ingress, error) + +func simpleIngress(r *v1alpha1.Route, tc *traffic.Config, io ...IngressOption) *netv1alpha1.Ingress { + return simpleIngressWithVisibility(r, tc, sets.NewString(), io...) +} + +func simpleIngressWithVisibility(r *v1alpha1.Route, tc *traffic.Config, serviceVisibility sets.String, io ...IngressOption) *netv1alpha1.Ingress { + return baseIngressWithClass(r, tc, TestIngressClass, serviceVisibility, resources.MakeIngress, io...) +} + +func ingressWithClass(r *v1alpha1.Route, tc *traffic.Config, class string, serviceVisibility sets.String, io ...IngressOption) *netv1alpha1.Ingress { + return baseIngressWithClass(r, tc, class, serviceVisibility, resources.MakeIngress, io...)
+} + +func baseIngressWithClass(r *v1alpha1.Route, tc *traffic.Config, class string, serviceVisibility sets.String, ctor ingressCtor, io ...IngressOption) *netv1alpha1.Ingress { + ingress, _ := ctor(getContext(), r, tc, nil, serviceVisibility, class) + + for _, opt := range io { + opt(ingress) + } + + return ingress +} + +func ingressWithTLS(r *v1alpha1.Route, tc *traffic.Config, tls []netv1alpha1.IngressTLS, challenges []netv1alpha1.HTTP01Challenge, io ...IngressOption) *netv1alpha1.Ingress { + return baseIngressWithTLS(r, tc, tls, resources.MakeIngress, challenges, io...) +} + +func baseIngressWithTLS(r *v1alpha1.Route, tc *traffic.Config, tls []netv1alpha1.IngressTLS, ctor ingressCtor, challenges []netv1alpha1.HTTP01Challenge, io ...IngressOption) *netv1alpha1.Ingress { + ingress, _ := ctor(getContext(), r, tc, tls, sets.NewString(), TestIngressClass, challenges...) + + for _, opt := range io { + opt(ingress) + } + + return ingress +} + +func simpleReadyIngress(r *v1alpha1.Route, tc *traffic.Config, io ...IngressOption) *netv1alpha1.Ingress { + ingress := ingressWithStatus(r, tc, readyIngressStatus()) + + for _, opt := range io { + opt(ingress) + } + + return ingress +} + +func readyIngressStatus() netv1alpha1.IngressStatus { + status := netv1alpha1.IngressStatus{} + status.InitializeConditions() + status.MarkNetworkConfigured() + status.MarkLoadBalancerReady( + []netv1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + []netv1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("istio-ingressgateway", "istio-system")}, + }, + []netv1alpha1.LoadBalancerIngressStatus{ + {DomainInternal: pkgnet.GetServiceHostname("private-istio-ingressgateway", "istio-system")}, + }, + ) + + return status +} + +func ingressWithStatus(r *v1alpha1.Route, tc *traffic.Config, status netv1alpha1.IngressStatus) *netv1alpha1.Ingress { + ci := simpleIngress(r, tc) + ci.SetName(r.Name) + 
ci.Status = status + + return ci +} + +func mutateIngress(ci *netv1alpha1.Ingress) *netv1alpha1.Ingress { + // Thor's Hammer + ci.Spec = netv1alpha1.IngressSpec{} + return ci +} + +func patchLastPinned(namespace, name string) clientgotesting.PatchActionImpl { + action := clientgotesting.PatchActionImpl{} + action.Name = name + action.Namespace = namespace + lastPinStr := v1alpha1.RevisionLastPinnedString(fakeCurTime) + patch := fmt.Sprintf(`{"metadata":{"annotations":{"serving.knative.dev/lastPinned":%q}}}`, lastPinStr) + action.Patch = []byte(patch) + return action +} + +func rev(namespace, name string, generation int64, ro ...RevisionOption) *v1alpha1.Revision { + r := &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Annotations: map[string]string{ + "serving.knative.dev/lastPinned": v1alpha1.RevisionLastPinnedString( + fakeCurTime.Add(-1 * time.Second)), + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: name, + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + } + for _, opt := range ro { + opt(r) + } + return r +} + +type testConfigStore struct { + config *config.Config +} + +func (t *testConfigStore) ToContext(ctx context.Context) context.Context { + return config.ToContext(ctx, t.config) +} + +var _ reconciler.ConfigStore = (*testConfigStore)(nil) + +func ReconcilerTestConfig(enableAutoTLS bool) *config.Config { + return &config.Config{ + Domain: &config.Domain{ + Domains: map[string]*config.LabelSelector{ + "example.com": {}, + "another-example.com": { + Selector: map[string]string{"app": "prod"}, + }, + }, + }, + Network: &network.Config{ + DefaultIngressClass: TestIngressClass, + DefaultCertificateClass: network.CertManagerCertificateClassName, + AutoTLS: enableAutoTLS, + DomainTemplate: network.DefaultDomainTemplate, + TagTemplate: network.DefaultTagTemplate, + HTTPProtocol: network.HTTPEnabled, + }, + GC: 
&gc.Config{ + StaleRevisionLastpinnedDebounce: time.Duration(1 * time.Minute), + }, + } +} + +func readyCertStatus() netv1alpha1.CertificateStatus { + certStatus := &netv1alpha1.CertificateStatus{} + certStatus.MarkReady() + return *certStatus +} + +func notReadyCertStatus() netv1alpha1.CertificateStatus { + certStatus := &netv1alpha1.CertificateStatus{} + certStatus.MarkNotReady("not ready", "not ready") + return *certStatus +} + +func certificateWithStatus(cert *netv1alpha1.Certificate, status netv1alpha1.CertificateStatus) *netv1alpha1.Certificate { + cert.Status = status + return cert +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/doc.go new file mode 100644 index 0000000000..2e8a65389f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This package contains code that translates our TrafficTarget to an +intermediate format that has less semantics. In particular it deals +with flattening the TrafficTarget to the revision level -- it also +does the grouping of traffic targets into their traffic groups.
+*/ + +package traffic diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors.go new file mode 100644 index 0000000000..2ff9bc6548 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package traffic + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// TargetError gives details about an invalid traffic target. +type TargetError interface { + error + + // MarkBadTrafficTarget marks a RouteStatus with Condition corresponding + // to the error case of the traffic target. + MarkBadTrafficTarget(rs *v1alpha1.RouteStatus) + + // IsFailure returns whether a TargetError is a true failure, e.g. + // a Configuration fails to become ready. + IsFailure() bool +} + +type missingTargetError struct { + kind string // Kind of the traffic target, e.g. Configuration/Revision. + name string // Name of the traffic target. +} + +var _ TargetError = (*missingTargetError)(nil) + +// Error implements error. +func (e *missingTargetError) Error() string { + return fmt.Sprintf("%v %q referenced in traffic not found", e.kind, e.name) +} + +// MarkBadTrafficTarget implements TargetError. 
+func (e *missingTargetError) MarkBadTrafficTarget(rs *v1alpha1.RouteStatus) { + rs.MarkMissingTrafficTarget(e.kind, e.name) +} + +// IsFailure implements TargetError. +func (e *missingTargetError) IsFailure() bool { + return true +} + +type unreadyConfigError struct { + name string // Name of the config that isn't ready. + isFailure bool // True iff target fails to get ready. +} + +var _ TargetError = (*unreadyConfigError)(nil) + +// Error implements error. +func (e *unreadyConfigError) Error() string { + return fmt.Sprintf("Configuration %q not ready, isFailure=%t", e.name, e.isFailure) +} + +// MarkBadTrafficTarget implements TargetError. +func (e *unreadyConfigError) MarkBadTrafficTarget(rs *v1alpha1.RouteStatus) { + if e.IsFailure() { + rs.MarkConfigurationFailed(e.name) + } else { + rs.MarkConfigurationNotReady(e.name) + } +} + +func (e *unreadyConfigError) IsFailure() bool { + return e.isFailure +} + +type unreadyRevisionError struct { + name string // Name of the config that isn't ready. + isFailure bool // True iff the Revision fails to become ready. +} + +var _ TargetError = (*unreadyRevisionError)(nil) + +// Error implements error. +func (e *unreadyRevisionError) Error() string { + return fmt.Sprintf("Revision %q not ready, isFailure=%t", e.name, e.isFailure) +} + +// MarkBadTrafficTarget implements TargetError. +func (e *unreadyRevisionError) MarkBadTrafficTarget(rs *v1alpha1.RouteStatus) { + if e.IsFailure() { + rs.MarkRevisionFailed(e.name) + } else { + rs.MarkRevisionNotReady(e.name) + } +} + +func (e *unreadyRevisionError) IsFailure() bool { + return e.isFailure +} + +// errUnreadyConfiguration returns a TargetError for a Configuration that is not ready. 
+func errUnreadyConfiguration(config *v1alpha1.Configuration) TargetError { + status := corev1.ConditionUnknown + if c := config.Status.GetCondition(v1alpha1.ConfigurationConditionReady); c != nil { + status = c.Status + } + return &unreadyConfigError{ + name: config.Name, + isFailure: status == corev1.ConditionFalse, + } +} + +// errUnreadyRevision returns a TargetError for a Revision that is not ready. +func errUnreadyRevision(rev *v1alpha1.Revision) TargetError { + status := corev1.ConditionUnknown + if c := rev.Status.GetCondition(v1alpha1.RevisionConditionReady); c != nil { + status = c.Status + } + return &unreadyRevisionError{ + name: rev.Name, + isFailure: status == corev1.ConditionFalse, + } +} + +// errMissingConfiguration returns a TargetError for a Configuration that does not exist. +func errMissingConfiguration(name string) TargetError { + return &missingTargetError{ + kind: "Configuration", + name: name, + } +} + +// errMissingRevision returns a TargetError for a Revision that does not exist. +func errMissingRevision(name string) TargetError { + return &missingTargetError{ + kind: "Revision", + name: name, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors_test.go new file mode 100644 index 0000000000..5a4a50a519 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/errors_test.go @@ -0,0 +1,178 @@ +/* +Copyright 2018 The Knative Author + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package traffic + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +func TestIsFailure_Missing(t *testing.T) { + err := errMissingRevision("missing-rev") + want := true + if got := err.IsFailure(); got != want { + t.Errorf("wanted %v, got %v", want, got) + } +} + +func TestMarkBadTrafficTarget_Missing(t *testing.T) { + err := errMissingRevision("missing-rev") + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + + err.MarkBadTrafficTarget(&r.Status) + for _, condType := range []apis.ConditionType{ + v1alpha1.RouteConditionAllTrafficAssigned, + v1alpha1.RouteConditionReady, + } { + got := r.Status.GetCondition(condType) + want := &apis.Condition{ + Type: condType, + Status: corev1.ConditionFalse, + Reason: "RevisionMissing", + Message: `Revision "missing-rev" referenced in traffic not found.`, + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected condition diff (-want +got): %v", diff) + } + } +} + +func TestIsFailure_NotYetReady(t *testing.T) { + err := errUnreadyConfiguration(unreadyConfig) + want := false + if got := err.IsFailure(); got != want { + t.Errorf("wanted %v, got %v", want, got) + } +} + +func TestMarkBadTrafficTarget_NotYetReady(t *testing.T) { + err := errUnreadyConfiguration(unreadyConfig) + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + + err.MarkBadTrafficTarget(&r.Status) + for _, condType := range []apis.ConditionType{ + v1alpha1.RouteConditionAllTrafficAssigned, + v1alpha1.RouteConditionReady, + } { + got := r.Status.GetCondition(condType) + want := &apis.Condition{ + Type: condType, + Status: corev1.ConditionUnknown, 
+ Reason: "RevisionMissing", + Message: `Configuration "unready-config" is waiting for a Revision to become ready.`, + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected condition diff (-want +got): %v", diff) + } + } +} + +func TestIsFailure_ConfigFailedToBeReady(t *testing.T) { + err := errUnreadyConfiguration(failedConfig) + want := true + if got := err.IsFailure(); got != want { + t.Errorf("wanted %v, got %v", want, got) + } +} + +func TestMarkBadTrafficTarget_ConfigFailedToBeReady(t *testing.T) { + err := errUnreadyConfiguration(failedConfig) + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + + err.MarkBadTrafficTarget(&r.Status) + for _, condType := range []apis.ConditionType{ + v1alpha1.RouteConditionAllTrafficAssigned, + v1alpha1.RouteConditionReady, + } { + got := r.Status.GetCondition(condType) + want := &apis.Condition{ + Type: condType, + Status: corev1.ConditionFalse, + Reason: "RevisionMissing", + Message: `Configuration "failed-config" does not have any ready Revision.`, + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected condition diff (-want +got): %v", diff) + } + } +} + +func TestMarkBadTrafficTarget_RevisionFailedToBeReady(t *testing.T) { + err := errUnreadyRevision(failedRev) + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + + err.MarkBadTrafficTarget(&r.Status) + for _, condType := range []apis.ConditionType{ + v1alpha1.RouteConditionAllTrafficAssigned, + v1alpha1.RouteConditionReady, + } { + got := r.Status.GetCondition(condType) + want := &apis.Condition{ + Type: condType, + Status: corev1.ConditionFalse, + Reason: "RevisionMissing", + Message: `Revision "failed-revision" failed to become ready.`, + LastTransitionTime: got.LastTransitionTime, + Severity: 
apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected condition diff (-want +got): %v", diff) + } + } +} + +func TestIsFailure_RevFailedToBeReady(t *testing.T) { + err := errUnreadyRevision(failedRev) + want := true + if got := err.IsFailure(); got != want { + t.Errorf("wanted %v, got %v", want, got) + } +} + +func TestMarkBadTrafficTarget_RevisionNotYetReady(t *testing.T) { + err := errUnreadyRevision(unreadyRev) + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{})) + + err.MarkBadTrafficTarget(&r.Status) + for _, condType := range []apis.ConditionType{ + v1alpha1.RouteConditionAllTrafficAssigned, + v1alpha1.RouteConditionReady, + } { + got := r.Status.GetCondition(condType) + want := &apis.Condition{ + Type: condType, + Status: corev1.ConditionUnknown, + Reason: "RevisionMissing", + Message: `Revision "unready-revision" is not yet ready.`, + LastTransitionTime: got.LastTransitionTime, + Severity: apis.ConditionSeverityError, + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Unexpected condition diff (-want +got): %v", diff) + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go new file mode 100644 index 0000000000..84873e35bb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go @@ -0,0 +1,349 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package traffic + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/ptr" + net "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/domains" + "knative.dev/serving/pkg/reconciler/route/resources/labels" +) + +const ( + // DefaultTarget is the unnamed default target for the traffic. + DefaultTarget = "" +) + +// A RevisionTarget adds the Active/Inactive state and the transport protocol of a +// Revision to a flattened TrafficTarget. +type RevisionTarget struct { + v1.TrafficTarget + Active bool + Protocol net.ProtocolType + ServiceName string // Revision service name. +} + +// RevisionTargets is a collection of revision targets. +type RevisionTargets []RevisionTarget + +// Config encapsulates details of our traffic so that we don't need to make API calls, or use details of the +// route beyond its ObjectMeta to make routing changes. +type Config struct { + // Group of traffic splits. Un-named targets are grouped together + // under the key `DefaultTarget`, and named target are under the respective + // name. This is used to configure network configuration to + // realize a route's setting. + Targets map[string]RevisionTargets + + // A list traffic targets, flattened to the Revision level. This + // is used to populate the Route.Status.TrafficTarget field. + revisionTargets RevisionTargets + + // The referred `Configuration`s and `Revision`s. 
+ Configurations map[string]*v1alpha1.Configuration + Revisions map[string]*v1alpha1.Revision + + // MissingTargets are references to Configuration's or Revision's + // that are missing + MissingTargets []corev1.ObjectReference +} + +// BuildTrafficConfiguration consolidates and flattens the Route.Spec.Traffic to the Revision-level. It also provides a +// complete lists of Configurations and Revisions referred by the Route, directly or indirectly. These referred targets +// are keyed by name for easy access. +// +// In the case that some target is missing, an error of type TargetError will be returned. +func BuildTrafficConfiguration(configLister listers.ConfigurationLister, revLister listers.RevisionLister, + r *v1alpha1.Route) (*Config, error) { + builder := newBuilder(configLister, revLister, r.Namespace, len(r.Spec.Traffic)) + builder.applySpecTraffic(r.Spec.Traffic) + return builder.build() +} + +// GetRevisionTrafficTargets returns a list of TrafficTarget flattened to the RevisionName, and having ConfigurationName cleared out. +func (t *Config) GetRevisionTrafficTargets(ctx context.Context, r *v1alpha1.Route, clusterLocalService sets.String) ([]v1alpha1.TrafficTarget, error) { + results := make([]v1alpha1.TrafficTarget, len(t.revisionTargets)) + for i, tt := range t.revisionTargets { + var pp *int64 + if tt.Percent != nil { + pp = ptr.Int64(*tt.Percent) + } + + // We cannot `DeepCopy` here, since tt.TrafficTarget might contain both + // configuration and revision. 
+ results[i] = v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: tt.Tag, + RevisionName: tt.RevisionName, + Percent: pp, + LatestRevision: tt.LatestRevision, + }, + } + if tt.Tag != "" { + meta := r.ObjectMeta.DeepCopy() + + hostname, err := domains.HostnameFromTemplate(ctx, meta.Name, tt.Tag) + if err != nil { + return nil, err + } + + labels.SetVisibility(meta, clusterLocalService.Has(hostname)) + + // http is currently the only supported scheme + fullDomain, err := domains.DomainNameFromTemplate(ctx, *meta, hostname) + if err != nil { + return nil, err + } + results[i].URL = domains.URL(domains.HTTPScheme, fullDomain) + } + } + return results, nil +} + +type configBuilder struct { + configLister listers.ConfigurationLister + revLister listers.RevisionLister + namespace string + + // targets is a grouping of traffic targets serving the same origin. + targets map[string]RevisionTargets + + // revisionTargets is the original list of targets, at the Revision level. + revisionTargets RevisionTargets + + // configurations contains all the referred Configuration, keyed by their name. + configurations map[string]*v1alpha1.Configuration + // revisions contains all the referred Revision, keyed by their name. + revisions map[string]*v1alpha1.Revision + + // missingTargets is a collection of targets that weren't present + // in our listers + missingTargets []corev1.ObjectReference + + // TargetError are deferred until we got a complete list of all referred targets. 
+ deferredTargetErr TargetError +} + +func newBuilder( + configLister listers.ConfigurationLister, revLister listers.RevisionLister, + namespace string, trafficSize int) *configBuilder { + return &configBuilder{ + configLister: configLister, + revLister: revLister, + namespace: namespace, + targets: make(map[string]RevisionTargets), + revisionTargets: make(RevisionTargets, 0, trafficSize), + + configurations: make(map[string]*v1alpha1.Configuration), + revisions: make(map[string]*v1alpha1.Revision), + } +} + +func (t *configBuilder) applySpecTraffic(traffic []v1alpha1.TrafficTarget) error { + for _, tt := range traffic { + if err := t.addTrafficTarget(&tt); err != nil { + // Other non-traffic target errors shouldn't be ignored. + return err + } + } + return nil +} + +func (t *configBuilder) getConfiguration(name string) (*v1alpha1.Configuration, error) { + if _, ok := t.configurations[name]; !ok { + config, err := t.configLister.Configurations(t.namespace).Get(name) + if errors.IsNotFound(err) { + return nil, errMissingConfiguration(name) + } else if err != nil { + return nil, err + } + t.configurations[name] = config + } + return t.configurations[name], nil +} + +func (t *configBuilder) getRevision(name string) (*v1alpha1.Revision, error) { + if _, ok := t.revisions[name]; !ok { + rev, err := t.revLister.Revisions(t.namespace).Get(name) + if errors.IsNotFound(err) { + return nil, errMissingRevision(name) + } else if err != nil { + return nil, err + } + t.revisions[name] = rev + } + return t.revisions[name], nil +} + +// deferTargetError will record a TargetError. A TargetError with +// IsFailure()=true will always overwrite a previous TargetError. 
+func (t *configBuilder) deferTargetError(err TargetError) { + if t.deferredTargetErr == nil || err.IsFailure() { + t.deferredTargetErr = err + } +} + +func (t *configBuilder) addTrafficTarget(tt *v1alpha1.TrafficTarget) error { + var err error + if tt.RevisionName != "" { + err = t.addRevisionTarget(tt) + } else if tt.ConfigurationName != "" { + err = t.addConfigurationTarget(tt) + } + if err, ok := err.(*missingTargetError); err != nil && ok { + apiVersion, kind := v1alpha1.SchemeGroupVersion. + WithKind(err.kind). + ToAPIVersionAndKind() + + t.missingTargets = append(t.missingTargets, corev1.ObjectReference{ + APIVersion: apiVersion, + Kind: kind, + Name: err.name, + Namespace: t.namespace, + }) + } + if err, ok := err.(TargetError); err != nil && ok { + // Defer target errors, as we still want to compile a list of + // all referred targets, including missing ones. + t.deferTargetError(err) + return nil + } + return err +} + +// addConfigurationTarget flattens a traffic target to the Revision level, by looking up for the LatestReadyRevisionName +// on the referred Configuration. It adds both to the lists of directly referred targets. 
+func (t *configBuilder) addConfigurationTarget(tt *v1alpha1.TrafficTarget) error { + config, err := t.getConfiguration(tt.ConfigurationName) + if err != nil { + return err + } + if config.Status.LatestReadyRevisionName == "" { + return errUnreadyConfiguration(config) + } + rev, err := t.getRevision(config.Status.LatestReadyRevisionName) + if err != nil { + return err + } + ntt := tt.TrafficTarget.DeepCopy() + target := RevisionTarget{ + TrafficTarget: *ntt, + Active: !rev.Status.IsActivationRequired(), + Protocol: rev.GetProtocol(), + ServiceName: rev.Status.ServiceName, + } + target.TrafficTarget.RevisionName = rev.Name + t.addFlattenedTarget(target) + return nil +} + +func (t *configBuilder) addRevisionTarget(tt *v1alpha1.TrafficTarget) error { + rev, err := t.getRevision(tt.RevisionName) + if err != nil { + return err + } + if !rev.Status.IsReady() { + return errUnreadyRevision(rev) + } + ntt := tt.TrafficTarget.DeepCopy() + target := RevisionTarget{ + TrafficTarget: *ntt, + Active: !rev.Status.IsActivationRequired(), + Protocol: rev.GetProtocol(), + ServiceName: rev.Status.ServiceName, + } + if configName, ok := rev.Labels[serving.ConfigurationLabelKey]; ok { + target.TrafficTarget.ConfigurationName = configName + if _, err := t.getConfiguration(configName); err != nil { + return err + } + } + t.addFlattenedTarget(target) + return nil +} + +func (t *configBuilder) addFlattenedTarget(target RevisionTarget) { + name := target.TrafficTarget.Tag + t.revisionTargets = append(t.revisionTargets, target) + t.targets[DefaultTarget] = append(t.targets[DefaultTarget], target) + if name != "" { + t.targets[name] = append(t.targets[name], target) + } +} + +func consolidate(targets RevisionTargets) RevisionTargets { + byName := make(map[string]RevisionTarget) + names := []string{} + for _, tt := range targets { + name := tt.TrafficTarget.RevisionName + cur, ok := byName[name] + if !ok { + byName[name] = tt + names = append(names, name) + } else { + if 
tt.TrafficTarget.Percent != nil { + current := int64(0) + if cur.TrafficTarget.Percent != nil { + current += *cur.TrafficTarget.Percent + } + current += *tt.TrafficTarget.Percent + cur.TrafficTarget.Percent = ptr.Int64(current) + } + byName[name] = cur + } + } + consolidated := make([]RevisionTarget, len(names)) + for i, name := range names { + consolidated[i] = byName[name] + } + if len(consolidated) == 1 { + consolidated[0].TrafficTarget.Percent = ptr.Int64(100) + } + return consolidated +} + +func consolidateAll(targets map[string]RevisionTargets) map[string]RevisionTargets { + consolidated := make(map[string]RevisionTargets) + for name, tts := range targets { + consolidated[name] = consolidate(tts) + } + return consolidated +} + +func (t *configBuilder) build() (*Config, error) { + if t.deferredTargetErr != nil { + t.targets = nil + t.revisionTargets = nil + } + return &Config{ + Targets: consolidateAll(t.targets), + revisionTargets: t.revisionTargets, + Configurations: t.configurations, + Revisions: t.revisions, + MissingTargets: t.missingTargets, + }, t.deferredTargetErr +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic_test.go new file mode 100644 index 0000000000..0a24d75e58 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic_test.go @@ -0,0 +1,1137 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package traffic + +import ( + "context" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/pkg/ptr" + net "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakeclientset "knative.dev/serving/pkg/client/clientset/versioned/fake" + informers "knative.dev/serving/pkg/client/informers/externalversions" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/gc" + "knative.dev/serving/pkg/network" + "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/domains" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const testNamespace string = "test" + +// A simple fixed Configuration/Revision layout for testing. +// Tests should not modify these objects. +var ( + // These are objects never inserted. + missingConfig *v1alpha1.Configuration + missingRev *v1alpha1.Revision + + // emptyConfig never has any revision. + emptyConfig *v1alpha1.Configuration + + // revDeletedConfig has a Ready revision but was deleted. + revDeletedConfig *v1alpha1.Configuration + + // unreadyConfig only has unreadyRev, and it's not ready. + unreadyConfig *v1alpha1.Configuration + unreadyRev *v1alpha1.Revision + + // failedConfig only has failedRev, and it fails to be ready. + failedConfig *v1alpha1.Configuration + failedRev *v1alpha1.Revision + + // inactiveConfig only has inactiveRevision, and it's not active. 
+ inactiveConfig *v1alpha1.Configuration + inactiveRev *v1alpha1.Revision + + // goodConfig has two good revisions: goodOldRev and goodNewRev + goodConfig *v1alpha1.Configuration + goodOldRev *v1alpha1.Revision + goodNewRev *v1alpha1.Revision + + // niceConfig has two good revisions: niceOldRev and niceNewRev + niceConfig *v1alpha1.Configuration + niceOldRev *v1alpha1.Revision + niceNewRev *v1alpha1.Revision + + configLister listers.ConfigurationLister + revLister listers.RevisionLister + + cmpOpts = []cmp.Option{cmp.AllowUnexported(Config{})} +) + +func setUp() { + emptyConfig = getTestEmptyConfig("empty") + revDeletedConfig = testConfigWithDeletedRevision("latest-rev-deleted") + unreadyConfig, unreadyRev = getTestUnreadyConfig("unready") + failedConfig, failedRev = getTestFailedConfig("failed") + inactiveConfig, inactiveRev = getTestInactiveConfig("inactive") + goodConfig, goodOldRev, goodNewRev = getTestReadyConfig("good") + niceConfig, niceOldRev, niceNewRev = getTestReadyConfig("nice") + servingClient := fakeclientset.NewSimpleClientset() + + servingInformer := informers.NewSharedInformerFactory(servingClient, 0) + configInformer := servingInformer.Serving().V1alpha1().Configurations() + configLister = configInformer.Lister() + revInformer := servingInformer.Serving().V1alpha1().Revisions() + revLister = revInformer.Lister() + + // Add these test objects to the informers. 
+ objs := []runtime.Object{ + unreadyConfig, unreadyRev, + failedConfig, failedRev, + inactiveConfig, inactiveRev, + revDeletedConfig, + emptyConfig, + goodConfig, goodOldRev, goodNewRev, + niceConfig, niceOldRev, niceNewRev, + } + + for _, obj := range objs { + switch o := obj.(type) { + case *v1alpha1.Configuration: + configInformer.Informer().GetIndexer().Add(o) + case *v1alpha1.Revision: + revInformer.Informer().GetIndexer().Add(o) + } + } + + missingConfig, missingRev = getTestUnreadyConfig("missing") +} + +// The vanilla use case of 100% directing to latest ready revision of a single configuration. +func TestBuildTrafficConfiguration_Vanilla(t *testing.T) { + tts := v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + Percent: ptr.Int64(100), + }, + } + + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodNewRev.Name: goodNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(tts))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) 
{ + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func testRouteWithTrafficTargets(trafficTarget RouteOption) *v1alpha1.Route { + return Route(testNamespace, "test-route", WithRouteLabel(map[string]string{"route": "test-route"}), trafficTarget) +} + +func TestBuildTrafficConfiguration_NoNameRevision(t *testing.T) { + tts := v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + }, + } + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodNewRev.Name, + ConfigurationName: goodConfig.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{goodConfig.Name: goodConfig}, + Revisions: map[string]*v1alpha1.Revision{goodNewRev.Name: goodNewRev}, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(tts))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// The vanilla use case of 100% directing to latest revision of an inactive configuration. 
+func TestBuildTrafficConfiguration_VanillaScaledToZero(t *testing.T) { + tts := v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: inactiveConfig.Name, + Percent: ptr.Int64(100), + }, + } + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: inactiveConfig.Name, + RevisionName: inactiveRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: false, + Protocol: net.ProtocolHTTP1, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: inactiveConfig.Name, + RevisionName: inactiveRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: false, + Protocol: net.ProtocolHTTP1, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + inactiveConfig.Name: inactiveConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + inactiveRev.Name: inactiveRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(tts))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// Transitioning from one good config to another by splitting traffic. 
+func TestBuildTrafficConfiguration_TwoConfigs(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + niceConfig.Name: niceConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodNewRev.Name: goodNewRev, + niceNewRev.Name: niceNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: niceConfig.Name, + Percent: ptr.Int64(90), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + Percent: ptr.Int64(10), + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// Splitting traffic between a fixed revision and the latest revision (canary). 
+func TestBuildTrafficConfiguration_Canary(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodOldRev.Name: goodOldRev, + goodNewRev.Name: goodNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + Percent: ptr.Int64(10), + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// Splitting traffic between latest revision and a fixed revision which is also latest. 
+func TestBuildTrafficConfiguration_Consolidated(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "one", + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(49), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "two", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(51), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + "one": {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "one", + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }}, + "two": {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "two", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + "also-two": {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-two", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "one", + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(49), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "two", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "also-two", + ConfigurationName: goodConfig.Name, + 
RevisionName: goodNewRev.Name, + Percent: ptr.Int64(1), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodOldRev.Name: goodOldRev, + goodNewRev.Name: goodNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "one", + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(49), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "two", + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "also-two", + ConfigurationName: goodConfig.Name, + Percent: ptr.Int64(1), + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// Splitting traffic between a two fixed revisions. 
+func TestBuildTrafficConfiguration_TwoFixedRevisions(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodNewRev.Name: goodNewRev, + goodOldRev.Name: goodOldRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(90), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(10), + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// Splitting traffic between a two fixed revisions of two configurations. 
+func TestBuildTrafficConfiguration_TwoFixedRevisionsFromTwoConfigurations(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(40), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(60), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(40), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(60), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + niceConfig.Name: niceConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodNewRev.Name: goodNewRev, + niceNewRev.Name: niceNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(40), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(60), + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +// One fixed, two named targets for newer stuffs. 
+func TestBuildTrafficConfiguration_Preliminary(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{ + DefaultTarget: {{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + "beta": {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + "alpha": {{ + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + }, + revisionTargets: []RevisionTarget{{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: goodConfig.Name, + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolHTTP1, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + ConfigurationName: goodConfig.Name, + RevisionName: goodNewRev.Name, + LatestRevision: ptr.Bool(false), + }, + Active: true, + Protocol: net.ProtocolH2C, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: niceConfig.Name, + RevisionName: niceNewRev.Name, + LatestRevision: ptr.Bool(true), + }, + Active: true, + Protocol: net.ProtocolH2C, + }}, + 
Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + niceConfig.Name: niceConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodOldRev.Name: goodOldRev, + goodNewRev.Name: goodNewRev, + niceNewRev.Name: niceNewRev, + }, + } + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + RevisionName: goodNewRev.Name, + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: niceConfig.Name, + }, + }))); err != nil { + t.Errorf("Unexpected error %v", err) + } else if want, got := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_MissingConfig(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{ + goodConfig.Name: goodConfig, + }, + Revisions: map[string]*v1alpha1.Revision{ + goodOldRev.Name: goodOldRev, + goodNewRev.Name: goodNewRev, + }, + MissingTargets: []corev1.ObjectReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Configuration", + Name: missingConfig.Name, + Namespace: missingConfig.Namespace, + }}, + } + + expectedErr := errMissingConfiguration(missingConfig.Name) + r := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + RevisionName: goodNewRev.Name, + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: missingConfig.Name, + }, + })) + if tc, err := 
BuildTrafficConfiguration(configLister, revLister, r); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_NotRoutableRevision(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{}, + Revisions: map[string]*v1alpha1.Revision{unreadyRev.Name: unreadyRev}, + } + expectedErr := errUnreadyRevision(unreadyRev) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: unreadyRev.Name, + Percent: ptr.Int64(100), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected error %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_NotRoutableConfiguration(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{unreadyConfig.Name: unreadyConfig}, + Revisions: map[string]*v1alpha1.Revision{}, + } + expectedErr := errUnreadyConfiguration(unreadyConfig) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: unreadyConfig.Name, + Percent: ptr.Int64(100), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected error %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) 
{ + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_EmptyConfiguration(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{ + emptyConfig.Name: emptyConfig, + }, + Revisions: map[string]*v1alpha1.Revision{}, + } + + expectedErr := errUnreadyConfiguration(emptyConfig) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: emptyConfig.Name, + Percent: ptr.Int64(100), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected error %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_EmptyAndFailedConfigurations(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{ + emptyConfig.Name: emptyConfig, + failedConfig.Name: failedConfig, + }, + Revisions: map[string]*v1alpha1.Revision{}, + } + expectedErr := errUnreadyConfiguration(failedConfig) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: emptyConfig.Name, + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: failedConfig.Name, + Percent: ptr.Int64(50), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected error %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) 
{ + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_FailedAndEmptyConfigurations(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{ + emptyConfig.Name: emptyConfig, + failedConfig.Name: failedConfig, + }, + Revisions: map[string]*v1alpha1.Revision{}, + } + expectedErr := errUnreadyConfiguration(failedConfig) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: failedConfig.Name, + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: emptyConfig.Name, + Percent: ptr.Int64(50), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected error %v, saw %v", expectedErr, err) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) 
{ + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestBuildTrafficConfiguration_MissingRevision(t *testing.T) { + expected := &Config{ + Targets: map[string]RevisionTargets{}, + Configurations: map[string]*v1alpha1.Configuration{goodConfig.Name: goodConfig}, + Revisions: map[string]*v1alpha1.Revision{goodNewRev.Name: goodNewRev}, + MissingTargets: []corev1.ObjectReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Revision", + Name: missingRev.Name, + Namespace: missingRev.Namespace, + }}, + } + expectedErr := errMissingRevision(missingRev.Name) + if tc, err := BuildTrafficConfiguration(configLister, revLister, testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: missingRev.Name, + Percent: ptr.Int64(50), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodNewRev.Name, + Percent: ptr.Int64(50), + }, + }))); err != nil && expectedErr.Error() != err.Error() { + t.Errorf("Expected %s, saw %s", expectedErr.Error(), err.Error()) + } else if got, want := tc, expected; !cmp.Equal(want, got, cmpOpts...) 
{ + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got, cmpOpts...)) + } +} + +func TestRoundTripping(t *testing.T) { + expected := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + RevisionName: goodNewRev.Name, + URL: domains.URL(domains.HTTPScheme, "beta-test-route.test.example.com"), + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + RevisionName: niceNewRev.Name, + URL: domains.URL(domains.HTTPScheme, "alpha-test-route.test.example.com"), + LatestRevision: ptr.Bool(true), + }, + }} + route := testRouteWithTrafficTargets(WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: goodOldRev.Name, + Percent: ptr.Int64(100), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "beta", + RevisionName: goodNewRev.Name, + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "alpha", + ConfigurationName: niceConfig.Name, + }, + })) + if tc, err := BuildTrafficConfiguration(configLister, revLister, route); err != nil { + t.Errorf("Unexpected error %v", err) + } else { + targets, err := tc.GetRevisionTrafficTargets(getContext(), route, sets.String{}) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if want, got := expected, targets; !cmp.Equal(want, got) { + t.Errorf("Unexpected traffic diff (-want +got): %v", cmp.Diff(want, got)) + } + } +} + +func testConfig(name string) *v1alpha1.Configuration { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + }, + Spec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "test-image", + }}, + }, 
+ }, + }, + }, + }, + } +} + +func testRevForConfig(config *v1alpha1.Configuration, name string) *v1alpha1.Revision { + return &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: testNamespace, + Labels: map[string]string{ + serving.ConfigurationLabelKey: config.Name, + }, + }, + Spec: *config.Spec.GetTemplate().Spec.DeepCopy(), + } +} + +func getTestEmptyConfig(name string) *v1alpha1.Configuration { + config := testConfig(name + "-config") + config.Status.InitializeConditions() + return config +} + +func testConfigWithDeletedRevision(name string) *v1alpha1.Configuration { + config := testConfig(name + "-config") + config.Status.SetLatestCreatedRevisionName("i-was-deleted") + config.Status.SetLatestReadyRevisionName("") + config.Status.MarkLatestReadyDeleted() + return config +} + +func getTestUnreadyConfig(name string) (*v1alpha1.Configuration, *v1alpha1.Revision) { + config := testConfig(name + "-config") + rev := testRevForConfig(config, name+"-revision") + config.Status.SetLatestCreatedRevisionName(rev.Name) + return config, rev +} + +func getTestFailedConfig(name string) (*v1alpha1.Configuration, *v1alpha1.Revision) { + config := testConfig(name + "-config") + rev := testRevForConfig(config, name+"-revision") + config.Status.SetLatestCreatedRevisionName(rev.Name) + config.Status.MarkLatestCreatedFailed(rev.Name, "Permanently failed") + rev.Status.MarkContainerHealthyFalse(v1alpha1.ContainerMissing, "Should have used ko") + return config, rev +} + +func getTestInactiveConfig(name string) (*v1alpha1.Configuration, *v1alpha1.Revision) { + config := testConfig(name + "-config") + rev := testRevForConfig(config, name+"-revision") + config.Status.SetLatestReadyRevisionName(rev.Name) + config.Status.SetLatestCreatedRevisionName(rev.Name) + rev.Status.InitializeConditions() + rev.Status.MarkActiveFalse("Reserve", "blah blah blah") + return config, rev +} + +func getTestReadyConfig(name string) (*v1alpha1.Configuration, *v1alpha1.Revision, 
*v1alpha1.Revision) { + config := testConfig(name + "-config") + rev1 := testRevForConfig(config, name+"-revision-1") + rev1.Status.MarkResourcesAvailableTrue() + rev1.Status.MarkContainerHealthyTrue() + rev1.Status.MarkActiveTrue() + + // rev1 will use http1, rev2 will use h2c + config.Spec.GetTemplate().Spec.GetContainer().Ports = []corev1.ContainerPort{{ + Name: "h2c", + }} + + rev2 := testRevForConfig(config, name+"-revision-2") + rev2.Status.MarkResourcesAvailableTrue() + rev2.Status.MarkContainerHealthyTrue() + rev2.Status.MarkActiveTrue() + config.Status.SetLatestReadyRevisionName(rev2.Name) + config.Status.SetLatestCreatedRevisionName(rev2.Name) + return config, rev1, rev2 +} + +func TestMain(m *testing.M) { + setUp() + os.Exit(m.Run()) +} + +func getContext() context.Context { + ctx := context.Background() + cfg := testNetworkConfig() + return config.ToContext(ctx, cfg) +} + +func testNetworkConfig() *config.Config { + return &config.Config{ + Domain: &config.Domain{ + Domains: map[string]*config.LabelSelector{ + "example.com": {}, + "another-example.com": { + Selector: map[string]string{"app": "prod"}, + }, + }, + }, + Network: &network.Config{ + DefaultIngressClass: "test-ingress-class", + DomainTemplate: network.DefaultDomainTemplate, + TagTemplate: network.DefaultTagTemplate, + }, + GC: &gc.Config{ + StaleRevisionLastpinnedDebounce: time.Duration(1 * time.Minute), + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go new file mode 100644 index 0000000000..27e0b7ffa8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverlessservice + +import ( + "context" + + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + serviceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/service" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + sksinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice" + pkgreconciler "knative.dev/serving/pkg/reconciler" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +const ( + controllerAgentName = "serverlessservice-controller" +) + +// NewController initializes the controller and is called by the generated code. +// Registers eventhandlers to enqueue events. +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + serviceInformer := serviceinformer.Get(ctx) + endpointsInformer := endpointsinformer.Get(ctx) + sksInformer := sksinformer.Get(ctx) + + c := &reconciler{ + Base: pkgreconciler.NewBase(ctx, controllerAgentName, cmw), + endpointsLister: endpointsInformer.Lister(), + serviceLister: serviceInformer.Lister(), + sksLister: sksInformer.Lister(), + psInformerFactory: podscalable.Get(ctx), + } + impl := controller.NewImpl(c, c.Logger, reconcilerName) + + c.Logger.Info("Setting up event handlers") + + // Watch all the SKS objects. 
+ sksInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + // Watch all the endpoints that we have attached our label to. + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: pkgreconciler.LabelExistsFilterFunc(networking.SKSLabelKey), + Handler: controller.HandleAll(impl.EnqueueLabelOfNamespaceScopedResource("" /*any namespace*/, networking.SKSLabelKey)), + }) + + // Watch all the services that we have created. + serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(netv1alpha1.SchemeGroupVersion.WithKind("ServerlessService")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + // Watch activator-service endpoints. + grCb := func(obj interface{}) { + // Since changes in the Activator Service endpoints affect all the SKS objects, + // do a global resync. + c.Logger.Info("Doing a global resync due to activator endpoint changes") + impl.GlobalResync(sksInformer.Informer()) + } + endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + // Accept only ActivatorService K8s service objects. + FilterFunc: pkgreconciler.ChainFilterFuncs( + pkgreconciler.NamespaceFilterFunc(system.Namespace()), + pkgreconciler.NameFilterFunc(networking.ActivatorServiceName)), + Handler: controller.HandleAll(grCb), + }) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/global_resync_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/global_resync_test.go new file mode 100644 index 0000000000..23d57b103c --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/global_resync_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverlessservice + +import ( + "testing" + "time" + + "golang.org/x/sync/errgroup" + + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake" + "knative.dev/serving/pkg/apis/networking" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing" +) + +func TestGlobalResyncOnActivatorChange(t *testing.T) { + const ( + ns1 = "test-ns1" + ns2 = "test-ns2" + sks1 = "test-sks-1" + sks2 = "test-sks-2" + ) + ctx, cancel, informers := SetupFakeContextWithCancel(t) + // Replace the fake dynamic client with one containing our objects. + ctx, _ = fakedynamicclient.With(ctx, runtime.NewScheme(), + ToUnstructured(t, NewScheme(), []runtime.Object{deploy(ns1, sks1), deploy(ns2, sks2)})..., + ) + ctx = podscalable.WithDuck(ctx) + ctrl := NewController(ctx, configmap.NewStaticWatcher()) + + grp := errgroup.Group{} + + kubeClnt := fakekubeclient.Get(ctx) + + // Create activator endpoints. 
+ aEps := activatorEndpoints(WithSubsets) + if _, err := kubeClnt.CoreV1().Endpoints(aEps.Namespace).Create(aEps); err != nil { + t.Fatalf("Error creating activator endpoints: %v", err) + } + + // Private endpoints are supposed to exist, since we're using selector mode for the service. + privEps := endpointspriv(ns1, sks1) + if _, err := kubeClnt.CoreV1().Endpoints(privEps.Namespace).Create(privEps); err != nil { + t.Fatalf("Error creating private endpoints: %v", err) + } + // This is passive, so no endpoints. + privEps = endpointspriv(ns2, sks2, withOtherSubsets) + if _, err := kubeClnt.CoreV1().Endpoints(privEps.Namespace).Create(privEps); err != nil { + t.Fatalf("Error creating private endpoints: %v", err) + } + + waitInformers, err := controller.RunInformers(ctx.Done(), informers...) + if err != nil { + t.Fatalf("Error starting informers: %v", err) + } + defer func() { + cancel() + if err := grp.Wait(); err != nil { + t.Fatalf("Error waiting for contoller to terminate: %v", err) + } + waitInformers() + }() + + grp.Go(func() error { + return ctrl.Run(1, ctx.Done()) + }) + + numServices, numEndpoints := 0, 0 + hooks := NewHooks() + hooks.OnCreate(&kubeClnt.Fake, "endpoints", func(obj runtime.Object) HookResult { + t.Logf("Registered creation of endpoints: %#v", obj) + // We are waiting for creation of two endpoints objects. + numEndpoints++ + if numEndpoints == 2 { + return HookComplete + } + return HookIncomplete + }) + hooks.OnCreate(&kubeClnt.Fake, "services", func(obj runtime.Object) HookResult { + t.Logf("Registered creation of services: %#v", obj) + numServices++ + // We need to wait for creation of 2x2 K8s services. + if numServices == 4 { + return HookComplete + } + return HookIncomplete + }) + + // Inactive, will reconcile. + sksObj1 := SKS(ns1, sks1, WithPrivateService, WithPubService, WithDeployRef(sks1), WithProxyMode) + // Active, should not visibly reconcile. 
+ sksObj2 := SKS(ns2, sks2, WithPrivateService, WithPubService, WithDeployRef(sks2), markHappy) + + if _, err := fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(ns1).Create(sksObj1); err != nil { + t.Fatalf("Error creating SKS1: %v", err) + } + if _, err := fakeservingclient.Get(ctx).NetworkingV1alpha1().ServerlessServices(ns2).Create(sksObj2); err != nil { + t.Fatalf("Error creating SKS2: %v", err) + } + if err := hooks.WaitForHooks(3 * time.Second); err != nil { + t.Fatalf("Error creating preliminary objects: %v", err) + } + + t.Log("Updating the activator endpoints now...") + // Now that we have established the baseline, update the activator endpoints. + // Reset the hooks. + hooks = NewHooks() + hooks.OnUpdate(&kubeClnt.Fake, "endpoints", func(obj runtime.Object) HookResult { + eps := obj.(*corev1.Endpoints) + if eps.Name == sks1 { + t.Logf("Registering expected hook update for endpoints %s", eps.Name) + return HookComplete + } + if eps.Name == networking.ActivatorServiceName { + // Expected, but not the one we're waiting for. + t.Log("Registering activator endpoint update") + } else { + // Something's broken. 
+ t.Errorf("Unexpected endpoint update for %s", eps.Name) + } + return HookIncomplete + }) + + aEps = activatorEndpoints(withOtherSubsets) + if _, err := kubeClnt.CoreV1().Endpoints(aEps.Namespace).Update(aEps); err != nil { + t.Fatalf("Error creating activator endpoints: %v", err) + } + + if err := hooks.WaitForHooks(3 * time.Second); err != nil { + t.Fatalf("Hooks timed out: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go new file mode 100644 index 0000000000..7ddf76161d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/resources" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// targetPort chooses the target (pod) port for the public and private service. 
+func targetPort(sks *v1alpha1.ServerlessService) intstr.IntOrString { + if sks.Spec.ProtocolType == networking.ProtocolH2C { + return intstr.FromInt(networking.BackendHTTP2Port) + } + return intstr.FromInt(networking.BackendHTTPPort) +} + +// MakePublicService constructs a K8s Service that is not backed by a selector +// and will be manually reconciled by the SKS controller. +func MakePublicService(sks *v1alpha1.ServerlessService) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: sks.Name, + Namespace: sks.Namespace, + Labels: resources.UnionMaps(sks.GetLabels(), map[string]string{ + // Add our own special key. + networking.SKSLabelKey: sks.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePublic), + }), + Annotations: resources.CopyMap(sks.GetAnnotations()), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(sks)}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortName(sks.Spec.ProtocolType), + Protocol: corev1.ProtocolTCP, + Port: int32(networking.ServicePort(sks.Spec.ProtocolType)), + TargetPort: targetPort(sks), + }}, + }, + } +} + +// MakePublicEndpoints constructs a K8s Endpoints that is not backed by a selector +// and will be manually reconciled by the SKS controller. +func MakePublicEndpoints(sks *v1alpha1.ServerlessService, src *corev1.Endpoints) *corev1.Endpoints { + return &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: sks.Name, // Name of Endpoints must match that of Service. + Namespace: sks.Namespace, + Labels: resources.UnionMaps(sks.GetLabels(), map[string]string{ + // Add our own special key. 
+ networking.SKSLabelKey: sks.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePublic), + }), + Annotations: resources.CopyMap(sks.GetAnnotations()), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(sks)}, + }, + Subsets: FilterSubsetPorts(sks, src.Subsets), + } +} + +// FilterSubsetPorts makes a copy of the ep.Subsets, filtering out ports +// that are not serving (e.g. 8012 for HTTP). +func FilterSubsetPorts(sks *v1alpha1.ServerlessService, subsets []corev1.EndpointSubset) []corev1.EndpointSubset { + targetPort := targetPort(sks).IntVal + return filterSubsetPorts(targetPort, subsets) +} + +// filterSubsetPorts internal implementation that takes in port. +// Those are not arbitrary endpoints, but the endpoints we construct ourselves, +// thus we know that at least one of the ports will always match. +func filterSubsetPorts(targetPort int32, subsets []corev1.EndpointSubset) []corev1.EndpointSubset { + if len(subsets) == 0 { + return nil + } + ret := make([]corev1.EndpointSubset, len(subsets)) + for i, sss := range subsets { + sst := sss.DeepCopy() + // Find the port we care about and remove all others. + for j, p := range sst.Ports { + if p.Port == targetPort { + sst.Ports = sst.Ports[j : j+1] + break + } + } + ret[i] = *sst + } + return ret +} + +// MakePrivateService constructs a K8s service, that is backed by the pod selector +// matching pods created by the revision. +func MakePrivateService(sks *v1alpha1.ServerlessService, selector map[string]string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: kmeta.ChildName(sks.Name, "-private"), + Namespace: sks.Namespace, + Labels: resources.UnionMaps(sks.GetLabels(), map[string]string{ + // Add our own special key. 
+ networking.SKSLabelKey: sks.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePrivate), + }), + Annotations: resources.CopyMap(sks.GetAnnotations()), + OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(sks)}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortName(sks.Spec.ProtocolType), + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTPPort, + // This one is matching the public one, since this is the + // port queue-proxy listens on. + TargetPort: targetPort(sks), + }, { + Name: servingv1alpha1.AutoscalingQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.AutoscalingQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.AutoscalingQueueMetricsPortName), + }, { + Name: servingv1alpha1.UserQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.UserQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.UserQueueMetricsPortName), + }, { + // When run with the Istio mesh, Envoy blocks traffic to any ports not + // recognized, and has special treatment for probes, but not PreStop hooks. + // That results in the PreStop hook /wait-for-drain in queue-proxy not + // reachable, thus triggering SIGTERM immediately during shutdown and + // causing requests to be dropped. + // + // So we expose this port here to work around this Istio bug. 
+ Name: servingv1alpha1.QueueAdminPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.QueueAdminPort, + TargetPort: intstr.FromInt(networking.QueueAdminPort), + }}, + Selector: selector, + }, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services_test.go new file mode 100644 index 0000000000..91b0cc5bd0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services_test.go @@ -0,0 +1,698 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + servingv1alpha1 "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +var ( + goodPod = "good-pod" + badPod = "bad-pod" +) + +// TODO(vagababov): Add templating here to get rid of the boilerplate. 
+func TestMakePublicService(t *testing.T) { + tests := []struct { + name string + sks *v1alpha1.ServerlessService + want *corev1.Service + }{{ + name: "HTTP - serve", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + UID: "1982", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolHTTP1, + Mode: v1alpha1.SKSOperationModeServe, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + Labels: map[string]string{ + // Those should be propagated. + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.SKSLabelKey: "collie", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "collie", + UID: "1982", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTPPort, + TargetPort: intstr.FromInt(networking.BackendHTTPPort), + }}, + }, + }, + }, { + name: "HTTP - proxy", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + UID: "1982", + // Those labels are propagated from the Revision->PA. 
+ Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + Mode: v1alpha1.SKSOperationModeProxy, + ProtocolType: networking.ProtocolHTTP1, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + Labels: map[string]string{ + // Those should be propagated. + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.SKSLabelKey: "collie", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "collie", + UID: "1982", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTPPort, + TargetPort: intstr.FromInt(networking.BackendHTTPPort), + }}, + }, + }, + }, { + name: "HTTP2 - serve", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + UID: "1988", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolH2C, + Mode: v1alpha1.SKSOperationModeServe, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + Labels: map[string]string{ + // Those should be propagated. 
+ serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + networking.SKSLabelKey: "dream", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "dream", + UID: "1988", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameH2C, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTP2Port, + TargetPort: intstr.FromInt(networking.BackendHTTP2Port), + }}, + }, + }, + }, { + name: "HTTP2 - serve - no backends", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + UID: "1988", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolH2C, + Mode: v1alpha1.SKSOperationModeServe, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + Labels: map[string]string{ + // Those should be propagated. 
+ serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + networking.SKSLabelKey: "dream", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "dream", + UID: "1988", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameH2C, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTP2Port, + TargetPort: intstr.FromInt(networking.BackendHTTP2Port), + }}, + }, + }, + }, { + name: "HTTP2 - proxy", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + UID: "1988", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolH2C, + Mode: v1alpha1.SKSOperationModeProxy, + }, + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream", + Labels: map[string]string{ + // Those should be propagated. 
+ serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + networking.SKSLabelKey: "dream", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "dream", + UID: "1988", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameH2C, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTP2Port, + TargetPort: intstr.FromInt(networking.BackendHTTP2Port), + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakePublicService(test.sks) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("Public K8s Service mismatch (-want, +got) = %v", diff) + } + }) + } +} + +func TestMakeEndpoints(t *testing.T) { + tests := []struct { + name string + sks *v1alpha1.ServerlessService + eps *corev1.Endpoints + want *corev1.Endpoints + }{{ + name: "empty source", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + UID: "1982", + // Those labels are propagated from the Revision->PA. 
+ Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolHTTP1, + Mode: v1alpha1.SKSOperationModeServe, + }, + }, + eps: &corev1.Endpoints{}, + want: &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.SKSLabelKey: "collie", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "collie", + UID: "1982", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + }, + }, { + name: "some endpoints, many ports", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + UID: "1982", + // Those labels are propagated from the Revision->PA. 
+ Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolHTTP1, + }, + }, + eps: &corev1.Endpoints{ + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "192.168.1.1", + NodeName: &goodPod, + }, { + IP: "10.5.6.21", + NodeName: &badPod, + }}, + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 8022, + Protocol: "TCP", + }, { + Name: "http", + Port: 8012, + Protocol: "TCP", + }, { + Name: "https", + Port: 8043, + Protocol: "TCP", + }}, + }}, + }, + want: &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.SKSLabelKey: "collie", + networking.ServiceTypeKey: "Public", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "collie", + UID: "1982", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Subsets: []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: "192.168.1.1", + NodeName: &goodPod, + }, { + IP: "10.5.6.21", + NodeName: &badPod, + }}, + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 8012, + Protocol: "TCP", + }}, + }}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakePublicEndpoints(test.sks, test.eps) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("Public K8s Endpoints mismatch (-want, +got) = %v", diff) + } + }) + } +} + +func TestFilterSubsetPorts(t *testing.T) { + tests := []struct { + name string + port int32 + subsets []corev1.EndpointSubset + want []corev1.EndpointSubset + }{{ + name: 
"nil", + port: 1982, + }, { + name: "one port", + port: 1984, + subsets: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 1984, + Protocol: "TCP", + }}, + }}, + want: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 1984, + Protocol: "TCP", + }}, + }}, + }, { + name: "two ports, keep first", + port: 1988, + subsets: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 1988, + Protocol: "TCP", + }, { + Name: "http", + Port: 1983, + Protocol: "TCP", + }}, + }}, + want: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 1988, + Protocol: "TCP", + }}, + }}, + }, { + name: "three ports, keep middle", + port: 2006, + subsets: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 2009, + Protocol: "TCP", + }, { + Name: "http", + Port: 2006, + Protocol: "TCP", + }, { + Name: "http", + Port: 2019, + Protocol: "TCP", + }}, + }}, + want: []corev1.EndpointSubset{{ + Ports: []corev1.EndpointPort{{ + Name: "http", + Port: 2006, + Protocol: "TCP", + }}, + }}, + }} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got, want := filterSubsetPorts(test.port, test.subsets), test.want; !cmp.Equal(got, want) { + t.Errorf("Got = %v, want: %v, diff:\n%s", got, want, cmp.Diff(want, got)) + } + }) + } +} + +func TestMakePrivateService(t *testing.T) { + tests := []struct { + name string + sks *v1alpha1.ServerlessService + selector map[string]string + want *corev1.Service + }{{ + name: "HTTP", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie", + UID: "1982", + // Those labels are propagated from the Revision->PA. 
+ Labels: map[string]string{ + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolHTTP1, + // To make sure this does not affect private service in any way. + Mode: v1alpha1.SKSOperationModeProxy, + }, + }, + selector: map[string]string{ + "app": "sadness", + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "melon", + Name: "collie-private", + Labels: map[string]string{ + // Those should be propagated. + serving.RevisionLabelKey: "collie", + serving.RevisionUID: "1982", + networking.SKSLabelKey: "collie", + networking.ServiceTypeKey: "Private", + }, + Annotations: map[string]string{}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "collie", + UID: "1982", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "sadness", + }, + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTPPort, + TargetPort: intstr.FromInt(networking.BackendHTTPPort), + }, { + Name: servingv1alpha1.AutoscalingQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.AutoscalingQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.AutoscalingQueueMetricsPortName), + }, { + Name: servingv1alpha1.UserQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.UserQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.UserQueueMetricsPortName), + }, { + Name: servingv1alpha1.QueueAdminPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.QueueAdminPort, + TargetPort: intstr.FromInt(networking.QueueAdminPort), + }}, + }, + }, + }, { + name: "HTTP2 and long", + sks: &v1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: 
"dream-tonight-cherub-rock-mayonaise-hummer-disarm-rocket-soma-quiet", + UID: "1988", + // Those labels are propagated from the Revision->PA. + Labels: map[string]string{ + serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + }, + Spec: v1alpha1.ServerlessServiceSpec{ + ProtocolType: networking.ProtocolH2C, + }, + }, + selector: map[string]string{ + "app": "today", + }, + want: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "siamese", + Name: "dream-tonight-cherub-ro9598b55360c44122a4442ce54caa8619-private", + Labels: map[string]string{ + // Those should be propagated. + serving.RevisionLabelKey: "dream", + serving.RevisionUID: "1988", + networking.SKSLabelKey: "dream-tonight-cherub-rock-mayonaise-hummer-disarm-rocket-soma-quiet", + networking.ServiceTypeKey: "Private", + }, + Annotations: map[string]string{ + "cherub": "rock", + }, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "ServerlessService", + Name: "dream-tonight-cherub-rock-mayonaise-hummer-disarm-rocket-soma-quiet", + UID: "1988", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }}, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "today", + }, + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameH2C, + Protocol: corev1.ProtocolTCP, + Port: networking.ServiceHTTPPort, + TargetPort: intstr.FromInt(networking.BackendHTTP2Port), + }, { + Name: servingv1alpha1.AutoscalingQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.AutoscalingQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.AutoscalingQueueMetricsPortName), + }, { + Name: servingv1alpha1.UserQueueMetricsPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.UserQueueMetricsPort, + TargetPort: intstr.FromString(servingv1alpha1.UserQueueMetricsPortName), + }, { + Name: 
servingv1alpha1.QueueAdminPortName, + Protocol: corev1.ProtocolTCP, + Port: networking.QueueAdminPort, + TargetPort: intstr.FromInt(networking.QueueAdminPort), + }}, + }, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := MakePrivateService(test.sks, test.selector) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("Private K8s Service mismatch (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice.go new file mode 100644 index 0000000000..c74edf9559 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice.go @@ -0,0 +1,369 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serverlessservice + +import ( + "context" + "fmt" + "reflect" + "strconv" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + listers "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + rbase "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/serverlessservice/resources" + presources "knative.dev/serving/pkg/resources" +) + +const reconcilerName = "ServerlessServices" + +// reconciler implements controller.Reconciler for Service resources. +type reconciler struct { + *rbase.Base + + // listers index properties about resources + sksLister listers.ServerlessServiceLister + serviceLister corev1listers.ServiceLister + endpointsLister corev1listers.EndpointsLister + + // Used to get PodScalables from object references. + psInformerFactory duck.InformerFactory +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Revision resource +// with the current status of the resource. 
+func (r *reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + logger.Debug("Reconciling SKS resource") + // Get the current SKS resource. + original, err := r.sksLister.ServerlessServices(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logger.Info("SKS resource in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + sks := original.DeepCopy() + reconcileErr := r.reconcile(ctx, sks) + if reconcileErr != nil { + r.Recorder.Eventf(sks, corev1.EventTypeWarning, "UpdateFailed", "InternalError: %v", reconcileErr.Error()) + } + if !equality.Semantic.DeepEqual(sks.Status, original.Status) { + if err := r.updateStatus(original, sks); err != nil { + r.Recorder.Eventf(sks, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status: %v", err) + return err + } + r.Recorder.Eventf(sks, corev1.EventTypeNormal, "Updated", "Successfully updated ServerlessService %q", key) + } + return reconcileErr +} + +func (r *reconciler) reconcile(ctx context.Context, sks *netv1alpha1.ServerlessService) error { + logger := logging.FromContext(ctx) + // Don't reconcile if we're being deleted. + if sks.GetDeletionTimestamp() != nil { + return nil + } + + sks.SetDefaults(ctx) + sks.Status.InitializeConditions() + + for i, fn := range []func(context.Context, *netv1alpha1.ServerlessService) error{ + r.reconcilePrivateService, // First make sure our data source is setup. 
+ r.reconcilePublicService, + r.reconcilePublicEndpoints, + } { + if err := fn(ctx, sks); err != nil { + logger.Debugw(strconv.Itoa(i)+": reconcile failed", zap.Error(err)) + return err + } + } + sks.Status.ObservedGeneration = sks.Generation + return nil +} + +func (r *reconciler) updateStatus(existing *netv1alpha1.ServerlessService, desired *netv1alpha1.ServerlessService) error { + existing = existing.DeepCopy() + return rbase.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = r.ServingClientSet.NetworkingV1alpha1().ServerlessServices(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + existing.Status = desired.Status + _, err = r.ServingClientSet.NetworkingV1alpha1().ServerlessServices(existing.Namespace).UpdateStatus(existing) + return err + }) +} + +func (r *reconciler) reconcilePublicService(ctx context.Context, sks *netv1alpha1.ServerlessService) error { + logger := logging.FromContext(ctx) + + sn := sks.Name + srv, err := r.serviceLister.Services(sks.Namespace).Get(sn) + if apierrs.IsNotFound(err) { + logger.Infof("K8s public service %s does not exist; creating.", sn) + // We've just created the service, so it has no endpoints. 
+ sks.Status.MarkEndpointsNotReady("CreatingPublicService") + srv = resources.MakePublicService(sks) + _, err := r.KubeClientSet.CoreV1().Services(sks.Namespace).Create(srv) + if err != nil { + return fmt.Errorf("failed to create public K8s Service: %w", err) + } + logger.Info("Created public K8s service: ", sn) + } else if err != nil { + return fmt.Errorf("failed to get public K8s Service: %w", err) + } else if !metav1.IsControlledBy(srv, sks) { + sks.Status.MarkEndpointsNotOwned("Service", sn) + return fmt.Errorf("SKS: %s does not own Service: %s", sks.Name, sn) + } else { + tmpl := resources.MakePublicService(sks) + want := srv.DeepCopy() + want.Spec.Ports = tmpl.Spec.Ports + want.Spec.Selector = nil + + if !equality.Semantic.DeepEqual(want.Spec, srv.Spec) { + logger.Info("Public K8s Service changed; reconciling: ", sn, cmp.Diff(want.Spec, srv.Spec)) + if _, err = r.KubeClientSet.CoreV1().Services(sks.Namespace).Update(want); err != nil { + return fmt.Errorf("failed to update public K8s Service: %w", err) + } + } + } + sks.Status.ServiceName = sn + logger.Debug("Done reconciling public K8s service: ", sn) + return nil +} + +func (r *reconciler) reconcilePublicEndpoints(ctx context.Context, sks *netv1alpha1.ServerlessService) error { + logger := logging.FromContext(ctx) + + var ( + srcEps *corev1.Endpoints + foundServingEndpoints bool + ) + activatorEps, err := r.endpointsLister.Endpoints(system.Namespace()).Get(networking.ActivatorServiceName) + if err != nil { + return fmt.Errorf("failed to get activator service endpoints: %w", err) + } + logger.Debug("Activator endpoints: ", spew.Sprint(activatorEps)) + + psn := sks.Status.PrivateServiceName + pvtEps, err := r.endpointsLister.Endpoints(sks.Namespace).Get(psn) + if err != nil { + return fmt.Errorf("failed to get private K8s Service endpoints: %w", err) + } + // We still might be "ready" even if in proxy mode, + // if proxy mode is by means of burst capacity handling. 
+ pvtReady := presources.ReadyAddressCount(pvtEps) + if pvtReady > 0 { + foundServingEndpoints = true + } + + // The logic below is as follows: + // if mode == serve: + // if len(private_service_endpoints) > 0: + // srcEps = private_service_endpoints + // else: + // srcEps = activator_endpoints + // else: + // srcEps = activator_endpoints + // The reason for this is, we don't want to leave the public service endpoints empty, + // since those endpoints are the ones programmed into the VirtualService. + // + switch sks.Spec.Mode { + case netv1alpha1.SKSOperationModeServe: + // We should have successfully reconciled the private service if we're here + // which means that we'd have the name assigned in Status. + logger.Debugf("Private endpoints: %s", spew.Sprint(pvtEps)) + // Serving but no ready endpoints. + if pvtReady == 0 { + logger.Info(psn + " is in mode Serve but has no endpoints, using Activator endpoints for now") + srcEps = activatorEps + } else { + // Serving & have endpoints ready. 
+ srcEps = pvtEps + } + case netv1alpha1.SKSOperationModeProxy: + srcEps = activatorEps + } + + sn := sks.Name + eps, err := r.endpointsLister.Endpoints(sks.Namespace).Get(sn) + + if apierrs.IsNotFound(err) { + logger.Infof("Public endpoints %s does not exist; creating.", sn) + sks.Status.MarkEndpointsNotReady("CreatingPublicEndpoints") + if _, err = r.KubeClientSet.CoreV1().Endpoints(sks.Namespace).Create(resources.MakePublicEndpoints(sks, srcEps)); err != nil { + return fmt.Errorf("failed to create public K8s Endpoints: %w", err) + } + logger.Info("Created K8s Endpoints: ", sn) + } else if err != nil { + return fmt.Errorf("failed to get public K8s Endpoints: %w", err) + } else if !metav1.IsControlledBy(eps, sks) { + sks.Status.MarkEndpointsNotOwned("Endpoints", sn) + return fmt.Errorf("SKS: %s does not own Endpoints: %s", sks.Name, sn) + } else { + wantSubsets := resources.FilterSubsetPorts(sks, srcEps.Subsets) + if !equality.Semantic.DeepEqual(wantSubsets, eps.Subsets) { + want := eps.DeepCopy() + want.Subsets = wantSubsets + logger.Info("Public K8s Endpoints changed; reconciling: ", sn) + if _, err = r.KubeClientSet.CoreV1().Endpoints(sks.Namespace).Update(want); err != nil { + return fmt.Errorf("failed to update public K8s Endpoints: %w", err) + } + } + } + if foundServingEndpoints { + sks.Status.MarkEndpointsReady() + } else { + logger.Infof("Endpoints %s has no ready endpoints", sn) + sks.Status.MarkEndpointsNotReady("NoHealthyBackends") + } + // If we have no backends or if we're in the proxy mode, then + // activator backs this revision. 
+ if !foundServingEndpoints || sks.Spec.Mode == netv1alpha1.SKSOperationModeProxy { + sks.Status.MarkActivatorEndpointsPopulated() + } else { + sks.Status.MarkActivatorEndpointsRemoved() + } + + logger.Debug("Done reconciling public K8s endpoints: ", sn) + return nil +} + +func (r *reconciler) privateService(sks *netv1alpha1.ServerlessService) (*corev1.Service, error) { + // The code below is for backwards compatibility, when we had + // GenerateName for the private services. + svcs, err := r.serviceLister.Services(sks.Namespace).List(labels.SelectorFromSet(map[string]string{ + networking.SKSLabelKey: sks.Name, + networking.ServiceTypeKey: string(networking.ServiceTypePrivate), + })) + if err != nil { + return nil, err + } + switch l := len(svcs); l { + case 0: + return nil, apierrs.NewNotFound(corev1.Resource("Services"), sks.Name) + case 1: + return svcs[0], nil + default: + // We encountered more than one. Keep the one that is in the SKS status and delete the others. + var ret *corev1.Service + for _, s := range svcs { + if s.Name == sks.Status.PrivateServiceName { + ret = s + continue + } + // If we don't control it, don't delete it. 
+ if metav1.IsControlledBy(s, sks) { + r.KubeClientSet.CoreV1().Services(sks.Namespace).Delete(s.Name, &metav1.DeleteOptions{}) + } + } + return ret, nil + } +} + +func (r *reconciler) reconcilePrivateService(ctx context.Context, sks *netv1alpha1.ServerlessService) error { + logger := logging.FromContext(ctx) + + selector, err := r.getSelector(sks) + if err != nil { + return fmt.Errorf("error retrieving deployment selector spec: %w", err) + } + + svc, err := r.privateService(sks) + if apierrs.IsNotFound(err) { + logger.Info("SKS has no private service; creating.") + sks.Status.MarkEndpointsNotReady("CreatingPrivateService") + svc = resources.MakePrivateService(sks, selector) + svc, err = r.KubeClientSet.CoreV1().Services(sks.Namespace).Create(svc) + if err != nil { + return fmt.Errorf("failed to create private K8s Service: %w", err) + } + logger.Info("Created private K8s service: ", svc.Name) + } else if err != nil { + return fmt.Errorf("failed to get private K8s Service: %w", err) + } else if !metav1.IsControlledBy(svc, sks) { + sks.Status.MarkEndpointsNotOwned("Service", svc.Name) + return fmt.Errorf("SKS: %s does not own Service: %s", sks.Name, svc.Name) + } else { + tmpl := resources.MakePrivateService(sks, selector) + want := svc.DeepCopy() + // Our controller manages only part of spec, so set the fields we own. 
+ want.Spec.Ports = tmpl.Spec.Ports + want.Spec.Selector = tmpl.Spec.Selector + + if !equality.Semantic.DeepEqual(svc.Spec, want.Spec) { + sks.Status.MarkEndpointsNotReady("UpdatingPrivateService") + logger.Infof("Private K8s Service changed %s; reconciling: ", svc.Name) + if _, err = r.KubeClientSet.CoreV1().Services(sks.Namespace).Update(want); err != nil { + return fmt.Errorf("failed to update private K8s Service: %w", err) + } + } + } + + sks.Status.PrivateServiceName = svc.Name + logger.Debug("Done reconciling private K8s service: ", svc.Name) + return nil +} + +func (r *reconciler) getSelector(sks *netv1alpha1.ServerlessService) (map[string]string, error) { + scale, err := presources.GetScaleResource(sks.Namespace, sks.Spec.ObjectRef, r.psInformerFactory) + if err != nil { + return nil, err + } + return scale.Spec.Selector.MatchLabels, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice_test.go new file mode 100644 index 0000000000..f5c6598f8b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/serverlessservice_test.go @@ -0,0 +1,830 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serverlessservice + +import ( + "context" + "fmt" + "testing" + "time" + + // Inject the fakes for informers this reconciler depends on. 
+ _ "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints/fake" + _ "knative.dev/pkg/client/injection/kube/informers/core/v1/service/fake" + "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable" + _ "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake" + _ "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice/fake" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + "knative.dev/pkg/system" + "knative.dev/serving/pkg/apis/networking" + nv1a1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + rpkg "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/serverlessservice/resources" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + clientgotesting "k8s.io/client-go/testing" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . 
"knative.dev/serving/pkg/testing" +) + +func TestNewController(t *testing.T) { + ctx, _ := SetupFakeContext(t) + c := NewController(ctx, configmap.NewStaticWatcher()) + if c == nil { + t.Fatal("Expected NewController to return a non-nil value") + } +} + +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key, Part I", + Key: "too/many/parts", + }, { + Name: "bad workqueue key, Part II", + Key: "too-few-parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "steady state", + Key: "steady/state", + Objects: []runtime.Object{ + SKS("steady", "state", markHappy, WithPubService, WithPrivateService, WithDeployRef("bar")), + deploy("steady", "bar"), + svcpub("steady", "state"), + svcpriv("steady", "state"), + endpointspub("steady", "state", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("steady", "state", WithSubsets), + activatorEndpoints(WithSubsets), + }, + }, { + // This is the case for once we are scaled to zero. 
+ Name: "steady switch to proxy mode", + Key: "steady/to-proxy", + Objects: []runtime.Object{ + SKS("steady", "to-proxy", markHappy, WithPubService, WithPrivateService, + WithDeployRef("bar"), withProxyMode), + deploy("steady", "bar"), + svcpub("steady", "to-proxy"), + svcpriv("steady", "to-proxy"), + endpointspub("steady", "to-proxy", withOtherSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("steady", "to-proxy"), + activatorEndpoints(WithSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("steady", "to-proxy", WithDeployRef("bar"), markNoEndpoints, + withProxyMode, WithPubService, WithPrivateService), + }}, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("steady", "to-proxy", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "steady/to-proxy"`), + }, + }, { + // This is the case for once we are proxying for unsufficient burst capacity. + // It should be a no-op. 
+ Name: "steady switch to proxy mode with endpoints", + Key: "steady/to-proxy", + Objects: []runtime.Object{ + SKS("steady", "to-proxy", markHappy, WithPubService, WithPrivateService, + WithDeployRef("bar"), withProxyMode), + deploy("steady", "bar"), + svcpub("steady", "to-proxy"), + svcpriv("steady", "to-proxy"), + endpointspub("steady", "to-proxy", withOtherSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("steady", "to-proxy", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("steady", "to-proxy", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }}, + }, { + Name: "many-private-services", + Key: "many/privates", + Objects: []runtime.Object{ + SKS("many", "privates", markHappy, WithPubService, WithPrivateService, + WithDeployRef("bar")), + deploy("many", "bar"), + svcpub("many", "privates"), + svcpriv("many", "privates"), + svcpriv("many", "privates", svcWithName("privates-brutality-is-here")), + svcpriv("many", "privates", svcWithName("privates-uncharacteristically-pretty"), + WithK8sSvcOwnersRemoved), // unowned, should remain. 
+ endpointspub("many", "privates", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("many", "privates", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantDeletes: []clientgotesting.DeleteActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "many", + Verb: "delete", + Resource: schema.GroupVersionResource{ + Group: "core", + Version: "v1", + Resource: "services", + }, + }, + Name: "privates-brutality-is-here", + }}, + }, { + Name: "user changes public svc", + Key: "public/svc-change", + Objects: []runtime.Object{ + SKS("public", "svc-change", WithPubService, WithSKSReady, + WithPrivateService, WithDeployRef("bar")), + deploy("public", "bar"), + svcpub("public", "svc-change", withTimeSelector), + svcpriv("public", "svc-change"), + endpointspub("public", "svc-change", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("public", "svc-change", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: svcpub("public", "svc-change"), + }}, + }, { + Name: "user changes priv svc", + Key: "private/svc-change", + Objects: []runtime.Object{ + SKS("private", "svc-change", markHappy, WithPubService, + WithPrivateService, WithDeployRef("baz")), + deploy("private", "baz"), + svcpub("private", "svc-change"), + svcpriv("private", "svc-change", withTimeSelector), + endpointspub("private", "svc-change", withOtherSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("private", "svc-change", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: svcpriv("private", "svc-change"), + }, { + Object: endpointspub("private", "svc-change", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }}, + }, { + Name: "OnCreate-deployment-does-not-exist", + Key: "on/cde", + Objects: []runtime.Object{ + SKS("on", "cde", WithDeployRef("blah"), markNoEndpoints), + deploy("on", 
"blah-another"), + endpointspriv("on", "cde", WithSubsets), + }, + WantErr: true, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", `InternalError: error retrieving deployment selector spec: error fetching Pod Scalable on/blah: deployments.apps "blah" not found`), + }, + }, { + Name: "OnCreate-deployment-exists", + Key: "on/cde", + Objects: []runtime.Object{ + SKS("on", "cde", WithDeployRef("blah")), + deploy("on", "blah"), + // This "has" to pre-exist, otherwise I can't populate it with subsets. + endpointspriv("on", "cde", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantCreates: []runtime.Object{ + svcpriv("on", "cde"), + svcpub("on", "cde"), + endpointspub("on", "cde", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cde", WithDeployRef("blah"), + markHappy, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cde"`), + }, + }, { + Name: "update-eps-fail", + Key: "update-eps/failA", + WantErr: true, + Objects: []runtime.Object{ + SKS("update-eps", "failA", WithPubService, WithPrivateService, WithDeployRef("blah"), markNoEndpoints), + deploy("update-eps", "blah"), + svcpub("update-eps", "failA"), + svcpriv("update-eps", "failA"), + endpointspub("update-eps", "failA"), + endpointspriv("update-eps", "failA", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "endpoints"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("update-eps", "failA", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), // The attempted update. 
+ }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "InternalError: failed to update public K8s Endpoints: inducing failure for update endpoints"), + }, + }, { + Name: "svc-fail-pub", + Key: "svc/fail2", + WantErr: true, + Objects: []runtime.Object{ + SKS("svc", "fail2", WithDeployRef("blah")), + deploy("svc", "blah"), + svcpriv("svc", "fail2"), + endpointspriv("svc", "fail2"), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "services"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("svc", "fail2", WithPrivateService, + WithDeployRef("blah"), markTransitioning("CreatingPublicService")), + }}, + WantCreates: []runtime.Object{ + svcpub("svc", "fail2"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "InternalError: failed to create public K8s Service: inducing failure for create services"), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "svc/fail2"`), + }, + }, { + Name: "eps-create-fail-pub", + Key: "eps/fail3", + WantErr: true, + Objects: []runtime.Object{ + SKS("eps", "fail3", WithDeployRef("blah")), + deploy("eps", "blah"), + svcpriv("eps", "fail3"), + endpointspriv("eps", "fail3", WithSubsets), + activatorEndpoints(withOtherSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "endpoints"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("eps", "fail3", WithPubService, WithPrivateService, + WithDeployRef("blah"), markTransitioning("CreatingPublicEndpoints")), + }}, + WantCreates: []runtime.Object{ + svcpub("eps", "fail3"), + endpointspub("eps", "fail3", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "InternalError: failed to create public K8s Endpoints: inducing failure for create endpoints"), + 
Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "eps/fail3"`), + }, + }, { + Name: "OnCreate-no-eps", + Key: "on/cneps", + Objects: []runtime.Object{ + SKS("on", "cneps", WithDeployRef("blah"), WithPrivateService), + deploy("on", "blah"), + endpointspriv("on", "cneps"), + activatorEndpoints(WithSubsets), + }, + WantCreates: []runtime.Object{ + svcpriv("on", "cneps"), + svcpub("on", "cneps"), + endpointspub("on", "cneps", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cneps", WithDeployRef("blah"), + markNoEndpoints, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cneps"`), + }, + }, { + Name: "OnCreate-no-activator-eps-exist", + Key: "on/cnaeps2", + Objects: []runtime.Object{ + SKS("on", "cnaeps2", WithDeployRef("blah")), + deploy("on", "blah"), + endpointspriv("on", "cnaeps2", WithSubsets), + endpointspub("on", "cnaeps2", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }, + WantErr: true, + WantCreates: []runtime.Object{ + svcpriv("on", "cnaeps2"), + svcpub("on", "cnaeps2"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cnaeps2", WithDeployRef("blah"), WithPubService, + WithPrivateService, + markTransitioning("CreatingPublicService")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + `InternalError: failed to get activator service endpoints: endpoints "activator-service" not found`), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cnaeps2"`), + }, + }, { + Name: "OnCreate-no-private-eps-exist", + Key: "on/cnaeps3", + Objects: []runtime.Object{ + SKS("on", "cnaeps3", WithDeployRef("blah")), + deploy("on", "blah"), + endpointspub("on", "cnaeps3", WithSubsets), + activatorEndpoints(WithSubsets), + }, + 
WantErr: true, + WantCreates: []runtime.Object{ + svcpriv("on", "cnaeps3"), + svcpub("on", "cnaeps3"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cnaeps3", WithDeployRef("blah"), WithPubService, + WithPrivateService, + markTransitioning("CreatingPublicService")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + `InternalError: failed to get private K8s Service endpoints: endpoints "cnaeps3-private" not found`), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cnaeps3"`), + }, + }, { + Name: "OnCreate-no-activator-eps-service", + Key: "on/cnaeps", + Objects: []runtime.Object{ + SKS("on", "cnaeps", WithDeployRef("blah")), + deploy("on", "blah"), + endpointspriv("on", "cnaeps", WithSubsets), + endpointspub("on", "cnaeps", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + activatorEndpoints(), + }, + WantCreates: []runtime.Object{ + svcpriv("on", "cnaeps"), + svcpub("on", "cnaeps"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cnaeps", WithDeployRef("blah"), + markHappy, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cnaeps"`), + }, + }, { + Name: "OnCreate-no-activator-eps-proxy", + Key: "on/cnaeps", + Objects: []runtime.Object{ + SKS("on", "cnaeps", WithDeployRef("blah"), withProxyMode), + deploy("on", "blah"), + endpointspriv("on", "cnaeps"), // This should be ignored. 
+ activatorEndpoints(), + }, + WantCreates: []runtime.Object{ + svcpriv("on", "cnaeps"), + svcpub("on", "cnaeps", withTargetPortNum(8012)), + endpointspub("on", "cnaeps"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("on", "cnaeps", WithDeployRef("blah"), withProxyMode, + markNoEndpoints, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "on/cnaeps"`), + }, + }, { + Name: "create-svc-fail-priv", + Key: "svc/fail", + WantErr: true, + Objects: []runtime.Object{ + SKS("svc", "fail", WithDeployRef("blah")), + deploy("svc", "blah"), + endpointspriv("svc", "fail"), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "services"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("svc", "fail", WithDeployRef("blah"), markTransitioning("CreatingPrivateService")), + }}, + WantCreates: []runtime.Object{ + svcpriv("svc", "fail"), + }, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "InternalError: failed to create private K8s Service: inducing failure for create services"), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "svc/fail"`), + }, + }, { + Name: "update-sks-fail", + Key: "update-sks/fail4", + WantErr: true, + Objects: []runtime.Object{ + SKS("update-sks", "fail4", WithPubService, WithPrivateService, + WithDeployRef("blah")), + deploy("update-sks", "blah"), + svcpub("update-sks", "fail4"), + svcpriv("update-sks", "fail4"), + endpointspub("update-sks", "fail4", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + endpointspriv("update-sks", "fail4", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "serverlessservices"), + }, + // We still record update, but it fails. 
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("update-sks", "fail4", + WithDeployRef("blah"), markHappy, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status: inducing failure for update serverlessservices"), + }, + }, { + Name: "ronin-priv-service", + Key: "ronin-priv-service/fail5", + WantErr: true, + Objects: []runtime.Object{ + SKS("ronin-priv-service", "fail5", WithPubService, WithPrivateService, + WithDeployRef("blah"), markHappy), + deploy("ronin-priv-service", "blah"), + svcpub("ronin-priv-service", "fail5"), + svcpriv("ronin-priv-service", "fail5", WithK8sSvcOwnersRemoved), + endpointspub("ronin-priv-service", "fail5", WithSubsets), + endpointspriv("ronin-priv-service", "fail5", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("ronin-priv-service", "fail5", WithPubService, WithPrivateService, + WithDeployRef("blah"), markUnowned("Service", "fail5-private")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", `InternalError: SKS: fail5 does not own Service: fail5-private`), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "ronin-priv-service/fail5"`), + }, + }, { + Name: "ronin-pub-service", + Key: "ronin-pub-service/fail6", + WantErr: true, + Objects: []runtime.Object{ + SKS("ronin-pub-service", "fail6", WithPubService, WithPrivateService, + WithDeployRef("blah")), + deploy("ronin-pub-service", "blah"), + svcpub("ronin-pub-service", "fail6", WithK8sSvcOwnersRemoved), + svcpriv("ronin-pub-service", "fail6"), + endpointspub("ronin-pub-service", "fail6", WithSubsets), + endpointspriv("ronin-pub-service", "fail6", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("ronin-pub-service", "fail6", WithPubService, WithPrivateService, + 
WithDeployRef("blah"), markUnowned("Service", "fail6")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", `InternalError: SKS: fail6 does not own Service: fail6`), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "ronin-pub-service/fail6"`), + }, + }, { + Name: "ronin-pub-eps", + Key: "ronin-pub-eps/fail7", + WantErr: true, + Objects: []runtime.Object{ + SKS("ronin-pub-eps", "fail7", WithPubService, WithPrivateService, + WithDeployRef("blah")), + deploy("ronin-pub-eps", "blah"), + svcpub("ronin-pub-eps", "fail7"), + svcpriv("ronin-pub-eps", "fail7"), + endpointspub("ronin-pub-eps", "fail7", WithSubsets, WithEndpointsOwnersRemoved), + endpointspriv("ronin-pub-eps", "fail7", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("ronin-pub-eps", "fail7", WithPubService, WithPrivateService, + WithDeployRef("blah"), markUnowned("Endpoints", "fail7")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", `InternalError: SKS: fail7 does not own Endpoints: fail7`), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "ronin-pub-eps/fail7"`), + }, + }, { + Name: "update-priv-svc-fail", + Key: "update-svc/fail9", + WantErr: true, + Objects: []runtime.Object{ + SKS("update-svc", "fail9", WithPubService, WithPrivateService, + WithDeployRef("blah")), + deploy("update-svc", "blah"), + svcpub("update-svc", "fail9"), + svcpriv("update-svc", "fail9", withTimeSelector), + endpointspub("update-svc", "fail9"), + endpointspriv("update-svc", "fail9"), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "services"), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("update-svc", "fail9", WithPubService, WithPrivateService, + WithDeployRef("blah"), markTransitioning("UpdatingPrivateService")), + }}, + 
WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: svcpriv("update-svc", "fail9"), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", + "InternalError: failed to update private K8s Service: inducing failure for update services"), + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "update-svc/fail9"`), + }, + }, + { + Name: "update-pub-svc-fail", + Key: "update-svc/fail8", + WantErr: true, + Objects: []runtime.Object{ + SKS("update-svc", "fail8", WithPubService, WithDeployRef("blah"), markHappy, WithPrivateService), + deploy("update-svc", "blah"), + svcpub("update-svc", "fail8", withTimeSelector), + svcpriv("update-svc", "fail8"), + endpointspub("update-svc", "fail8", WithSubsets), + endpointspriv("update-svc", "fail8", WithSubsets), + activatorEndpoints(WithSubsets), + }, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "services"), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: svcpub("update-svc", "fail8"), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "UpdateFailed", "InternalError: failed to update public K8s Service: inducing failure for update services"), + }, + }, { + Name: "pod change", + Key: "pod/change", + Objects: []runtime.Object{ + SKS("pod", "change", markHappy, WithPubService, WithPrivateService, + WithDeployRef("blah")), + deploy("pod", "blah"), + svcpub("pod", "change"), + svcpriv("pod", "change"), + endpointspub("pod", "change", WithSubsets), + endpointspriv("pod", "change", withOtherSubsets), + activatorEndpoints(WithSubsets), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("pod", "change", withOtherSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }}, + }, { + Name: "proxy mode; pod change - activator", + Key: "pod/change", + Objects: []runtime.Object{ + SKS("pod", "change", markNoEndpoints, WithPubService, withHTTP2Protocol, + WithPrivateService, WithDeployRef("blah")), + 
deploy("pod", "blah"), + svcpub("pod", "change", withHTTP2), + svcpriv("pod", "change", withHTTP2Priv), + endpointspub("pod", "change", WithSubsets), + endpointspriv("pod", "change"), + activatorEndpoints(withOtherSubsets), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("pod", "change", withOtherSubsets, withFilteredPorts(networking.BackendHTTP2Port)), + }}, + }, { + Name: "serving mode; serving pod comes online", + Key: "pod/change", + Objects: []runtime.Object{ + SKS("pod", "change", markNoEndpoints, WithPubService, + WithPrivateService, WithDeployRef("blah")), + deploy("pod", "blah"), + svcpub("pod", "change"), + svcpriv("pod", "change"), + endpointspub("pod", "change", withOtherSubsets), + endpointspriv("pod", "change", WithSubsets), + activatorEndpoints(withOtherSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("pod", "change", + WithDeployRef("blah"), markHappy, WithPubService, WithPrivateService, WithDeployRef("blah")), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "pod/change"`), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("pod", "change", WithSubsets, withFilteredPorts(networking.BackendHTTPPort)), + }}, + }, { + Name: "serving mode; no backend endpoints", + Key: "pod/change", + Objects: []runtime.Object{ + SKS("pod", "change", WithSKSReady, WithPubService, withHTTP2Protocol, + WithPrivateService, WithDeployRef("blah")), + deploy("pod", "blah"), + svcpub("pod", "change", withHTTP2), + svcpriv("pod", "change", withHTTP2Priv), + endpointspub("pod", "change", WithSubsets), // We had endpoints... + endpointspriv("pod", "change"), // but now we don't. 
+ activatorEndpoints(withOtherSubsets), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: SKS("pod", "change", withHTTP2Protocol, + WithDeployRef("blah"), markNoEndpoints, WithPubService, WithPrivateService), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", `Successfully updated ServerlessService "pod/change"`), + }, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: endpointspub("pod", "change", withOtherSubsets, withFilteredPorts(networking.BackendHTTP2Port)), + }}, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + ctx = podscalable.WithDuck(ctx) + + return &reconciler{ + Base: rpkg.NewBase(ctx, controllerAgentName, cmw), + sksLister: listers.GetServerlessServiceLister(), + serviceLister: listers.GetK8sServiceLister(), + endpointsLister: listers.GetEndpointsLister(), + psInformerFactory: podscalable.Get(ctx), + } + })) +} + +// Keeps only desired port. +func withFilteredPorts(port int32) EndpointsOption { + return func(ep *corev1.Endpoints) { + for _, p := range ep.Subsets[0].Ports { + if p.Port == port { + ep.Subsets[0].Ports[0] = p + break + } + } + // Strip all the others. + ep.Subsets[0].Ports = ep.Subsets[0].Ports[:1] + } +} + +// withOtherSubsets uses different IP set than functional::withSubsets. 
+func withOtherSubsets(ep *corev1.Endpoints) { + ep.Subsets = []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{IP: "127.0.0.2"}}, + Ports: []corev1.EndpointPort{{Port: 8013}, {Port: 8012}}, + }} +} + +func markHappy(sks *nv1a1.ServerlessService) { + sks.Status.MarkEndpointsReady() +} + +func markUnowned(k, n string) SKSOption { + return func(sks *nv1a1.ServerlessService) { + sks.Status.MarkEndpointsNotOwned(k, n) + } +} + +func markTransitioning(s string) SKSOption { + return func(sks *nv1a1.ServerlessService) { + sks.Status.MarkEndpointsNotReady(s) + } +} + +func markNoEndpoints(sks *nv1a1.ServerlessService) { + sks.Status.MarkEndpointsNotReady("NoHealthyBackends") + sks.Status.MarkActivatorEndpointsPopulated() +} + +func withHTTP2Protocol(sks *nv1a1.ServerlessService) { + sks.Spec.ProtocolType = networking.ProtocolH2C +} + +type deploymentOption func(*appsv1.Deployment) + +func deploy(namespace, name string, opts ...deploymentOption) *appsv1.Deployment { + d := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label": "value", + }, + }, + Replicas: ptr.Int32(1), + }, + } + + for _, opt := range opts { + opt(d) + } + return d +} + +func withHTTP2Priv(svc *corev1.Service) { + svc.Spec.Ports[0].Name = "http2" + svc.Spec.Ports[0].TargetPort = intstr.FromInt(networking.BackendHTTP2Port) +} + +func withHTTP2(svc *corev1.Service) { + svc.Spec.Ports[0].Port = networking.ServiceHTTP2Port + svc.Spec.Ports[0].Name = "http2" + svc.Spec.Ports[0].TargetPort = intstr.FromInt(networking.BackendHTTP2Port) +} + +// For SKS internal tests this sets mode & activator status. 
+func withProxyMode(sks *nv1a1.ServerlessService) { + WithProxyMode(sks) + sks.Status.MarkActivatorEndpointsPopulated() +} + +func withTargetPortNum(port int) K8sServiceOption { + return func(svc *corev1.Service) { + svc.Spec.Ports[0].TargetPort = intstr.FromInt(port) + } +} + +func svcpub(namespace, name string, so ...K8sServiceOption) *corev1.Service { + sks := SKS(namespace, name) + s := resources.MakePublicService(sks) + for _, opt := range so { + opt(s) + } + return s +} + +func svcWithName(n string) K8sServiceOption { + return func(s *corev1.Service) { + s.GenerateName = "" + s.Name = n + } +} + +func svcpriv(namespace, name string, so ...K8sServiceOption) *corev1.Service { + sks := SKS(namespace, name) + s := resources.MakePrivateService(sks, map[string]string{ + "label": "value", + }) + for _, opt := range so { + opt(s) + } + return s +} + +func activatorEndpoints(eo ...EndpointsOption) *corev1.Endpoints { + ep := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace(), + Name: networking.ActivatorServiceName, + }, + } + for _, opt := range eo { + opt(ep) + } + return ep +} + +func endpointspriv(namespace, name string, eo ...EndpointsOption) *corev1.Endpoints { + service := svcpriv(namespace, name) + ep := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: service.Namespace, + Name: service.Name, + }, + } + for _, opt := range eo { + opt(ep) + } + return ep +} + +func endpointspub(namespace, name string, eo ...EndpointsOption) *corev1.Endpoints { + service := svcpub(namespace, name) + ep := &corev1.Endpoints{ + ObjectMeta: *service.ObjectMeta.DeepCopy(), + } + for _, opt := range eo { + opt(ep) + } + return ep +} + +func withTimeSelector(svc *corev1.Service) { + svc.Spec.Selector = map[string]string{"pod-x": fmt.Sprintf("a-%d", time.Now().UnixNano())} +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/controller.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/controller.go new file 
mode 100644 index 0000000000..df5ac75e47 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/controller.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "context" + + configurationinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration" + revisioninformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision" + routeinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route" + kserviceinformer "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service" + + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" +) + +const ( + controllerAgentName = "service-controller" +) + +// NewController initializes the controller and is called by the generated code +// Registers eventhandlers to enqueue events +func NewController( + ctx context.Context, + cmw configmap.Watcher, +) *controller.Impl { + serviceInformer := kserviceinformer.Get(ctx) + routeInformer := routeinformer.Get(ctx) + configurationInformer := configurationinformer.Get(ctx) + revisionInformer := revisioninformer.Get(ctx) + + c := &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + serviceLister: serviceInformer.Lister(), + configurationLister: 
configurationInformer.Lister(), + revisionLister: revisionInformer.Lister(), + routeLister: routeInformer.Lister(), + } + impl := controller.NewImpl(c, c.Logger, ReconcilerName) + + c.Logger.Info("Setting up event handlers") + serviceInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + + configurationInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Service")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + routeInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Service")), + Handler: controller.HandleAll(impl.EnqueueControllerOf), + }) + + return impl +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration.go new file mode 100644 index 0000000000..4097ac1cb7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/pkg/resources" +) + +// MakeConfiguration creates a Configuration from a Service object. +func MakeConfiguration(service *v1alpha1.Service) (*v1alpha1.Configuration, error) { + return &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Configuration(service), + Namespace: service.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(service), + }, + Labels: resources.UnionMaps(service.GetLabels(), map[string]string{ + serving.RouteLabelKey: names.Route(service), + serving.ServiceLabelKey: service.Name, + }), + Annotations: resources.FilterMap(service.GetAnnotations(), func(key string) bool { + return key == corev1.LastAppliedConfigAnnotation + }), + }, + Spec: service.Spec.ConfigurationSpec, + }, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration_test.go new file mode 100644 index 0000000000..eca9e62be7 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/configuration_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func makeConfiguration(service *v1alpha1.Service) (*v1alpha1.Configuration, error) { + // We do this prior to reconciliation, so test with it enabled. + service.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + return MakeConfiguration(service) +} + +func TestRunLatest(t *testing.T) { + s := createServiceWithRunLatest() + c, _ := makeConfiguration(s) + if got, want := c.Name, testServiceName; got != want { + t.Errorf("expected %q for service name got %q", want, got) + } + if got, want := c.Namespace, testServiceNamespace; got != want { + t.Errorf("expected %q for service namespace got %q", want, got) + } + if got, want := c.Spec.GetTemplate().Spec.GetContainer().Name, testContainerNameRunLatest; got != want { + t.Errorf("expected %q for container name got %q", want, got) + } + expectOwnerReferencesSetCorrectly(t, c.OwnerReferences) + + if got, want := len(c.Labels), 3; got != want { + t.Errorf("expected %d labels got %d", want, got) + } + if got, want := c.Labels[testLabelKey], testLabelValueRunLatest; got != want { + t.Errorf("expected %q labels got %q", want, got) + } + if got, want := c.Labels[serving.ServiceLabelKey], testServiceName; got != want { + t.Errorf("expected %q labels got %q", want, got) + } +} + +func TestPinned(t *testing.T) { + s := createServiceWithPinned() + c, _ := makeConfiguration(s) + if got, want := c.Name, testServiceName; got != want { + t.Errorf("expected %q for service name got %q", want, got) + } + if got, want := c.Namespace, testServiceNamespace; got != want { + t.Errorf("expected %q for service namespace got %q", want, got) + } + if got, want := 
c.Spec.GetTemplate().Spec.GetContainer().Name, testContainerNamePinned; got != want { + t.Errorf("expected %q for container name got %q", want, got) + } + expectOwnerReferencesSetCorrectly(t, c.OwnerReferences) + + if got, want := len(c.Labels), 3; got != want { + t.Errorf("expected %d labels got %d", want, got) + } + if got, want := c.Labels[testLabelKey], testLabelValuePinned; got != want { + t.Errorf("expected %q labels got %q", want, got) + } + if got, want := c.Labels[serving.ServiceLabelKey], testServiceName; got != want { + t.Errorf("expected %q labels got %q", want, got) + } +} + +func TestRelease(t *testing.T) { + s := createServiceWithRelease(1, 0) + c, _ := makeConfiguration(s) + if got, want := c.Name, testServiceName; got != want { + t.Errorf("expected %q for service name got %q", want, got) + } + if got, want := c.Namespace, testServiceNamespace; got != want { + t.Errorf("expected %q for service namespace got %q", want, got) + } + if got, want := c.Spec.GetTemplate().Spec.GetContainer().Name, testContainerNameRelease; got != want { + t.Errorf("expected %q for container name got %q", want, got) + } + expectOwnerReferencesSetCorrectly(t, c.OwnerReferences) + + if got, want := len(c.Labels), 3; got != want { + t.Errorf("expected %d labels got %d", want, got) + } + if got, want := c.Labels[testLabelKey], testLabelValueRelease; got != want { + t.Errorf("expected %q labels got %q", want, got) + } + if got, want := c.Labels[serving.ServiceLabelKey], testServiceName; got != want { + t.Errorf("expected %q labels got %q", want, got) + } +} + +func TestInlineConfigurationSpec(t *testing.T) { + s := createServiceInline() + c, _ := makeConfiguration(s) + if got, want := c.Name, testServiceName; got != want { + t.Errorf("expected %q for service name got %q", want, got) + } + if got, want := c.Namespace, testServiceNamespace; got != want { + t.Errorf("expected %q for service namespace got %q", want, got) + } + if got, want := 
c.Spec.GetTemplate().Spec.GetContainer().Name, testContainerNameInline; got != want { + t.Errorf("expected %q for container name got %q", want, got) + } + expectOwnerReferencesSetCorrectly(t, c.OwnerReferences) + + if got, want := len(c.Labels), 2; got != want { + t.Errorf("expected %d labels got %d", want, got) + } + if got, want := c.Labels[serving.ServiceLabelKey], testServiceName; got != want { + t.Errorf("expected %q labels got %q", want, got) + } +} + +func TestConfigurationHasNoKubectlAnnotation(t *testing.T) { + s := createServiceWithKubectlAnnotation() + c, err := makeConfiguration(s) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if v, ok := c.Annotations[corev1.LastAppliedConfigAnnotation]; ok { + t.Errorf("Annotation %s = %q, want empty", corev1.LastAppliedConfigAnnotation, v) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/doc.go new file mode 100644 index 0000000000..40af28f4ad --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources holds simple functions for synthesizing child resources +// from a Service resource and any relevant Service controller configuration. 
+package resources diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/doc.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/doc.go new file mode 100644 index 0000000000..aa96d4baa3 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names holds simple functions for synthesizing resource names. +package names diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names.go new file mode 100644 index 0000000000..3182c10163 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names.go @@ -0,0 +1,27 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import "knative.dev/pkg/kmeta" + +func Configuration(service kmeta.Accessor) string { + return service.GetName() +} + +func Route(service kmeta.Accessor) string { + return service.GetName() +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names_test.go new file mode 100644 index 0000000000..8a2274d2ec --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/names/names_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package names + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +func TestNamer(t *testing.T) { + tests := []struct { + name string + service *v1alpha1.Service + f func(kmeta.Accessor) string + want string + }{{ + name: "Configuration", + service: &v1alpha1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + }, + f: Configuration, + want: "foo", + }, { + name: "Route", + service: &v1alpha1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + }, + }, + f: Route, + want: "bar", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := test.f(test.service) + if got != test.want { + t.Errorf("%s() = %v, wanted %v", test.name, got, test.want) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route.go new file mode 100644 index 0000000000..dce701cf10 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/pkg/resources" +) + +// MakeRoute creates a Route from a Service object. +func MakeRoute(service *v1alpha1.Service) (*v1alpha1.Route, error) { + c := &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Route(service), + Namespace: service.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *kmeta.NewControllerRef(service), + }, + Annotations: resources.FilterMap(service.GetAnnotations(), func(key string) bool { + return key == corev1.LastAppliedConfigAnnotation + }), + Labels: resources.UnionMaps(service.GetLabels(), map[string]string{ + // Add this service's name to the route annotations. + serving.ServiceLabelKey: service.Name, + }), + }, + Spec: *service.Spec.RouteSpec.DeepCopy(), + } + + // Fill in any missing ConfigurationName fields when translating + // from Service to Route. + for idx := range c.Spec.Traffic { + if c.Spec.Traffic[idx].RevisionName == "" { + c.Spec.Traffic[idx].ConfigurationName = names.Configuration(service) + } + } + + return c, nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route_test.go new file mode 100644 index 0000000000..629df70e1d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/route_test.go @@ -0,0 +1,405 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/service/resources/names" +) + +func makeRoute(service *v1alpha1.Service) (*v1alpha1.Route, error) { + // We do this prior to reconciliation, so test with it enabled. + service.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + return MakeRoute(service) +} + +func TestRouteRunLatest(t *testing.T) { + s := createServiceWithRunLatest() + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + if got, want := len(r.Spec.Traffic), 1; got != want { + t.Fatalf("Expected %d traffic targets got %d", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + wantL := 
map[string]string{ + testLabelKey: testLabelValueRunLatest, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + + wantA := map[string]string{ + testAnnotationKey: testAnnotationValue, + } + if got, want := r.Annotations, wantA; !cmp.Equal(got, want) { + t.Errorf("Annotations mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} + +func TestRoutePinned(t *testing.T) { + s := createServiceWithPinned() + r, err := makeRoute(s) + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + if got, want := len(r.Spec.Traffic), 1; got != want { + t.Fatalf("Expected %d traffic targets, got %d", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + RevisionName: testRevisionName, + LatestRevision: ptr.Bool(false), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + wantL := map[string]string{ + testLabelKey: testLabelValuePinned, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} + +func TestRouteReleaseSingleRevision(t *testing.T) { + const numRevisions = 1 + s := createServiceWithRelease(numRevisions, 0 /*no rollout*/) + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want 
{ + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + Percent: ptr.Int64(100), + RevisionName: testRevisionName, + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + wantL := map[string]string{ + testLabelKey: testLabelValueRelease, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} + +func TestRouteLatestRevisionSplit(t *testing.T) { + const ( + rolloutPercent = 42 + currentPercent = 100 - rolloutPercent + ) + s := createServiceWithRelease(2 /*num revisions*/, rolloutPercent) + s.Spec.DeprecatedRelease.Revisions = []string{v1alpha1.ReleaseLatestRevisionKeyword, "juicy-revision"} + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + Percent: ptr.Int64(currentPercent), + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: 
v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + Percent: ptr.Int64(rolloutPercent), + RevisionName: "juicy-revision", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + wantL := map[string]string{ + testLabelKey: testLabelValueRelease, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} +func TestRouteLatestRevisionSplitCandidate(t *testing.T) { + const ( + rolloutPercent = 42 + currentPercent = 100 - rolloutPercent + ) + s := createServiceWithRelease(2 /*num revisions*/, rolloutPercent) + s.Spec.DeprecatedRelease.Revisions = []string{"squishy-revision", v1alpha1.ReleaseLatestRevisionKeyword} + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + Percent: ptr.Int64(currentPercent), + RevisionName: "squishy-revision", + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + Percent: ptr.Int64(rolloutPercent), + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: 
v1alpha1.LatestTrafficTarget, + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + wantL := map[string]string{ + testLabelKey: testLabelValueRelease, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} +func TestRouteLatestRevisionNoSplit(t *testing.T) { + s := createServiceWithRelease(1 /*num revisions*/, 0 /*unused*/) + s.Spec.DeprecatedRelease.Revisions = []string{v1alpha1.ReleaseLatestRevisionKeyword} + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + // Should have 2 named traffic targets (current, latest) + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + Percent: ptr.Int64(100), + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + wantL := map[string]string{ + testLabelKey: testLabelValueRelease, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + 
t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} + +func TestRouteReleaseTwoRevisions(t *testing.T) { + const ( + currentPercent = 52 + numRevisions = 2 + ) + s := createServiceWithRelease(numRevisions, 100-currentPercent) + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Errorf("Expected nil for err got %q", err) + } + if got, want := r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + // Should have 3 named traffic targets (current, candidate, latest) + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + Percent: ptr.Int64(currentPercent), + RevisionName: testRevisionName, + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + Percent: ptr.Int64(100 - currentPercent), + RevisionName: testCandidateRevisionName, + LatestRevision: ptr.Bool(false), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + wantL := map[string]string{ + testLabelKey: testLabelValueRelease, + serving.ServiceLabelKey: testServiceName, + } + if got, want := r.Labels, wantL; !cmp.Equal(got, want) { + t.Errorf("Labels mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } +} + +func TestInlineRouteSpec(t *testing.T) { + s := createServiceInline() + testConfigName := names.Configuration(s) + r, err := makeRoute(s) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if got, want := 
r.Name, testServiceName; got != want { + t.Errorf("Expected %q for service name got %q", want, got) + } + if got, want := r.Namespace, testServiceNamespace; got != want { + t.Errorf("Expected %q for service namespace got %q", want, got) + } + if got, want := len(r.Spec.Traffic), 1; got != want { + t.Fatalf("Expected %d traffic targets got %d", want, got) + } + wantT := []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + ConfigurationName: testConfigName, + LatestRevision: ptr.Bool(true), + }, + }} + if got, want := r.Spec.Traffic, wantT; !cmp.Equal(got, want) { + t.Errorf("Traffic mismatch: diff (-got, +want): %s", cmp.Diff(got, want)) + } + expectOwnerReferencesSetCorrectly(t, r.OwnerReferences) + + if got, want := len(r.Labels), 1; got != want { + t.Errorf("expected %d labels got %d", want, got) + } + if got, want := r.Labels[serving.ServiceLabelKey], testServiceName; got != want { + t.Errorf("expected %q labels got %q", want, got) + } +} + +func TestRouteHasNoKubectlAnnotation(t *testing.T) { + s := createServiceWithKubectlAnnotation() + r, err := makeRoute(s) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if v, ok := r.Annotations[corev1.LastAppliedConfigAnnotation]; ok { + t.Errorf("Annotation %s = %q, want empty", corev1.LastAppliedConfigAnnotation, v) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/shared_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/shared_test.go new file mode 100644 index 0000000000..94fc1eabf2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/resources/shared_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const ( + testServiceName = "test-service" + testServiceNamespace = "test-service-namespace" + testRevisionName = "test-revision-name" + testCandidateRevisionName = "test-candidate-revision-name" + testContainerNameRunLatest = "test-container-run-latest" + testContainerNamePinned = "test-container-pinned" + testContainerNameRelease = "test-container-release" + testContainerNameInline = "test-container-inline" + testLabelKey = "test-label-key" + testLabelValuePinned = "test-label-value-pinned" + testLabelValueRunLatest = "test-label-value-run-latest" + testLabelValueRelease = "test-label-value-release" + testAnnotationKey = "test-annotation-key" + testAnnotationValue = "test-annotation-value" +) + +func expectOwnerReferencesSetCorrectly(t *testing.T, ownerRefs []metav1.OwnerReference) { + t.Helper() + if got, want := len(ownerRefs), 1; got != want { + t.Errorf("expected %d owner refs got %d", want, got) + return + } + + expectedRefs := []metav1.OwnerReference{{ + APIVersion: "serving.knative.dev/v1alpha1", + Kind: "Service", + Name: testServiceName, + }} + if diff := cmp.Diff(expectedRefs, ownerRefs, cmpopts.IgnoreFields(expectedRefs[0], "Controller", "BlockOwnerDeletion")); diff != "" { + 
t.Errorf("Unexpected service owner refs diff (-want +got): %v", diff) + } +} + +func createConfiguration(containerName string) v1alpha1.ConfigurationSpec { + return v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Name: containerName, + }, + }, + }, + } +} + +func createServiceInline() *v1alpha1.Service { + return Service(testServiceName, testServiceNamespace, + WithInlineConfigSpec(createConfiguration(testContainerNameInline)), + WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + })) +} + +func createServiceWithRunLatest() *v1alpha1.Service { + return Service(testServiceName, testServiceNamespace, + WithRunLatestConfigSpec(createConfiguration(testContainerNameRunLatest)), + WithServiceLabel(testLabelKey, testLabelValueRunLatest), + WithServiceAnnotations(map[string]string{ + testAnnotationKey: testAnnotationValue, + })) +} + +func createServiceWithPinned() *v1alpha1.Service { + return Service(testServiceName, testServiceNamespace, + WithPinnedRolloutConfigSpec(testRevisionName, createConfiguration(testContainerNamePinned)), + WithServiceLabel(testLabelKey, testLabelValuePinned)) +} + +func createServiceWithRelease(numRevision int, rolloutPercent int) *v1alpha1.Service { + var revisions []string + if numRevision == 2 { + revisions = []string{testRevisionName, testCandidateRevisionName} + } else { + revisions = []string{testRevisionName} + } + + return Service(testServiceName, testServiceNamespace, + WithReleaseRolloutAndPercentageConfigSpec(rolloutPercent, createConfiguration(testContainerNameRelease), revisions...), + WithServiceLabel(testLabelKey, testLabelValueRelease)) +} + +func createServiceWithKubectlAnnotation() *v1alpha1.Service { + return Service(testServiceName, testServiceNamespace, + 
WithRunLatestConfigSpec(createConfiguration(testContainerNameRunLatest)), + WithServiceAnnotations(map[string]string{ + corev1.LastAppliedConfigAnnotation: testAnnotationValue, + })) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/service.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/service.go new file mode 100644 index 0000000000..689368f278 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/service.go @@ -0,0 +1,388 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + "go.uber.org/zap" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + "knative.dev/pkg/controller" + "knative.dev/pkg/kmp" + "knative.dev/pkg/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + listers "knative.dev/serving/pkg/client/listers/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + cfgreconciler "knative.dev/serving/pkg/reconciler/configuration" + "knative.dev/serving/pkg/reconciler/service/resources" + resourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" +) + +const ( + // ReconcilerName is the name of the reconciler + ReconcilerName = "Services" +) + +// Reconciler implements controller.Reconciler for Service resources. +type Reconciler struct { + *reconciler.Base + + // listers index properties about resources + serviceLister listers.ServiceLister + configurationLister listers.ConfigurationLister + revisionLister listers.RevisionLister + routeLister listers.RouteLister +} + +// Check that our Reconciler implements controller.Reconciler +var _ controller.Reconciler = (*Reconciler)(nil) + +// Reconcile compares the actual state with the desired, and attempts to +// converge the two. It then updates the Status block of the Service resource +// with the current status of the resource. 
+func (c *Reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + logger.Errorw("Invalid resource key", zap.Error(err)) + return nil + } + + // Get the Service resource with this namespace/name + original, err := c.serviceLister.Services(namespace).Get(name) + if apierrs.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing. + logger.Info("Service in work queue no longer exists") + return nil + } else if err != nil { + return err + } + + if original.GetDeletionTimestamp() != nil { + return nil + } + + // Don't modify the informers copy + service := original.DeepCopy() + + // Reconcile this copy of the service and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileErr := c.reconcile(ctx, service) + if equality.Semantic.DeepEqual(original.Status, service.Status) { + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the informer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + + } else if uErr := c.updateStatus(original, service, logger); uErr != nil { + logger.Warnw("Failed to update service status", zap.Error(uErr)) + c.Recorder.Eventf(service, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for Service %q: %v", service.Name, uErr) + return uErr + } else if reconcileErr == nil { + // There was a difference and updateStatus did not return an error. + c.Recorder.Eventf(service, corev1.EventTypeNormal, "Updated", "Updated Service %q", service.GetName()) + } + if reconcileErr != nil { + c.Recorder.Event(service, corev1.EventTypeWarning, "InternalError", reconcileErr.Error()) + return reconcileErr + } + // TODO(mattmoor): Remove this after 0.7 cuts. 
+ // If the spec has changed, then assume we need an upgrade and issue a patch to trigger + // the webhook to upgrade via defaulting. Status updates do not trigger this due to the + // use of the /status resource. + if !equality.Semantic.DeepEqual(original.Spec, service.Spec) { + services := v1alpha1.SchemeGroupVersion.WithResource("services") + if err := c.MarkNeedsUpgrade(services, service.Namespace, service.Name); err != nil { + return err + } + } + return nil +} + +func (c *Reconciler) reconcile(ctx context.Context, service *v1alpha1.Service) error { + logger := logging.FromContext(ctx) + + // We may be reading a version of the object that was stored at an older version + // and may not have had all of the assumed defaults specified. This won't result + // in this getting written back to the API Server, but lets downstream logic make + // assumptions about defaulting. + service.SetDefaults(v1.WithUpgradeViaDefaulting(ctx)) + service.Status.InitializeConditions() + + if err := service.ConvertUp(ctx, &v1beta1.Service{}); err != nil { + if ce, ok := err.(*v1alpha1.CannotConvertError); ok { + service.Status.MarkResourceNotConvertible(ce) + return nil + } + return err + } + + config, err := c.config(ctx, logger, service) + if err != nil { + return err + } + + if config.Generation != config.Status.ObservedGeneration { + // The Configuration hasn't yet reconciled our latest changes to + // its desired state, so its conditions are outdated. + service.Status.MarkConfigurationNotReconciled() + + // If BYO-Revision name is used we must serialize reconciling the Configuration + // and Route. Wait for observed generation to match before continuing. + if config.Spec.GetTemplate().Name != "" { + return nil + } + } else { + // Update our Status based on the state of our underlying Configuration. 
+ service.Status.PropagateConfigurationStatus(&config.Status) + } + + // When the Configuration names a Revision, check that the named Revision is owned + // by our Configuration and matches its generation before reprogramming the Route, + // otherwise a bad patch could lead to folks inadvertently routing traffic to a + // pre-existing Revision (possibly for another Configuration). + if _, err := cfgreconciler.CheckNameAvailability(config, c.revisionLister); err != nil && + !apierrs.IsNotFound(err) { + service.Status.MarkRevisionNameTaken(config.Spec.GetTemplate().Name) + return nil + } + + route, err := c.route(ctx, logger, service) + if err != nil { + return err + } + + // Update our Status based on the state of our underlying Route. + ss := &service.Status + if route.Generation != route.Status.ObservedGeneration { + // The Route hasn't yet reconciled our latest changes to + // its desired state, so its conditions are outdated. + ss.MarkRouteNotReconciled() + } else { + // Update our Status based on the state of our underlying Route. 
+ ss.PropagateRouteStatus(&route.Status) + } + + c.checkRoutesNotReady(config, logger, route, service) + service.Status.ObservedGeneration = service.Generation + + return nil +} + +func (c *Reconciler) config(ctx context.Context, logger *zap.SugaredLogger, service *v1alpha1.Service) (*v1alpha1.Configuration, error) { + configName := resourcenames.Configuration(service) + config, err := c.configurationLister.Configurations(service.Namespace).Get(configName) + if apierrs.IsNotFound(err) { + config, err = c.createConfiguration(service) + if err != nil { + c.Recorder.Eventf(service, corev1.EventTypeWarning, "CreationFailed", "Failed to create Configuration %q: %v", configName, err) + return nil, fmt.Errorf("failed to create Configuration: %w", err) + } + c.Recorder.Eventf(service, corev1.EventTypeNormal, "Created", "Created Configuration %q", configName) + } else if err != nil { + return nil, fmt.Errorf("failed to get Configuration: %w", err) + } else if !metav1.IsControlledBy(config, service) { + // Surface an error in the service's status,and return an error. 
+ service.Status.MarkConfigurationNotOwned(configName) + return nil, fmt.Errorf("service: %q does not own configuration: %q", service.Name, configName) + } else if config, err = c.reconcileConfiguration(ctx, service, config); err != nil { + return nil, fmt.Errorf("failed to reconcile Configuration: %w", err) + } + return config, nil +} + +func (c *Reconciler) route(ctx context.Context, logger *zap.SugaredLogger, service *v1alpha1.Service) (*v1alpha1.Route, error) { + routeName := resourcenames.Route(service) + route, err := c.routeLister.Routes(service.Namespace).Get(routeName) + if apierrs.IsNotFound(err) { + route, err = c.createRoute(service) + if err != nil { + c.Recorder.Eventf(service, corev1.EventTypeWarning, "CreationFailed", "Failed to create Route %q: %v", routeName, err) + return nil, fmt.Errorf("failed to create Route: %w", err) + } + c.Recorder.Eventf(service, corev1.EventTypeNormal, "Created", "Created Route %q", routeName) + } else if err != nil { + return nil, fmt.Errorf("failed to get Route: %w", err) + } else if !metav1.IsControlledBy(route, service) { + // Surface an error in the service's status, and return an error. + service.Status.MarkRouteNotOwned(routeName) + return nil, fmt.Errorf("service: %q does not own route: %q", service.Name, routeName) + } else if route, err = c.reconcileRoute(ctx, service, route); err != nil { + return nil, fmt.Errorf("failed to reconcile Route: %w", err) + } + return route, nil +} + +func (c *Reconciler) checkRoutesNotReady(config *v1alpha1.Configuration, logger *zap.SugaredLogger, route *v1alpha1.Route, service *v1alpha1.Service) { + // `manual` is not reconciled. 
+ rc := service.Status.GetCondition(v1alpha1.ServiceConditionRoutesReady) + if rc == nil || rc.Status != corev1.ConditionTrue { + return + } + + if len(route.Spec.Traffic) != len(route.Status.Traffic) { + service.Status.MarkRouteNotYetReady() + return + } + + want, got := route.Spec.DeepCopy().Traffic, route.Status.DeepCopy().Traffic + // Replace `configuration` target with its latest ready revision. + for idx := range want { + if want[idx].ConfigurationName == config.Name { + want[idx].RevisionName = config.Status.LatestReadyRevisionName + want[idx].ConfigurationName = "" + } + } + ignoreFields := cmpopts.IgnoreFields(v1alpha1.TrafficTarget{}, + "TrafficTarget.URL", "TrafficTarget.LatestRevision", + // We specify the Routing via Tag in spec, but the status surfaces it + // via both names for now, so ignore the deprecated name field when + // comparing them. + "DeprecatedName") + if diff, err := kmp.SafeDiff(got, want, ignoreFields); err != nil || diff != "" { + logger.Errorf("Route %s is not yet what we want: %s", route.Name, diff) + service.Status.MarkRouteNotYetReady() + } +} + +func (c *Reconciler) updateStatus(existing *v1alpha1.Service, desired *v1alpha1.Service, logger *zap.SugaredLogger) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + existing, err = c.ServingClientSet.ServingV1alpha1().Services(desired.Namespace).Get(desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. 
+ if reflect.DeepEqual(existing.Status, desired.Status) { + return nil + } + + becomesReady := desired.Status.IsReady() && !existing.Status.IsReady() + existing.Status = desired.Status + _, err = c.ServingClientSet.ServingV1alpha1().Services(desired.Namespace).UpdateStatus(existing) + if err == nil && becomesReady { + duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time) + logger.Infof("Service became ready after %v", duration) + c.StatsReporter.ReportServiceReady(existing.Namespace, existing.Name, duration) + } + return err + }) +} + +func (c *Reconciler) createConfiguration(service *v1alpha1.Service) (*v1alpha1.Configuration, error) { + cfg, err := resources.MakeConfiguration(service) + if err != nil { + return nil, err + } + return c.ServingClientSet.ServingV1alpha1().Configurations(service.Namespace).Create(cfg) +} + +func configSemanticEquals(desiredConfig, config *v1alpha1.Configuration) bool { + return equality.Semantic.DeepEqual(desiredConfig.Spec, config.Spec) && + equality.Semantic.DeepEqual(desiredConfig.ObjectMeta.Labels, config.ObjectMeta.Labels) && + equality.Semantic.DeepEqual(desiredConfig.ObjectMeta.Annotations, config.ObjectMeta.Annotations) +} + +func (c *Reconciler) reconcileConfiguration(ctx context.Context, service *v1alpha1.Service, config *v1alpha1.Configuration) (*v1alpha1.Configuration, error) { + logger := logging.FromContext(ctx) + desiredConfig, err := resources.MakeConfiguration(service) + if err != nil { + return nil, err + } + + if configSemanticEquals(desiredConfig, config) { + // No differences to reconcile. + return config, nil + } + diff, err := kmp.SafeDiff(desiredConfig.Spec, config.Spec) + if err != nil { + return nil, fmt.Errorf("failed to diff Configuration: %w", err) + } + logger.Infof("Reconciling configuration diff (-desired, +observed): %s", diff) + + // Don't modify the informers copy. + existing := config.DeepCopy() + // Preserve the rest of the object (e.g. ObjectMeta except for labels). 
+ existing.Spec = desiredConfig.Spec + existing.ObjectMeta.Labels = desiredConfig.ObjectMeta.Labels + existing.ObjectMeta.Annotations = desiredConfig.ObjectMeta.Annotations + return c.ServingClientSet.ServingV1alpha1().Configurations(service.Namespace).Update(existing) +} + +func (c *Reconciler) createRoute(service *v1alpha1.Service) (*v1alpha1.Route, error) { + route, err := resources.MakeRoute(service) + if err != nil { + // This should be unreachable as configuration creation + // happens first in `reconcile()` and it verifies the edge cases + // that would make `MakeRoute` fail as well. + return nil, err + } + return c.ServingClientSet.ServingV1alpha1().Routes(service.Namespace).Create(route) +} + +func routeSemanticEquals(desiredRoute, route *v1alpha1.Route) bool { + return equality.Semantic.DeepEqual(desiredRoute.Spec, route.Spec) && + equality.Semantic.DeepEqual(desiredRoute.ObjectMeta.Labels, route.ObjectMeta.Labels) && + equality.Semantic.DeepEqual(desiredRoute.ObjectMeta.Annotations, route.ObjectMeta.Annotations) +} + +func (c *Reconciler) reconcileRoute(ctx context.Context, service *v1alpha1.Service, route *v1alpha1.Route) (*v1alpha1.Route, error) { + logger := logging.FromContext(ctx) + desiredRoute, err := resources.MakeRoute(service) + if err != nil { + // This should be unreachable as configuration creation + // happens first in `reconcile()` and it verifies the edge cases + // that would make `MakeRoute` fail as well. + return nil, err + } + + if routeSemanticEquals(desiredRoute, route) { + // No differences to reconcile. + return route, nil + } + diff, err := kmp.SafeDiff(desiredRoute.Spec, route.Spec) + if err != nil { + return nil, fmt.Errorf("failed to diff Route: %w", err) + } + logger.Infof("Reconciling route diff (-desired, +observed): %s", diff) + + // Don't modify the informers copy. + existing := route.DeepCopy() + // Preserve the rest of the object (e.g. ObjectMeta except for labels and annotations). 
+ existing.Spec = desiredRoute.Spec + existing.ObjectMeta.Labels = desiredRoute.ObjectMeta.Labels + existing.ObjectMeta.Annotations = desiredRoute.ObjectMeta.Annotations + return c.ServingClientSet.ServingV1alpha1().Routes(service.Namespace).Update(existing) +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/service/service_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/service/service_test.go new file mode 100644 index 0000000000..f78ed3575e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/service/service_test.go @@ -0,0 +1,1463 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "context" + "fmt" + "testing" + + // Install our fake informers + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/configuration/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/revision/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/route/fake" + _ "knative.dev/serving/pkg/client/injection/informers/serving/v1alpha1/service/fake" + + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler" + "knative.dev/serving/pkg/reconciler/service/resources" + presources "knative.dev/serving/pkg/resources" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgotesting "k8s.io/client-go/testing" + + . "knative.dev/pkg/reconciler/testing" + . "knative.dev/serving/pkg/reconciler/testing/v1alpha1" + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// This is heavily based on the way the OpenShift Ingress controller tests its reconciliation method. +func TestReconcile(t *testing.T) { + table := TableTest{{ + Name: "bad workqueue key", + Key: "too/many/parts", + }, { + Name: "key not found", + Key: "foo/not-found", + }, { + Name: "nop deletion reconcile", + // Test that with a DeletionTimestamp we do nothing. 
+ Objects: []runtime.Object{ + DefaultService("delete-pending", "foo", WithServiceDeletionTimestamp), + }, + Key: "foo/delete-pending", + }, { + Name: "inline - byo rev name used in traffic serialize", + Objects: []runtime.Object{ + DefaultService("byo-rev", "foo", WithInlineNamedRevision), + config("byo-rev", "foo", + WithInlineNamedRevision, + WithGeneration(2)), + }, + // Route should not be created until config progresses + WantCreates: []runtime.Object{}, + Key: "foo/byo-rev", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("byo-rev", "foo", WithInlineNamedRevision, + // Route conditions should be at init state while Config should be OutOfDate + WithInitSvcConditions, WithOutOfDateConfig), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "byo-rev"), + }, + }, { + Name: "inline - byo rev name used in traffic", + Objects: []runtime.Object{ + DefaultService("byo-rev", "foo", WithInlineNamedRevision), + }, + Key: "foo/byo-rev", + WantCreates: []runtime.Object{ + config("byo-rev", "foo", WithInlineNamedRevision), + route("byo-rev", "foo", WithInlineNamedRevision), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("byo-rev", "foo", WithInlineNamedRevision, + // The first reconciliation will initialize the status conditions. 
+ WithInitSvcConditions), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "byo-rev"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "byo-rev"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "byo-rev"), + }, + }, { + Name: "inline - create route and service", + Objects: []runtime.Object{ + DefaultService("run-latest", "foo", WithInlineRollout), + }, + Key: "foo/run-latest", + WantCreates: []runtime.Object{ + config("run-latest", "foo", WithInlineRollout), + route("run-latest", "foo", WithInlineRollout), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("run-latest", "foo", WithInlineRollout, + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "run-latest"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "run-latest"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "run-latest"), + }, + }, { + Name: "runLatest - create route and service", + Objects: []runtime.Object{ + DefaultService("run-latest", "foo", WithRunLatestRollout), + }, + Key: "foo/run-latest", + WantCreates: []runtime.Object{ + config("run-latest", "foo", WithRunLatestRollout), + route("run-latest", "foo", WithRunLatestRollout), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("run-latest", "foo", WithRunLatestRollout, + // The first reconciliation will initialize the status conditions. 
+ WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "run-latest", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "run-latest"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "run-latest"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "run-latest"), + }, + }, { + Name: "pinned - create route and service", + Objects: []runtime.Object{ + DefaultService("pinned", "foo", WithPinnedRollout("pinned-0001")), + }, + Key: "foo/pinned", + WantCreates: []runtime.Object{ + config("pinned", "foo", WithPinnedRollout("pinned-0001")), + route("pinned", "foo", WithPinnedRollout("pinned-0001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("pinned", "foo", WithPinnedRollout("pinned-0001"), + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "pinned", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "pinned"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "pinned"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "pinned"), + }, + }, { + // Pinned rollouts are deprecated, so test the same functionality + // using Release. 
+ Name: "pinned - create route and service - via release", + Objects: []runtime.Object{ + DefaultService("pinned2", "foo", WithReleaseRollout("pinned2-0001")), + }, + Key: "foo/pinned2", + WantCreates: []runtime.Object{ + config("pinned2", "foo", WithReleaseRollout("pinned2-0001")), + route("pinned2", "foo", WithReleaseRollout("pinned2-0001")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("pinned2", "foo", WithReleaseRollout("pinned2-0001"), + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "pinned2", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "pinned2"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "pinned2"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "pinned2"), + }, + }, { + Name: "pinned - with ready config and route", + Objects: []runtime.Object{ + DefaultService("pinned3", "foo", WithReleaseRollout("pinned3-00001"), + WithInitSvcConditions), + config("pinned3", "foo", WithReleaseRollout("pinned3-00001"), + WithGeneration(1), WithObservedGen, + WithLatestCreated("pinned3-00001"), + WithLatestReady("pinned3-00001")), + route("pinned3", "foo", WithReleaseRollout("pinned3-00001"), + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "pinned3-00001", + Percent: ptr.Int64(100), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "pinned3-00001", + Percent: nil, + }, + }), MarkTrafficAssigned, MarkIngressReady), + }, + Key: "foo/pinned3", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + // Make 
sure that status contains all the required propagated fields + // from config and route status. + Object: DefaultService("pinned3", "foo", + // Initial setup conditions. + WithReleaseRollout("pinned3-00001"), + // The delta induced by configuration object. + WithReadyConfig("pinned3-00001"), + // The delta induced by route object. + WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "pinned3-00001", + Percent: ptr.Int64(100), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "pinned3-00001", + Percent: nil, + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "pinned3", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "pinned3"), + }, + WantServiceReadyStats: map[string]int{ + "foo/pinned3": 1, + }, + }, { + Name: "release - with @latest", + Objects: []runtime.Object{ + DefaultService("release", "foo", WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword)), + }, + Key: "foo/release", + WantCreates: []runtime.Object{ + config("release", "foo", WithReleaseRollout("release-00001")), + route("release", "foo", WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword)), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release", "foo", WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword), + // The first reconciliation will initialize the status conditions. 
+ WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "release"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "release"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release"), + }, + }, { + Name: "release - create route and service", + Objects: []runtime.Object{ + DefaultService("release", "foo", WithReleaseRollout("release-00001", "release-00002")), + }, + Key: "foo/release", + WantCreates: []runtime.Object{ + config("release", "foo", WithReleaseRollout("release-00001", "release-00002")), + route("release", "foo", WithReleaseRollout("release-00001", "release-00002")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release", "foo", WithReleaseRollout("release-00001", "release-00002"), + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "release"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "release"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release"), + }, + }, { + Name: "release - update service, route not ready", + Objects: []runtime.Object{ + DefaultService("release-nr", "foo", WithReleaseRollout("release-nr-00002"), WithInitSvcConditions), + config("release-nr", "foo", WithReleaseRollout("release-nr-00002"), + WithCreatedAndReady("release-nr-00002", "release-nr-00002")), + // NB: route points to the previous revision. 
+ route("release-nr", "foo", WithReleaseRollout("release-nr-00002"), RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + }, + Key: "foo/release-nr", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-nr", "foo", + WithReleaseRollout("release-nr-00002"), + WithReadyConfig("release-nr-00002"), + WithServiceStatusRouteNotReady, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-nr", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-nr"), + }, + }, { + Name: "release - update service, route not ready, 2 rev, no split", + Objects: []runtime.Object{ + DefaultService("release-nr", "foo", WithReleaseRollout("release-nr-00002", "release-nr-00003"), WithInitSvcConditions), + config("release-nr", "foo", WithReleaseRollout("release-nr-00002", "release-nr-00003"), + WithCreatedAndReady("release-nr-00003", "release-nr-00003")), + // NB: route points to the previous revision. 
+ route("release-nr", "foo", WithReleaseRollout("release-nr-00002", "release-nr-00003"), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + }, + Key: "foo/release-nr", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-nr", "foo", + WithReleaseRollout("release-nr-00002", "release-nr-00003"), + WithReadyConfig("release-nr-00003"), + WithServiceStatusRouteNotReady, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-nr", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-nr"), + }, + }, { + Name: "release - update service, route not ready, traffic split", + Objects: []runtime.Object{ + DefaultService("release-nr-ts", "foo", + WithReleaseRolloutAndPercentage(42, "release-nr-ts-00002", "release-nr-ts-00003"), + WithInitSvcConditions), + config("release-nr-ts", "foo", + WithReleaseRolloutAndPercentage(42, "release-nr-ts-00002", "release-nr-ts-00003"), + WithCreatedAndReady("release-nr-ts-00003", "release-nr-ts-00003")), + route("release-nr-ts", "foo", + WithReleaseRolloutAndPercentage(42, "release-nr-ts-00002", "release-nr-ts-00003"), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts-00001", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts-00002", + 
Percent: ptr.Int64(42), + }, + }), MarkTrafficAssigned, MarkIngressReady), + }, + Key: "foo/release-nr-ts", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-nr-ts", "foo", + WithReleaseRolloutAndPercentage(42, "release-nr-ts-00002", "release-nr-ts-00003"), + WithReadyConfig("release-nr-ts-00003"), + WithServiceStatusRouteNotReady, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts-00001", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts-00002", + Percent: ptr.Int64(42), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-nr-ts", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-nr-ts"), + }, + }, { + Name: "release - update service, route not ready, traffic split, percentage changed", + Objects: []runtime.Object{ + DefaultService("release-nr-ts2", "foo", + WithReleaseRolloutAndPercentage(58, "release-nr-ts2-00002", "release-nr-ts2-00003"), + WithInitSvcConditions), + config("release-nr-ts2", "foo", + WithReleaseRolloutAndPercentage(58, "release-nr-ts2-00002", "release-nr-ts2-00003"), + WithCreatedAndReady("release-nr-ts2-00003", "release-nr-ts2-00003")), + route("release-nr-ts2", "foo", + WithReleaseRolloutAndPercentage(58, "release-nr-ts2-00002", "release-nr-ts2-00003"), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + // NB: here the revisions match, but percentages, don't. 
+ WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts2-00002", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts2-00003", + Percent: ptr.Int64(42), + }, + }), MarkTrafficAssigned, MarkIngressReady), + }, + Key: "foo/release-nr-ts2", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-nr-ts2", "foo", + WithReleaseRolloutAndPercentage(58, "release-nr-ts2-00002", "release-nr-ts2-00003"), + WithReadyConfig("release-nr-ts2-00003"), + WithServiceStatusRouteNotReady, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts2-00002", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "release-nr-ts2-00003", + Percent: ptr.Int64(42), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-nr-ts2", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-nr-ts2"), + }, + }, { + Name: "release - route and config ready, using @latest", + Objects: []runtime.Object{ + DefaultService("release-ready-lr", "foo", + WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword), WithInitSvcConditions), + route("release-ready-lr", "foo", + WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic([]v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-lr-00001", + Percent: ptr.Int64(100), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-lr-00001", + }, + 
}}...), MarkTrafficAssigned, MarkIngressReady), + config("release-ready-lr", "foo", WithReleaseRollout("release-ready-lr"), + WithGeneration(1), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("release-ready-lr-00001"), + WithLatestReady("release-ready-lr-00001")), + }, + Key: "foo/release-ready-lr", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-ready-lr", "foo", + WithReleaseRollout(v1alpha1.ReleaseLatestRevisionKeyword), + // The delta induced by the config object. + WithReadyConfig("release-ready-lr-00001"), + // The delta induced by route object. + WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic([]v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-lr-00001", + Percent: ptr.Int64(100), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-lr-00001", + }, + }}...), + ), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-ready-lr", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-ready-lr"), + }, + WantServiceReadyStats: map[string]int{ + "foo/release-ready-lr": 1, + }, + }, { + Name: "release - route and config ready, traffic split, using @latest", + Objects: []runtime.Object{ + DefaultService("release-ready-lr", "foo", + WithReleaseRolloutAndPercentage( + 42, "release-ready-lr-00001", v1alpha1.ReleaseLatestRevisionKeyword), WithInitSvcConditions), + route("release-ready-lr", "foo", + WithReleaseRolloutAndPercentage( + 42, "release-ready-lr-00001", v1alpha1.ReleaseLatestRevisionKeyword), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic([]v1alpha1.TrafficTarget{{ + TrafficTarget: 
v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-lr-00001", + Percent: ptr.Int64(58), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + RevisionName: "release-ready-lr-00002", + Percent: ptr.Int64(42), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-lr-00002", + }, + }}...), MarkTrafficAssigned, MarkIngressReady), + config("release-ready-lr", "foo", WithReleaseRollout("release-ready-lr"), + WithGeneration(2), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("release-ready-lr-00002"), + WithLatestReady("release-ready-lr-00002")), + }, + Key: "foo/release-ready-lr", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-ready-lr", "foo", + WithReleaseRolloutAndPercentage( + 42, "release-ready-lr-00001", v1alpha1.ReleaseLatestRevisionKeyword), + // The delta induced by the config object. + WithReadyConfig("release-ready-lr-00002"), + // The delta induced by route object. 
+ WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic([]v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-lr-00001", + Percent: ptr.Int64(58), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + RevisionName: "release-ready-lr-00002", + Percent: ptr.Int64(42), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-lr-00002", + }, + }}...), + ), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-ready-lr", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-ready-lr"), + }, + WantServiceReadyStats: map[string]int{ + "foo/release-ready-lr": 1, + }, + }, { + Name: "release - route and config ready, propagate ready, percentage set", + Objects: []runtime.Object{ + DefaultService("release-ready", "foo", + WithReleaseRolloutAndPercentage(58, /*candidate traffic percentage*/ + "release-ready-00001", "release-ready-00002"), WithInitSvcConditions), + route("release-ready", "foo", + WithReleaseRolloutAndPercentage(58, /*candidate traffic percentage*/ + "release-ready-00001", "release-ready-00002"), + RouteReady, WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-00001", + Percent: ptr.Int64(42), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + RevisionName: "release-ready-00002", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-00002", + Percent: nil, + }, + }), 
MarkTrafficAssigned, MarkIngressReady), + config("release-ready", "foo", WithRunLatestRollout, + WithGeneration(2), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("release-ready-00002"), WithLatestReady("release-ready-00002")), + }, + Key: "foo/release-ready", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-ready", "foo", + WithReleaseRolloutAndPercentage(58, /*candidate traffic percentage*/ + "release-ready-00001", "release-ready-00002"), + // The delta induced by the config object. + WithReadyConfig("release-ready-00002"), + // The delta induced by route object. + WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CurrentTrafficTarget, + RevisionName: "release-ready-00001", + Percent: ptr.Int64(42), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.CandidateTrafficTarget, + RevisionName: "release-ready-00002", + Percent: ptr.Int64(58), + }, + }, v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: v1alpha1.LatestTrafficTarget, + RevisionName: "release-ready-00002", + Percent: nil, + }, + }), + ), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-ready", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-ready"), + }, + WantServiceReadyStats: map[string]int{ + "foo/release-ready": 1, + }, + }, { + Name: "release - create route and service and percentage", + Objects: []runtime.Object{ + DefaultService("release-with-percent", "foo", WithReleaseRolloutAndPercentage(10, /*candidate traffic percentage*/ + "release-with-percent-00001", "release-with-percent-00002")), + }, + Key: "foo/release-with-percent", + WantCreates: []runtime.Object{ + 
config("release-with-percent", "foo", WithReleaseRolloutAndPercentage(10, "release-with-percent-00001", "release-with-percent-00002")), + route("release-with-percent", "foo", WithReleaseRolloutAndPercentage(10, "release-with-percent-00001", "release-with-percent-00002")), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("release-with-percent", "foo", WithReleaseRolloutAndPercentage(10, "release-with-percent-00001", "release-with-percent-00002"), + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "release-with-percent", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "release-with-percent"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "release-with-percent"), + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "release-with-percent"), + }, + }, { + Name: "runLatest - no updates", + Objects: []runtime.Object{ + DefaultService("no-updates", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("no-updates", "foo", WithRunLatestRollout), + config("no-updates", "foo", WithRunLatestRollout), + }, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "no-updates", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + Key: "foo/no-updates", + }, { + Name: "runLatest - update annotations", + Objects: []runtime.Object{ + DefaultService("update-annos", "foo", WithRunLatestRollout, WithInitSvcConditions, + func(s *v1alpha1.Service) { + s.Annotations = presources.UnionMaps(s.Annotations, + map[string]string{"new-key": "new-value"}) + }), + config("update-annos", "foo", WithRunLatestRollout), + route("update-annos", "foo", WithRunLatestRollout), + }, + Key: 
"foo/update-annos", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-annos", "foo", WithRunLatestRollout, + func(s *v1alpha1.Configuration) { + s.Annotations = presources.UnionMaps(s.Annotations, + map[string]string{"new-key": "new-value"}) + }), + }, { + Object: route("update-annos", "foo", WithRunLatestRollout, + func(s *v1alpha1.Route) { + s.Annotations = presources.UnionMaps(s.Annotations, + map[string]string{"new-key": "new-value"}) + }), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "update-annos", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + }, { + Name: "runLatest - delete annotations", + Objects: []runtime.Object{ + DefaultService("update-annos", "foo", WithRunLatestRollout, WithInitSvcConditions), + config("update-annos", "foo", WithRunLatestRollout, + func(s *v1alpha1.Configuration) { + s.Annotations = presources.UnionMaps(s.Annotations, + map[string]string{"new-key": "new-value"}) + }), + route("update-annos", "foo", WithRunLatestRollout, + func(s *v1alpha1.Route) { + s.Annotations = presources.UnionMaps(s.Annotations, + map[string]string{"new-key": "new-value"}) + }), + }, + Key: "foo/update-annos", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-annos", "foo", WithRunLatestRollout), + }, { + Object: route("update-annos", "foo", WithRunLatestRollout), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "update-annos", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + }, { + Name: "runLatest - update route and service", + Objects: []runtime.Object{ + DefaultService("update-route-and-config", "foo", WithRunLatestRollout, WithInitSvcConditions), + // Mutate the Config/Route to have a different body than we want. 
+ config("update-route-and-config", "foo", WithRunLatestRollout, + // This is just an unexpected mutation of the config spec vs. the service spec. + WithConfigContainerConcurrency(5)), + route("update-route-and-config", "foo", WithRunLatestRollout, MutateRoute), + }, + Key: "foo/update-route-and-config", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-route-and-config", "foo", WithRunLatestRollout), + }, { + Object: route("update-route-and-config", "foo", WithRunLatestRollout), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "update-route-and-config", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + }, { + Name: "runLatest - update route and service (bad existing Revision)", + Objects: []runtime.Object{ + DefaultService("update-route-and-config", "foo", WithRunLatestRollout, func(svc *v1alpha1.Service) { + svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Name = "update-route-and-config-blah" + }, WithInitSvcConditions), + // Mutate the Config/Route to have a different body than we want. + config("update-route-and-config", "foo", WithRunLatestRollout, + // Change the concurrency to ensure it is corrected. + WithConfigContainerConcurrency(5)), + route("update-route-and-config", "foo", WithRunLatestRollout, MutateRoute), + &v1alpha1.Revision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "update-route-and-config-blah", + Namespace: "foo", + // Not labeled with the configuration or the right generation. 
+ }, + }, + }, + Key: "foo/update-route-and-config", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-route-and-config", "foo", WithRunLatestRollout, + func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Name = "update-route-and-config-blah" + }), + }}, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("update-route-and-config", "foo", WithRunLatestRollout, func(svc *v1alpha1.Service) { + svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Name = "update-route-and-config-blah" + }, WithInitSvcConditions, func(svc *v1alpha1.Service) { + svc.Status.MarkRevisionNameTaken("update-route-and-config-blah") + }), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "update-route-and-config", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "update-route-and-config"), + }, + }, { + Name: "runLatest - update route and config labels", + Objects: []runtime.Object{ + // Mutate the Service to add some more labels + DefaultService("update-route-and-config-labels", "foo", WithRunLatestRollout, WithInitSvcConditions, WithServiceLabel("new-label", "new-value")), + config("update-route-and-config-labels", "foo", WithRunLatestRollout), + route("update-route-and-config-labels", "foo", WithRunLatestRollout), + }, + Key: "foo/update-route-and-config-labels", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-route-and-config-labels", "foo", WithRunLatestRollout, WithConfigLabel("new-label", "new-value")), + }, { + Object: route("update-route-and-config-labels", "foo", WithRunLatestRollout, WithRouteLabel(map[string]string{"new-label": "new-value", + "serving.knative.dev/service": "update-route-and-config-labels"})), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + 
Namespace: "foo", + }, + Name: "update-route-and-config-labels", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + }, { + Name: "runLatest - update route config labels ignoring serving.knative.dev/route", + Objects: []runtime.Object{ + // Mutate the Service to add some more labels + DefaultService("update-child-labels-ignore-route-label", "foo", + WithRunLatestRollout, WithInitSvcConditions, WithServiceLabel("new-label", "new-value")), + config("update-child-labels-ignore-route-label", "foo", + WithRunLatestRollout, WithConfigLabel("serving.knative.dev/route", "update-child-labels-ignore-route-label")), + route("update-child-labels-ignore-route-label", "foo", WithRunLatestRollout), + }, + Key: "foo/update-child-labels-ignore-route-label", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-child-labels-ignore-route-label", "foo", WithRunLatestRollout, WithConfigLabel("new-label", "new-value"), + WithConfigLabel("serving.knative.dev/route", "update-child-labels-ignore-route-label")), + }, { + Object: route("update-child-labels-ignore-route-label", "foo", WithRunLatestRollout, WithRouteLabel(map[string]string{"new-label": "new-value", + "serving.knative.dev/service": "update-child-labels-ignore-route-label"})), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "update-child-labels-ignore-route-label", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + }, { + Name: "runLatest - bad config update", + Objects: []runtime.Object{ + // There is no spec.{runLatest,pinned} in this Service, which triggers the error + // path updating Configuration. 
+ DefaultService("bad-config-update", "foo", WithInitSvcConditions, WithRunLatestRollout, + func(svc *v1alpha1.Service) { + svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.GetContainer().Image = "#" + }), + config("bad-config-update", "foo", WithRunLatestRollout), + route("bad-config-update", "foo", WithRunLatestRollout), + }, + Key: "foo/bad-config-update", + WantErr: true, + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("bad-config-update", "foo", WithRunLatestRollout, + func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Spec.GetContainer().Image = "#" + }), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", + "failed to reconcile Configuration: Failed to parse image reference: spec.template.spec.containers[0].image\nimage: \"#\", error: could not parse reference"), + }, + }, { + Name: "runLatest - route creation failure", + // Induce a failure during route creation + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "routes"), + }, + Objects: []runtime.Object{ + DefaultService("create-route-failure", "foo", WithRunLatestRollout), + }, + Key: "foo/create-route-failure", + WantCreates: []runtime.Object{ + config("create-route-failure", "foo", WithRunLatestRollout), + route("create-route-failure", "foo", WithRunLatestRollout), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("create-route-failure", "foo", WithRunLatestRollout, + // First reconcile initializes conditions. 
+ WithInitSvcConditions), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "create-route-failure"), + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Route %q: %v", + "create-route-failure", "inducing failure for create routes"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create Route: inducing failure for create routes"), + }, + }, { + Name: "runLatest - configuration creation failure", + // Induce a failure during configuration creation + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("create", "configurations"), + }, + Objects: []runtime.Object{ + DefaultService("create-config-failure", "foo", WithRunLatestRollout), + }, + Key: "foo/create-config-failure", + WantCreates: []runtime.Object{ + config("create-config-failure", "foo", WithRunLatestRollout), + // We don't get to creating the Route. + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("create-config-failure", "foo", WithRunLatestRollout, + // First reconcile initializes conditions. + WithInitSvcConditions), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "CreationFailed", "Failed to create Configuration %q: %v", + "create-config-failure", "inducing failure for create configurations"), + Eventf(corev1.EventTypeWarning, "InternalError", "failed to create Configuration: inducing failure for create configurations"), + }, + }, { + Name: "runLatest - update route failure", + // Induce a failure updating the route + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "routes"), + }, + Objects: []runtime.Object{ + DefaultService("update-route-failure", "foo", WithRunLatestRollout, WithInitSvcConditions), + // Mutate the Route to have an unexpected body to trigger an update. 
+ route("update-route-failure", "foo", WithRunLatestRollout, MutateRoute), + config("update-route-failure", "foo", WithRunLatestRollout), + }, + Key: "foo/update-route-failure", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: route("update-route-failure", "foo", WithRunLatestRollout), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to reconcile Route: inducing failure for update routes"), + }, + }, { + Name: "runLatest - update config failure", + // Induce a failure updating the config + WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "configurations"), + }, + Objects: []runtime.Object{ + DefaultService("update-config-failure", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("update-config-failure", "foo", WithRunLatestRollout), + // Mutate the Config to have an unexpected body to trigger an update. + config("update-config-failure", "foo", WithRunLatestRollout, + // This is just an unexpected mutation of the config spec vs. the service spec. + WithConfigContainerConcurrency(5)), + }, + Key: "foo/update-config-failure", + WantUpdates: []clientgotesting.UpdateActionImpl{{ + Object: config("update-config-failure", "foo", WithRunLatestRollout), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", "failed to reconcile Configuration: inducing failure for update configurations"), + }, + }, { + Name: "runLatest - failure updating service status", + // Induce a failure updating the service status. 
+ WantErr: true, + WithReactors: []clientgotesting.ReactionFunc{ + InduceFailure("update", "services"), + }, + Objects: []runtime.Object{ + DefaultService("run-latest", "foo", WithRunLatestRollout), + }, + Key: "foo/run-latest", + WantCreates: []runtime.Object{ + config("run-latest", "foo", WithRunLatestRollout), + route("run-latest", "foo", WithRunLatestRollout), + }, + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("run-latest", "foo", WithRunLatestRollout, + // We attempt to update the Service to initialize its + // conditions, which is where we induce the failure. + WithInitSvcConditions), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Created", "Created Configuration %q", "run-latest"), + Eventf(corev1.EventTypeNormal, "Created", "Created Route %q", "run-latest"), + Eventf(corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for Service %q: %v", + "run-latest", "inducing failure for update services"), + }, + }, { + Name: "runLatest - route and config ready, propagate ready", + // When both route and config are ready, the service should become ready. + Objects: []runtime.Object{ + DefaultService("all-ready", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("all-ready", "foo", WithRunLatestRollout, RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "all-ready-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + config("all-ready", "foo", WithRunLatestRollout, + WithGeneration(1), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("all-ready-00001"), WithLatestReady("all-ready-00001")), + }, + Key: "foo/all-ready", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("all-ready", "foo", WithRunLatestRollout, + WithReadyConfig("all-ready-00001"), + // The delta induced by route object. 
+ WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "all-ready-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "all-ready", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "all-ready"), + }, + WantServiceReadyStats: map[string]int{ + "foo/all-ready": 1, + }, + }, { + Name: "runLatest - configuration lagging", + // When both route and config are ready, the service should become ready. + Objects: []runtime.Object{ + DefaultService("all-ready", "foo", WithRunLatestRollout, WithInitSvcConditions, + WithReadyConfig("all-ready-00001")), + route("all-ready", "foo", WithRunLatestRollout, RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "all-ready-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + config("all-ready", "foo", WithRunLatestRollout, + WithGeneration(1), WithObservedGen, WithGeneration(2), + // These turn a Configuration to Ready=true + WithLatestCreated("all-ready-00001"), WithLatestReady("all-ready-00001")), + }, + Key: "foo/all-ready", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("all-ready", "foo", WithRunLatestRollout, + WithReadyConfig("all-ready-00001"), + // The delta induced by route object. 
+ WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + MarkConfigurationNotReconciled, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "all-ready-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "all-ready", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "all-ready"), + }, + }, { + Name: "runLatest - route ready previous version and config ready, service not ready", + // When both route and config are ready, but the route points to the previous revision + // the service should not be ready. + Objects: []runtime.Object{ + DefaultService("config-only-ready", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("config-only-ready", "foo", WithRunLatestRollout, RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-only-ready-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + config("config-only-ready", "foo", WithRunLatestRollout, + WithGeneration(2 /*will generate revision -00002*/), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("config-only-ready-00002"), WithLatestReady("config-only-ready-00002")), + }, + Key: "foo/config-only-ready", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("config-only-ready", "foo", WithRunLatestRollout, + WithReadyConfig("config-only-ready-00002"), + WithServiceStatusRouteNotReady, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-only-ready-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: 
[]clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "config-only-ready", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "config-only-ready"), + }, + }, { + Name: "runLatest - config fails, new gen, propagate failure", + // Gen 1: everything is fine; + // Gen 2: config update fails; + // => service is still OK serving Gen 1. + Objects: []runtime.Object{ + DefaultService("config-fails", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("config-fails", "foo", WithRunLatestRollout, RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-fails-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + config("config-fails", "foo", WithRunLatestRollout, WithGeneration(2), + WithLatestReady("config-fails-00001"), WithLatestCreated("config-fails-00002"), + MarkLatestCreatedFailed("blah"), WithObservedGen), + }, + Key: "foo/config-fails", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("config-fails", "foo", WithRunLatestRollout, WithInitSvcConditions, + WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "config-fails-00001", + Percent: ptr.Int64(100), + }, + }), + WithFailedConfig("config-fails-00002", "RevisionFailed", "blah"), + WithServiceLatestReadyRevision("config-fails-00001")), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "config-fails", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "config-fails"), + }, + }, { + Name: "runLatest - config fails, 
propagate failure", + // When config fails, the service should fail. + Objects: []runtime.Object{ + DefaultService("config-fails", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("config-fails", "foo", WithRunLatestRollout, RouteReady), + config("config-fails", "foo", WithRunLatestRollout, WithGeneration(1), WithObservedGen, + WithLatestCreated("config-fails-00001"), MarkLatestCreatedFailed("blah")), + }, + Key: "foo/config-fails", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("config-fails", "foo", WithRunLatestRollout, WithInitSvcConditions, + WithServiceStatusRouteNotReady, WithFailedConfig( + "config-fails-00001", "RevisionFailed", "blah")), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "config-fails", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "config-fails"), + }, + }, { + Name: "runLatest - route fails, propagate failure", + // When route fails, the service should fail. + Objects: []runtime.Object{ + DefaultService("route-fails", "foo", WithRunLatestRollout, WithInitSvcConditions), + route("route-fails", "foo", WithRunLatestRollout, + RouteFailed("Propagate me, please", "")), + config("route-fails", "foo", WithRunLatestRollout, WithGeneration(1), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("route-fails-00001"), WithLatestReady("route-fails-00001")), + }, + Key: "foo/route-fails", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("route-fails", "foo", WithRunLatestRollout, WithInitSvcConditions, + // When the Configuration is Ready, and the Route has failed, + // we expect the following changed to our status conditions. 
+ WithReadyConfig("route-fails-00001"), + WithFailedRoute("Propagate me, please", "")), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "route-fails", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "route-fails"), + }, + }, { + Name: "runLatest - not owned config exists", + WantErr: true, + Objects: []runtime.Object{ + DefaultService("run-latest", "foo", WithRunLatestRollout), + config("run-latest", "foo", WithRunLatestRollout, WithConfigOwnersRemoved), + }, + Key: "foo/run-latest", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("run-latest", "foo", WithRunLatestRollout, + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions, MarkConfigurationNotOwned), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `service: "run-latest" does not own configuration: "run-latest"`), + }, + }, { + Name: "runLatest - not owned route exists", + WantErr: true, + Objects: []runtime.Object{ + DefaultService("run-latest", "foo", WithRunLatestRollout), + config("run-latest", "foo", WithRunLatestRollout), + route("run-latest", "foo", WithRunLatestRollout, WithRouteOwnersRemoved), + }, + Key: "foo/run-latest", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("run-latest", "foo", WithRunLatestRollout, + // The first reconciliation will initialize the status conditions. + WithInitSvcConditions, MarkRouteNotOwned), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeWarning, "InternalError", `service: "run-latest" does not own route: "run-latest"`), + }, + }, { + Name: "runLatest - correct not owned by adding owner refs", + // If ready Route/Configuration that weren't owned have OwnerReferences attached, + // then a Reconcile will result in the Service becoming happy. 
+ Objects: []runtime.Object{ + DefaultService("new-owner", "foo", WithRunLatestRollout, WithInitSvcConditions, + // This service was unhappy with the prior owner situation. + MarkConfigurationNotOwned, MarkRouteNotOwned), + // The service owns these, which should result in a happy result. + route("new-owner", "foo", WithRunLatestRollout, RouteReady, + WithURL, WithAddress, WithInitRouteConditions, + WithStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "new-owner-00001", + Percent: ptr.Int64(100), + }, + }), MarkTrafficAssigned, MarkIngressReady), + config("new-owner", "foo", WithRunLatestRollout, WithGeneration(1), WithObservedGen, + // These turn a Configuration to Ready=true + WithLatestCreated("new-owner-00001"), WithLatestReady("new-owner-00001")), + }, + Key: "foo/new-owner", + WantStatusUpdates: []clientgotesting.UpdateActionImpl{{ + Object: DefaultService("new-owner", "foo", WithRunLatestRollout, + WithReadyConfig("new-owner-00001"), + // The delta induced by route object. 
+ WithReadyRoute, WithSvcStatusDomain, WithSvcStatusAddress, + WithSvcStatusTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: "new-owner-00001", + Percent: ptr.Int64(100), + }, + })), + }}, + WantPatches: []clientgotesting.PatchActionImpl{{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: "foo", + }, + Name: "new-owner", + Patch: []byte(reconciler.ForceUpgradePatch), + }}, + WantEvents: []string{ + Eventf(corev1.EventTypeNormal, "Updated", "Updated Service %q", "new-owner"), + }, + WantServiceReadyStats: map[string]int{ + "foo/new-owner": 1, + }, + }} + + table.Test(t, MakeFactory(func(ctx context.Context, listers *Listers, cmw configmap.Watcher) controller.Reconciler { + return &Reconciler{ + Base: reconciler.NewBase(ctx, controllerAgentName, cmw), + serviceLister: listers.GetServiceLister(), + configurationLister: listers.GetConfigurationLister(), + revisionLister: listers.GetRevisionLister(), + routeLister: listers.GetRouteLister(), + } + })) +} + +func TestNew(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + c := NewController(ctx, configmap.NewStaticWatcher()) + + if c == nil { + t.Fatal("Expected NewController to return a non-nil value") + } +} + +func config(name, namespace string, so ServiceOption, co ...ConfigOption) *v1alpha1.Configuration { + s := DefaultService(name, namespace, so) + s.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + cfg, err := resources.MakeConfiguration(s) + if err != nil { + panic(fmt.Sprintf("MakeConfiguration() = %v", err)) + } + for _, opt := range co { + opt(cfg) + } + return cfg +} + +func route(name, namespace string, so ServiceOption, ro ...RouteOption) *v1alpha1.Route { + s := DefaultService(name, namespace, so) + s.SetDefaults(v1.WithUpgradeViaDefaulting(context.Background())) + route, err := resources.MakeRoute(s) + if err != nil { + panic(fmt.Sprintf("MakeRoute() = %v", err)) + } + for _, opt := range ro { + opt(route) + } + return route +} + +// TODO(mattmoor): 
Replace these when we refactor Route's table_test.go +func MutateRoute(rt *v1alpha1.Route) { + rt.Spec = v1alpha1.RouteSpec{} +} + +func RouteReady(cfg *v1alpha1.Route) { + cfg.Status = v1alpha1.RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + } +} + +func RouteFailed(reason, message string) RouteOption { + return func(cfg *v1alpha1.Route) { + cfg.Status = v1alpha1.RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "False", + Reason: reason, + Message: message, + }}, + }, + } + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter.go b/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter.go new file mode 100644 index 0000000000..98db0f473b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter.go @@ -0,0 +1,131 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + "fmt" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "knative.dev/pkg/metrics" +) + +const ( + // ServiceReadyCountN is the number of services that have become ready. + ServiceReadyCountN = "service_ready_count" + // ServiceReadyLatencyN is the time it takes for a service to become ready since the resource is created. 
+ ServiceReadyLatencyN = "service_ready_latency" +) + +var ( + serviceReadyLatencyStat = stats.Int64( + ServiceReadyLatencyN, + "Time it takes for a service to become ready since created", + stats.UnitMilliseconds) + serviceReadyCountStat = stats.Int64( + ServiceReadyCountN, + "Number of services that became ready", + stats.UnitDimensionless) + + // Create the tag keys that will be used to add tags to our measurements. + // Tag keys must conform to the restrictions described in + // go.opencensus.io/tag/validate.go. Currently those restrictions are: + // - length between 1 and 255 inclusive + // - characters are printable US-ASCII + reconcilerTagKey = tag.MustNewKey("reconciler") + keyTagKey = tag.MustNewKey("key") +) + +func init() { + // Create views to see our measurements. This can return an error if + // a previously-registered view has the same name with a different value. + // View name defaults to the measure name if unspecified. + if err := view.Register( + &view.View{ + Description: serviceReadyCountStat.Description(), + Measure: serviceReadyCountStat, + Aggregation: view.Count(), + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey}, + }, + &view.View{ + Description: serviceReadyLatencyStat.Description(), + Measure: serviceReadyLatencyStat, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{reconcilerTagKey, keyTagKey}, + }, + ); err != nil { + panic(err) + } +} + +// StatsReporter reports reconcilers' metrics. +type StatsReporter interface { + // ReportServiceReady reports the time it took a service to become Ready. + ReportServiceReady(namespace, service string, d time.Duration) error +} + +// srKey is used to associate StatsReporters with contexts. +type srKey struct{} + +// WithStatsReporter attaches the given StatsReporter to the provided context +// in the returned context. 
+func WithStatsReporter(ctx context.Context, sr StatsReporter) context.Context { + return context.WithValue(ctx, srKey{}, sr) +} + +// GetStatsReporter attempts to look up the StatsReporter on a given context. +// It may return null if none is found. +func GetStatsReporter(ctx context.Context) StatsReporter { + untyped := ctx.Value(srKey{}) + if untyped == nil { + return nil + } + return untyped.(StatsReporter) +} + +type reporter struct { + ctx context.Context +} + +// NewStatsReporter creates a reporter for reconcilers' metrics +func NewStatsReporter(reconciler string) (StatsReporter, error) { + ctx, err := tag.New( + context.Background(), + tag.Upsert(reconcilerTagKey, reconciler)) + if err != nil { + return nil, err + } + return &reporter{ctx: ctx}, nil +} + +// ReportServiceReady reports the time it took a service to become Ready +func (r *reporter) ReportServiceReady(namespace, service string, d time.Duration) error { + key := fmt.Sprintf("%s/%s", namespace, service) + ctx, err := tag.New( + r.ctx, + tag.Upsert(keyTagKey, key)) + if err != nil { + return err + } + + metrics.Record(ctx, serviceReadyCountStat.M(1)) + metrics.Record(ctx, serviceReadyLatencyStat.M(d.Milliseconds())) + return nil +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter_test.go b/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter_test.go new file mode 100644 index 0000000000..5bbd2853fb --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/stats_reporter_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconciler + +import ( + "context" + "fmt" + "testing" + "time" + + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "knative.dev/pkg/metrics/metricstest" +) + +const ( + reconcilerMockName = "mock_reconciler" + testServiceNamespace = "test_namespace" + testServiceName = "test_service" +) + +func TestNewStatsReporter(t *testing.T) { + r, err := NewStatsReporter(reconcilerMockName) + if err != nil { + t.Errorf("Failed to create reporter: %v", err) + } + + m := tag.FromContext(r.(*reporter).ctx) + v, ok := m.Value(reconcilerTagKey) + if !ok { + t.Fatalf("Expected tag %q", reconcilerTagKey) + } + if v != reconcilerMockName { + t.Fatalf("Expected %q for tag %q, got %q", reconcilerMockName, reconcilerTagKey, v) + } +} + +func TestReporter_ReportDuration(t *testing.T) { + reporter, err := NewStatsReporter(reconcilerMockName) + if err != nil { + t.Errorf("Failed to create reporter: %v", err) + } + countWas := int64(0) + if m := getMetric(t, ServiceReadyCountN); m != nil { + countWas = m.Data.(*view.CountData).Value + } + + if err = reporter.ReportServiceReady(testServiceNamespace, testServiceName, time.Second); err != nil { + t.Error(err) + } + expectedTags := map[string]string{ + keyTagKey.Name(): fmt.Sprintf("%s/%s", testServiceNamespace, testServiceName), + reconcilerTagKey.Name(): reconcilerMockName, + } + + metricstest.CheckLastValueData(t, ServiceReadyLatencyN, expectedTags, 1000) + metricstest.CheckCountData(t, ServiceReadyCountN, expectedTags, countWas+1) +} + +func getMetric(t *testing.T, metric string) *view.Row { + t.Helper() + rows, err := 
view.RetrieveData(metric) + if err != nil { + t.Errorf("Failed retrieving data: %v", err) + } + if len(rows) == 0 { + return nil + } + return rows[0] +} + +func TestWithStatsReporter(t *testing.T) { + if WithStatsReporter(context.Background(), nil) == nil { + t.Errorf("stats reporter reports empty context") + } +} diff --git a/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/factory.go b/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/factory.go new file mode 100644 index 0000000000..91b20bf49f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/factory.go @@ -0,0 +1,206 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "encoding/json" + "testing" + + fakecachingclient "knative.dev/caching/pkg/client/injection/client/fake" + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + fakecertmanagerclient "knative.dev/serving/pkg/client/certmanager/injection/client/fake" + fakeservingclient "knative.dev/serving/pkg/client/injection/client/fake" + fakeistioclient "knative.dev/serving/pkg/client/istio/injection/client/fake" + + "knative.dev/pkg/configmap" + "knative.dev/pkg/controller" + "knative.dev/pkg/logging" + logtesting "knative.dev/pkg/logging/testing" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" + + rtesting "knative.dev/pkg/reconciler/testing" + "knative.dev/serving/pkg/reconciler" +) + +const ( + // maxEventBufferSize is the estimated max number of event notifications that + // can be buffered during reconciliation. + maxEventBufferSize = 10 +) + +// Ctor functions create a k8s controller with given params. +type Ctor func(context.Context, *Listers, configmap.Watcher) controller.Reconciler + +// MakeFactory creates a reconciler factory with fake clients and controller created by `ctor`. +func MakeFactory(ctor Ctor) rtesting.Factory { + return func(t *testing.T, r *rtesting.TableRow) ( + controller.Reconciler, rtesting.ActionRecorderList, rtesting.EventList, *rtesting.FakeStatsReporter) { + ls := NewListers(r.Objects) + + ctx := r.Ctx + if ctx == nil { + ctx = context.Background() + } + logger := logtesting.TestLogger(t) + ctx = logging.WithLogger(ctx, logger) + + ctx, kubeClient := fakekubeclient.With(ctx, ls.GetKubeObjects()...) 
+ ctx, istioClient := fakeistioclient.With(ctx, ls.GetIstioObjects()...) + ctx, client := fakeservingclient.With(ctx, ls.GetServingObjects()...) + ctx, dynamicClient := fakedynamicclient.With(ctx, + ls.NewScheme(), ToUnstructured(t, ls.NewScheme(), r.Objects)...) + ctx, cachingClient := fakecachingclient.With(ctx, ls.GetCachingObjects()...) + ctx, certManagerClient := fakecertmanagerclient.With(ctx, ls.GetCMCertificateObjects()...) + ctx = context.WithValue(ctx, TrackerKey, &rtesting.FakeTracker{}) + + // The dynamic client's support for patching is BS. Implement it + // here via PrependReactor (this can be overridden below by the + // provided reactors). + dynamicClient.PrependReactor("patch", "*", + func(action ktesting.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + + eventRecorder := record.NewFakeRecorder(maxEventBufferSize) + ctx = controller.WithEventRecorder(ctx, eventRecorder) + statsReporter := &rtesting.FakeStatsReporter{} + ctx = reconciler.WithStatsReporter(ctx, statsReporter) + + // This is needed for the tests that use generated names and + // the object cannot be created beforehand. + kubeClient.PrependReactor("create", "*", + func(action ktesting.Action) (bool, runtime.Object, error) { + ca := action.(ktesting.CreateAction) + ls.IndexerFor(ca.GetObject()).Add(ca.GetObject()) + return false, nil, nil + }, + ) + // This is needed by the Configuration controller tests, which + // use GenerateName to produce Revisions. + rtesting.PrependGenerateNameReactor(&client.Fake) + + // Set up our Controller from the fakes. + c := ctor(ctx, &ls, configmap.NewStaticWatcher()) + // Update the context with the stuff we decorated it with. 
+ r.Ctx = ctx + + for _, reactor := range r.WithReactors { + kubeClient.PrependReactor("*", "*", reactor) + istioClient.PrependReactor("*", "*", reactor) + client.PrependReactor("*", "*", reactor) + dynamicClient.PrependReactor("*", "*", reactor) + cachingClient.PrependReactor("*", "*", reactor) + certManagerClient.PrependReactor("*", "*", reactor) + } + + // Validate all Create operations through the serving client. + client.PrependReactor("create", "*", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + // TODO(n3wscott): context.Background is the best we can do at the moment, but it should be set-able. + return rtesting.ValidateCreates(context.Background(), action) + }) + client.PrependReactor("update", "*", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + // TODO(n3wscott): context.Background is the best we can do at the moment, but it should be set-able. + return rtesting.ValidateUpdates(context.Background(), action) + }) + + actionRecorderList := rtesting.ActionRecorderList{istioClient, dynamicClient, client, kubeClient, cachingClient, certManagerClient} + eventList := rtesting.EventList{Recorder: eventRecorder} + + return c, actionRecorderList, eventList, statsReporter + } +} + +// ToUnstructured takes a list of k8s resources and converts them to +// Unstructured objects. +// We must pass objects as Unstructured to the dynamic client fake, or it +// won't handle them properly. +func ToUnstructured(t *testing.T, sch *runtime.Scheme, objs []runtime.Object) (us []runtime.Object) { + for _, obj := range objs { + obj = obj.DeepCopyObject() // Don't mess with the primary copy + // Determine and set the TypeMeta for this object based on our test scheme. 
+ gvks, _, err := sch.ObjectKinds(obj) + if err != nil { + t.Fatalf("Unable to determine kind for type: %v", err) + } + apiv, k := gvks[0].ToAPIVersionAndKind() + ta, err := meta.TypeAccessor(obj) + if err != nil { + t.Fatalf("Unable to create type accessor: %v", err) + } + ta.SetAPIVersion(apiv) + ta.SetKind(k) + + b, err := json.Marshal(obj) + if err != nil { + t.Fatalf("Unable to marshal: %v", err) + } + u := &unstructured.Unstructured{} + if err := json.Unmarshal(b, u); err != nil { + t.Fatalf("Unable to unmarshal: %v", err) + } + us = append(us, u) + } + return +} + +type key struct{} + +// TrackerKey is used to looking a FakeTracker in a context.Context +var TrackerKey key = struct{}{} + +// AssertTrackingConfig will ensure the provided Configuration is being tracked +func AssertTrackingConfig(namespace, name string) func(*testing.T, *rtesting.TableRow) { + gvk := v1alpha1.SchemeGroupVersion.WithKind("Configuration") + return AssertTrackingObject(gvk, namespace, name) +} + +// AssertTrackingRevision will ensure the provided Revision is being tracked +func AssertTrackingRevision(namespace, name string) func(*testing.T, *rtesting.TableRow) { + gvk := v1alpha1.SchemeGroupVersion.WithKind("Revision") + return AssertTrackingObject(gvk, namespace, name) +} + +// AssertTrackingObject will ensure the following objects are being tracked +func AssertTrackingObject(gvk schema.GroupVersionKind, namespace, name string) func(*testing.T, *rtesting.TableRow) { + apiVersion, kind := gvk.ToAPIVersionAndKind() + + return func(t *testing.T, r *rtesting.TableRow) { + tracker := r.Ctx.Value(TrackerKey).(*rtesting.FakeTracker) + refs := tracker.References() + + for _, ref := range refs { + if ref.APIVersion == apiVersion && + ref.Name == name && + ref.Namespace == namespace && + ref.Kind == kind { + return + } + } + + t.Errorf("Object was not tracked - %s, Name=%s, Namespace=%s", gvk.String(), name, namespace) + } + +} diff --git 
a/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/listers.go b/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/listers.go new file mode 100644 index 0000000000..547860831b --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/reconciler/testing/v1alpha1/listers.go @@ -0,0 +1,215 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + acmev1alpha2 "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2" + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + istiov1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" + appsv1 "k8s.io/api/apps/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + appsv1listers "k8s.io/client-go/listers/apps/v1" + autoscalingv2beta1listers "k8s.io/client-go/listers/autoscaling/v2beta1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + cachingv1alpha1 "knative.dev/caching/pkg/apis/caching/v1alpha1" + fakecachingclientset "knative.dev/caching/pkg/client/clientset/versioned/fake" + cachinglisters "knative.dev/caching/pkg/client/listers/caching/v1alpha1" + "knative.dev/pkg/reconciler/testing" + av1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + networking "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + 
acmelisters "knative.dev/serving/pkg/client/certmanager/listers/acme/v1alpha2" + certmanagerlisters "knative.dev/serving/pkg/client/certmanager/listers/certmanager/v1alpha2" + fakeservingclientset "knative.dev/serving/pkg/client/clientset/versioned/fake" + fakeistioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned/fake" + istiolisters "knative.dev/serving/pkg/client/istio/listers/networking/v1alpha3" + palisters "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1" + networkinglisters "knative.dev/serving/pkg/client/listers/networking/v1alpha1" + servinglisters "knative.dev/serving/pkg/client/listers/serving/v1alpha1" +) + +var clientSetSchemes = []func(*runtime.Scheme) error{ + fakekubeclientset.AddToScheme, + fakeistioclientset.AddToScheme, + fakeservingclientset.AddToScheme, + fakecachingclientset.AddToScheme, + cmv1alpha2.AddToScheme, + acmev1alpha2.AddToScheme, + autoscalingv2beta1.AddToScheme, +} + +type Listers struct { + sorter testing.ObjectSorter +} + +func NewListers(objs []runtime.Object) Listers { + scheme := NewScheme() + + ls := Listers{ + sorter: testing.NewObjectSorter(scheme), + } + + ls.sorter.AddObjects(objs...) + + return ls +} + +func NewScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + + for _, addTo := range clientSetSchemes { + addTo(scheme) + } + return scheme +} + +func (*Listers) NewScheme() *runtime.Scheme { + return NewScheme() +} + +// IndexerFor returns the indexer for the given object. 
+func (l *Listers) IndexerFor(obj runtime.Object) cache.Indexer { + return l.sorter.IndexerForObjectType(obj) +} + +func (l *Listers) GetKubeObjects() []runtime.Object { + return l.sorter.ObjectsForSchemeFunc(fakekubeclientset.AddToScheme) +} + +func (l *Listers) GetCachingObjects() []runtime.Object { + return l.sorter.ObjectsForSchemeFunc(fakecachingclientset.AddToScheme) +} + +func (l *Listers) GetServingObjects() []runtime.Object { + return l.sorter.ObjectsForSchemeFunc(fakeservingclientset.AddToScheme) +} + +func (l *Listers) GetIstioObjects() []runtime.Object { + return l.sorter.ObjectsForSchemeFunc(fakeistioclientset.AddToScheme) +} + +// GetCMCertificateObjects gets a list of Cert-Manager Certificate objects. +func (l *Listers) GetCMCertificateObjects() []runtime.Object { + return l.sorter.ObjectsForSchemeFunc(cmv1alpha2.AddToScheme) +} + +func (l *Listers) GetServiceLister() servinglisters.ServiceLister { + return servinglisters.NewServiceLister(l.IndexerFor(&v1alpha1.Service{})) +} + +func (l *Listers) GetRouteLister() servinglisters.RouteLister { + return servinglisters.NewRouteLister(l.IndexerFor(&v1alpha1.Route{})) +} + +// GetServerlessServiceLister returns a lister for the ServerlessService objects. +func (l *Listers) GetServerlessServiceLister() networkinglisters.ServerlessServiceLister { + return networkinglisters.NewServerlessServiceLister(l.IndexerFor(&networking.ServerlessService{})) +} + +func (l *Listers) GetConfigurationLister() servinglisters.ConfigurationLister { + return servinglisters.NewConfigurationLister(l.IndexerFor(&v1alpha1.Configuration{})) +} + +func (l *Listers) GetRevisionLister() servinglisters.RevisionLister { + return servinglisters.NewRevisionLister(l.IndexerFor(&v1alpha1.Revision{})) +} + +func (l *Listers) GetPodAutoscalerLister() palisters.PodAutoscalerLister { + return palisters.NewPodAutoscalerLister(l.IndexerFor(&av1alpha1.PodAutoscaler{})) +} + +// GetMetricLister returns a lister for the Metric objects. 
+func (l *Listers) GetMetricLister() palisters.MetricLister { + return palisters.NewMetricLister(l.IndexerFor(&av1alpha1.Metric{})) +} + +// GetHorizontalPodAutoscalerLister gets lister for HorizontalPodAutoscaler resources. +func (l *Listers) GetHorizontalPodAutoscalerLister() autoscalingv2beta1listers.HorizontalPodAutoscalerLister { + return autoscalingv2beta1listers.NewHorizontalPodAutoscalerLister(l.IndexerFor(&autoscalingv2beta1.HorizontalPodAutoscaler{})) +} + +// GetIngressLister get lister for Ingress resource. +func (l *Listers) GetIngressLister() networkinglisters.IngressLister { + return networkinglisters.NewIngressLister(l.IndexerFor(&networking.Ingress{})) +} + +// GetCertificateLister get lister for Certificate resource. +func (l *Listers) GetCertificateLister() networkinglisters.CertificateLister { + return networkinglisters.NewCertificateLister(l.IndexerFor(&networking.Certificate{})) +} + +func (l *Listers) GetVirtualServiceLister() istiolisters.VirtualServiceLister { + return istiolisters.NewVirtualServiceLister(l.IndexerFor(&istiov1alpha3.VirtualService{})) +} + +// GetGatewayLister gets lister for Istio Gateway resource. +func (l *Listers) GetGatewayLister() istiolisters.GatewayLister { + return istiolisters.NewGatewayLister(l.IndexerFor(&istiov1alpha3.Gateway{})) +} + +// GetKnCertificateLister gets lister for Knative Certificate resource. +func (l *Listers) GetKnCertificateLister() networkinglisters.CertificateLister { + return networkinglisters.NewCertificateLister(l.IndexerFor(&networking.Certificate{})) +} + +// GetCMCertificateLister gets lister for Cert Manager Certificate resource. +func (l *Listers) GetCMCertificateLister() certmanagerlisters.CertificateLister { + return certmanagerlisters.NewCertificateLister(l.IndexerFor(&cmv1alpha2.Certificate{})) +} + +// GetCMClusterIssuerLister gets lister for Cert Manager ClusterIssuer resource. 
+func (l *Listers) GetCMClusterIssuerLister() certmanagerlisters.ClusterIssuerLister { + return certmanagerlisters.NewClusterIssuerLister(l.IndexerFor(&cmv1alpha2.ClusterIssuer{})) +} + +// GetCMChallengeLister gets lister for Cert Manager Challenge resource. +func (l *Listers) GetCMChallengeLister() acmelisters.ChallengeLister { + return acmelisters.NewChallengeLister(l.IndexerFor(&acmev1alpha2.Challenge{})) +} + +func (l *Listers) GetImageLister() cachinglisters.ImageLister { + return cachinglisters.NewImageLister(l.IndexerFor(&cachingv1alpha1.Image{})) +} + +func (l *Listers) GetDeploymentLister() appsv1listers.DeploymentLister { + return appsv1listers.NewDeploymentLister(l.IndexerFor(&appsv1.Deployment{})) +} + +func (l *Listers) GetK8sServiceLister() corev1listers.ServiceLister { + return corev1listers.NewServiceLister(l.IndexerFor(&corev1.Service{})) +} + +func (l *Listers) GetEndpointsLister() corev1listers.EndpointsLister { + return corev1listers.NewEndpointsLister(l.IndexerFor(&corev1.Endpoints{})) +} + +func (l *Listers) GetSecretLister() corev1listers.SecretLister { + return corev1listers.NewSecretLister(l.IndexerFor(&corev1.Secret{})) +} + +func (l *Listers) GetConfigMapLister() corev1listers.ConfigMapLister { + return corev1listers.NewConfigMapLister(l.IndexerFor(&corev1.ConfigMap{})) +} + +// GetNamespaceLister gets lister for Namespace resource. +func (l *Listers) GetNamespaceLister() corev1listers.NamespaceLister { + return corev1listers.NewNamespaceLister(l.IndexerFor(&corev1.Namespace{})) +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/OWNERS b/test/vendor/knative.dev/serving/pkg/resources/OWNERS new file mode 100644 index 0000000000..e57e66dd50 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- serving-api-approvers + +reviewers: +- serving-api-reviewers + +labels: +- area/API diff --git a/test/vendor/knative.dev/serving/pkg/resources/doc.go b/test/vendor/knative.dev/serving/pkg/resources/doc.go new file mode 100644 index 0000000000..85e915958e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources contains various utilities for dealing with Kubernetes resources. +package resources diff --git a/test/vendor/knative.dev/serving/pkg/resources/endpoints.go b/test/vendor/knative.dev/serving/pkg/resources/endpoints.go new file mode 100644 index 0000000000..0366daef6f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/endpoints.go @@ -0,0 +1,71 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + corev1 "k8s.io/api/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// ReadyAddressCount returns the total number of addresses ready for the given endpoint. +func ReadyAddressCount(endpoints *corev1.Endpoints) int { + var total int + for _, subset := range endpoints.Subsets { + total += len(subset.Addresses) + } + return total +} + +// ReadyPodCounter provides a count of currently ready pods. This +// information is used by UniScaler implementations to make scaling +// decisions. The interface prevents the UniScaler from needing to +// know how counts are performed. +// The int return value represents the number of pods that are ready +// to handle incoming requests. +// The error value is returned if the ReadyPodCounter is unable to +// calculate a value. +type ReadyPodCounter interface { + ReadyCount() (int, error) +} + +type scopedEndpointCounter struct { + endpointsLister corev1listers.EndpointsLister + namespace string + serviceName string +} + +func (eac *scopedEndpointCounter) ReadyCount() (int, error) { + endpoints, err := eac.endpointsLister.Endpoints(eac.namespace).Get(eac.serviceName) + if err != nil { + return 0, err + } + return ReadyAddressCount(endpoints), nil +} + +// NewScopedEndpointsCounter creates a ReadyPodCounter that uses +// a count of endpoints for a namespace/serviceName as the value +// of ready pods. The values returned by ReadyCount() will vary +// over time. +// lister is used to retrieve endpoints for counting with the +// scope of namespace/serviceName. 
+func NewScopedEndpointsCounter(lister corev1listers.EndpointsLister, namespace, serviceName string) ReadyPodCounter { + return &scopedEndpointCounter{ + endpointsLister: lister, + namespace: namespace, + serviceName: serviceName, + } +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/endpoints_test.go b/test/vendor/knative.dev/serving/pkg/resources/endpoints_test.go new file mode 100644 index 0000000000..370c80ff3d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/endpoints_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeinformers "k8s.io/client-go/informers" + fakek8s "k8s.io/client-go/kubernetes/fake" +) + +const ( + testNamespace = "test-namespace" + testService = "test-service" +) + +func TestScopedEndpointsCounter(t *testing.T) { + kubeClient := fakek8s.NewSimpleClientset() + endpointsClient := kubeinformers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Endpoints() + createEndpoints := func(ep *corev1.Endpoints) { + kubeClient.CoreV1().Endpoints(testNamespace).Create(ep) + endpointsClient.Informer().GetIndexer().Add(ep) + } + + addressCounter := NewScopedEndpointsCounter(endpointsClient.Lister(), testNamespace, testService) + + tests := []struct { + name string + endpoints *corev1.Endpoints + want int + wantErr bool + }{{ + name: "no endpoints at all", + endpoints: nil, + want: 0, + wantErr: true, + }, { + name: "no ready addresses", + endpoints: endpoints(0), + want: 0, + }, { + name: "one ready address", + endpoints: endpoints(1), + want: 1, + }, { + name: "ten ready addresses", + endpoints: endpoints(10), + want: 10, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.endpoints != nil { + createEndpoints(test.endpoints) + } + got, err := addressCounter.ReadyCount() + if got != test.want { + t.Errorf("ReadyCount() = %d, want: %d", got, test.want) + } + if got, want := (err != nil), test.wantErr; got != want { + t.Errorf("WantErr = %v, want: %v, err: %v", got, want, err) + } + }) + } +} + +func TestReadyAddressCount(t *testing.T) { + tests := []struct { + name string + endpoints *corev1.Endpoints + want int + }{{ + name: "no ready addresses", + endpoints: endpoints(0), + want: 0, + }, { + name: "one ready address", + endpoints: endpoints(1), + want: 1, + }, { + name: "ten ready addresses", + endpoints: endpoints(10), + want: 10, + }} + + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + if got := ReadyAddressCount(test.endpoints); got != test.want { + t.Errorf("ReadyAddressCount() = %d, want: %d", got, test.want) + } + }) + } +} + +func endpoints(ipCount int) *corev1.Endpoints { + ep := &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testService, + }, + } + addresses := make([]corev1.EndpointAddress, ipCount) + for i := 0; i < ipCount; i++ { + addresses[i] = corev1.EndpointAddress{IP: fmt.Sprintf("127.0.0.%v", i+1)} + } + ep.Subsets = []corev1.EndpointSubset{{ + Addresses: addresses, + }} + return ep +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/meta.go b/test/vendor/knative.dev/serving/pkg/resources/meta.go new file mode 100644 index 0000000000..83be28fe93 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/meta.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +// CopyMap makes a copy of the map. +func CopyMap(a map[string]string) map[string]string { + ret := make(map[string]string, len(a)) + for k, v := range a { + ret[k] = v + } + return ret +} + +// UnionMaps returns a map constructed from the union of `a` and `b`, +// where value from `b` wins. 
+func UnionMaps(a, b map[string]string) map[string]string { + out := make(map[string]string, len(a)+len(b)) + + for k, v := range a { + out[k] = v + } + for k, v := range b { + out[k] = v + } + return out +} + +// FilterMap creates a copy of the provided map, filtering out the elements +// that match `filter`. +// nil `filter` is accepted. +func FilterMap(in map[string]string, filter func(string) bool) map[string]string { + ret := make(map[string]string, len(in)) + for k, v := range in { + if filter != nil && filter(k) { + continue + } + ret[k] = v + } + return ret +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/meta_test.go b/test/vendor/knative.dev/serving/pkg/resources/meta_test.go new file mode 100644 index 0000000000..b9e016bee2 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/meta_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestUnion(t *testing.T) { + tests := []struct { + name string + in map[string]string + add map[string]string + want map[string]string + }{{ + name: "nil all", + want: map[string]string{}, + }, { + name: "empty in", + in: map[string]string{}, + want: map[string]string{}, + }, { + name: "no in, only additions", + add: map[string]string{"wish": "you", "were": "here"}, + want: map[string]string{"wish": "you", "were": "here"}, + }, { + name: "in, no add", + in: map[string]string{"the-dark": "side"}, + want: map[string]string{"the-dark": "side"}, + }, { + name: "all together now", + in: map[string]string{"another": "brick"}, + add: map[string]string{"in": "the-wall"}, + want: map[string]string{"in": "the-wall", "another": "brick"}, + }, { + name: "merge wins", + in: map[string]string{"another": "brick", "in": "the-wall-pt-I"}, + add: map[string]string{"in": "the-wall-pt-II"}, + want: map[string]string{"in": "the-wall-pt-II", "another": "brick"}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := UnionMaps(test.in, test.add) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeLabels (-want, +got) = %v", diff) + } + }) + } +} + +func TestFilter(t *testing.T) { + tests := []struct { + name string + in map[string]string + filter func(string) bool + want map[string]string + }{{ + name: "nil in", + want: map[string]string{}, + }, { + name: "empty in", + in: map[string]string{}, + want: map[string]string{}, + }, { + name: "no in, with filter", + in: map[string]string{}, + filter: func(string) bool { return false }, + want: map[string]string{}, + }, { + name: "pass through", + in: map[string]string{"the-dark": "side"}, + want: map[string]string{"the-dark": "side"}, + }, { + name: "filter all", + in: map[string]string{"the-dark": "side", "of-there": "moon"}, + filter: func(string) bool { return true }, + want: map[string]string{}, + }, 
{ + name: "all together now", + in: map[string]string{"another": "brick", "in": "the-wall"}, + filter: func(s string) bool { return s == "in" }, + want: map[string]string{"another": "brick"}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := FilterMap(test.in, test.filter) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeAnnotations (-want, +got) = %v", diff) + } + }) + } +} + +func TestCopy(t *testing.T) { + tests := []struct { + name string + in map[string]string + want map[string]string + }{{ + name: "nil in", + want: map[string]string{}, + }, { + name: "empty in", + in: map[string]string{}, + want: map[string]string{}, + }, { + name: "copy", + in: map[string]string{"the-dark": "side"}, + want: map[string]string{"the-dark": "side"}, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := CopyMap(test.in) + if diff := cmp.Diff(test.want, got); diff != "" { + t.Errorf("MakeAnnotations (-want, +got) = %v", diff) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/resources.go b/test/vendor/knative.dev/serving/pkg/resources/resources.go new file mode 100644 index 0000000000..85e915958e --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/resources.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resources contains various utilities for dealing with Kubernetes resources. 
+package resources diff --git a/test/vendor/knative.dev/serving/pkg/resources/scale.go b/test/vendor/knative.dev/serving/pkg/resources/scale.go new file mode 100644 index 0000000000..90ab1637f4 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/scale.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "fmt" + + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + pav1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ScaleResourceArguments returns GroupResource and the resource name. +func ScaleResourceArguments(ref corev1.ObjectReference) (*schema.GroupVersionResource, string, error) { + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, "", err + } + resource := apis.KindToResource(gv.WithKind(ref.Kind)) + return &resource, ref.Name, nil +} + +// GetScaleResource returns the current scale resource for the PA. +// TODO(markusthoemmes): We shouldn't need to pass namespace here. 
+func GetScaleResource(namespace string, ref corev1.ObjectReference, psInformerFactory duck.InformerFactory) (*pav1alpha1.PodScalable, error) { + gvr, name, err := ScaleResourceArguments(ref) + if err != nil { + return nil, fmt.Errorf("error getting the scale arguments: %w", err) + } + _, lister, err := psInformerFactory.Get(*gvr) + if err != nil { + return nil, fmt.Errorf("error getting a lister for a pod scalable resource '%+v': %w", gvr, err) + } + + psObj, err := lister.ByNamespace(namespace).Get(name) + if err != nil { + return nil, fmt.Errorf("error fetching Pod Scalable %s/%s: %w", namespace, name, err) + } + return psObj.(*pav1alpha1.PodScalable), nil +} diff --git a/test/vendor/knative.dev/serving/pkg/resources/scale_test.go b/test/vendor/knative.dev/serving/pkg/resources/scale_test.go new file mode 100644 index 0000000000..96080c5332 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/resources/scale_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "knative.dev/pkg/apis/duck" + fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake" + "knative.dev/serving/pkg/apis/serving" + + podscalable "knative.dev/serving/pkg/client/injection/ducks/autoscaling/v1alpha1/podscalable/fake" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + . "knative.dev/pkg/reconciler/testing" +) + +func TestScaleResource(t *testing.T) { + cases := []struct { + name string + objectRef corev1.ObjectReference + wantGVR *schema.GroupVersionResource + wantName string + wantErr bool + }{{ + name: "all good", + objectRef: corev1.ObjectReference{ + Name: "test", + APIVersion: "apps/v1", + Kind: "deployment", + }, + wantGVR: &schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + wantName: "test", + }, { + name: "broken apiversion", + objectRef: corev1.ObjectReference{ + Name: "test", + APIVersion: "apps///v1", + Kind: "deployment", + }, + wantErr: true, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + gvr, name, err := ScaleResourceArguments(tc.objectRef) + + if !cmp.Equal(gvr, tc.wantGVR) { + t.Errorf("ScaleResource() = %v, want: %v, diff: %s", gvr, tc.wantGVR, cmp.Diff(gvr, tc.wantGVR)) + } + + if name != tc.wantName { + t.Errorf("ScaleResource() = %s, want %s", name, tc.wantName) + } + + if err == nil && tc.wantErr { + t.Error("ScaleResource() didn't return an error") + } + if err != nil && !tc.wantErr { + t.Errorf("ScaleResource() = %v, want no error", err) + } + }) + } +} + +func TestGetScaleResource(t *testing.T) { + ctx, _ := SetupFakeContext(t) + + deployment := newDeployment(t, fakedynamicclient.Get(ctx), "testdeployment", 5) + + psInformerFactory := podscalable.Get(ctx) + objectRef := 
corev1.ObjectReference{ + Name: deployment.Name, + Kind: "deployment", + APIVersion: "apps/v1", + } + scale, err := GetScaleResource(testNamespace, objectRef, psInformerFactory) + if err != nil { + t.Fatalf("GetScale got error = %v", err) + } + if got, want := scale.Status.Replicas, int32(5); got != want { + t.Errorf("GetScale.Status.Replicas = %d, want: %d", got, want) + } + if got, want := scale.Spec.Selector.MatchLabels[serving.RevisionUID], "1982"; got != want { + t.Errorf("GetScale.Status.Selector = %q, want = %q", got, want) + } +} + +func newDeployment(t *testing.T, dynamicClient dynamic.Interface, name string, replicas int) *v1.Deployment { + t.Helper() + + uns := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "namespace": testNamespace, + "name": name, + "uid": "1982", + }, + "spec": map[string]interface{}{ + "replicas": int64(replicas), + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + serving.RevisionUID: "1982", + }, + }, + }, + "status": map[string]interface{}{ + "replicas": int64(replicas), + }, + }, + } + + u, err := dynamicClient.Resource(schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }).Namespace(testNamespace).Create(uns, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Create() = %v", err) + } + + deployment := &v1.Deployment{} + if err := duck.FromUnstructured(u, deployment); err != nil { + t.Fatalf("FromUnstructured() = %v", err) + } + return deployment +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/OWNERS b/test/vendor/knative.dev/serving/pkg/testing/OWNERS new file mode 100644 index 0000000000..c52c9829af --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/OWNERS @@ -0,0 +1,16 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-approvers +- serving-api-approvers +- networking-approvers +- autoscaling-approvers + +reviewers: +- productivity-reviewers +- serving-api-approvers +- networking-approvers +- autoscaling-approvers + +labels: +- area/test-and-release diff --git a/test/vendor/knative.dev/serving/pkg/testing/functional.go b/test/vendor/knative.dev/serving/pkg/testing/functional.go new file mode 100644 index 0000000000..2f77f7ef03 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/functional.go @@ -0,0 +1,379 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/kmeta" + "knative.dev/serving/pkg/apis/autoscaling" + asv1a1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" +) + +// PodAutoscalerOption is an option that can be applied to a PA. +type PodAutoscalerOption func(*asv1a1.PodAutoscaler) + +// WithProtocolType sets the protocol type on the PodAutoscaler. 
+func WithProtocolType(pt networking.ProtocolType) PodAutoscalerOption {
+	return func(pa *asv1a1.PodAutoscaler) {
+		pa.Spec.ProtocolType = pt
+	}
+}
+
+// WithReachability sets the reachability of the PodAutoscaler to the given value
+func WithReachability(r asv1a1.ReachabilityType) PodAutoscalerOption {
+	return func(pa *asv1a1.PodAutoscaler) {
+		pa.Spec.Reachability = r
+	}
+}
+
+// WithReachabilityUnknown sets the reachability of the PodAutoscaler to unknown
+func WithReachabilityUnknown(pa *asv1a1.PodAutoscaler) {
+	WithReachability(asv1a1.ReachabilityUnknown)(pa)
+}
+
+// WithReachabilityReachable sets the reachability of the PodAutoscaler to reachable
+func WithReachabilityReachable(pa *asv1a1.PodAutoscaler) {
+	WithReachability(asv1a1.ReachabilityReachable)(pa)
+}
+
+// WithReachabilityUnreachable sets the reachability of the PodAutoscaler to unreachable
+func WithReachabilityUnreachable(pa *asv1a1.PodAutoscaler) {
+	WithReachability(asv1a1.ReachabilityUnreachable)(pa)
+}
+
+// WithPAOwnersRemoved clears the owner references of this PA resource.
+func WithPAOwnersRemoved(pa *asv1a1.PodAutoscaler) {
+	pa.OwnerReferences = nil
+}
+
+// MarkResourceNotOwnedByPA marks PA when it's not owning a resource it is supposed to own.
+func MarkResourceNotOwnedByPA(rType, name string) PodAutoscalerOption {
+	return func(pa *asv1a1.PodAutoscaler) {
+		pa.Status.MarkResourceNotOwned(rType, name)
+	}
+}
+
+// WithPodAutoscalerOwnersRemoved clears the owner references of this PodAutoscaler.
+func WithPodAutoscalerOwnersRemoved(r *asv1a1.PodAutoscaler) {
+	r.OwnerReferences = nil
+}
+
+// WithTraffic updates the PA to reflect it receiving traffic.
+func WithTraffic(pa *asv1a1.PodAutoscaler) {
+	pa.Status.MarkActive()
+}
+
+// WithPAStatusService annotates PA Status with the provided service name.
+func WithPAStatusService(svc string) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.ServiceName = svc + } +} + +// WithPAMetricsService annotates PA Status with the provided service name. +func WithPAMetricsService(svc string) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.MetricsServiceName = svc + } +} + +// WithBufferedTraffic updates the PA to reflect that it has received +// and buffered traffic while it is being activated. +func WithBufferedTraffic(reason, message string) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkActivating(reason, message) + } +} + +// WithNoTraffic updates the PA to reflect the fact that it is not +// receiving traffic. +func WithNoTraffic(reason, message string) PodAutoscalerOption { + return func(pa *asv1a1.PodAutoscaler) { + pa.Status.MarkInactive(reason, message) + } +} + +// WithPADeletionTimestamp will set the DeletionTimestamp on the PodAutoscaler. +func WithPADeletionTimestamp(r *asv1a1.PodAutoscaler) { + t := metav1.NewTime(time.Unix(1e9, 0)) + r.ObjectMeta.SetDeletionTimestamp(&t) +} + +// WithHPAClass updates the PA to add the hpa class annotation. +func WithHPAClass(pa *asv1a1.PodAutoscaler) { + if pa.Annotations == nil { + pa.Annotations = make(map[string]string) + } + pa.Annotations[autoscaling.ClassAnnotationKey] = autoscaling.HPA +} + +// WithPAContainerConcurrency returns a PodAutoscalerOption which sets +// the PodAutoscaler containerConcurrency to the provided value. 
+func WithPAContainerConcurrency(cc int64) PodAutoscalerOption {
+	return func(pa *asv1a1.PodAutoscaler) {
+		pa.Spec.ContainerConcurrency = cc
+	}
+}
+
+func withAnnotationValue(key, value string) PodAutoscalerOption {
+	return func(pa *asv1a1.PodAutoscaler) {
+		if pa.Annotations == nil {
+			pa.Annotations = make(map[string]string)
+		}
+		pa.Annotations[key] = value
+	}
+}
+
+// WithTargetAnnotation returns a PodAutoscalerOption which sets
+// the PodAutoscaler autoscaling.knative.dev/target annotation to the
+// provided value.
+func WithTargetAnnotation(target string) PodAutoscalerOption {
+	return withAnnotationValue(autoscaling.TargetAnnotationKey, target)
+}
+
+// WithTUAnnotation returns a PodAutoscalerOption which sets
+// the PodAutoscaler autoscaling.knative.dev/targetUtilizationPercentage
+// annotation to the provided value.
+func WithTUAnnotation(tu string) PodAutoscalerOption {
+	return withAnnotationValue(autoscaling.TargetUtilizationPercentageKey, tu)
+}
+
+// WithWindowAnnotation returns a PodAutoscalerOption which sets
+// the PodAutoscaler autoscaling.knative.dev/window annotation to the
+// provided value.
+func WithWindowAnnotation(window string) PodAutoscalerOption {
+	return withAnnotationValue(autoscaling.WindowAnnotationKey, window)
+}
+
+// WithPanicThresholdPercentageAnnotation returns a PodAutoscalerOption
+// which sets the PodAutoscaler
+// autoscaling.knative.dev/targetPanicPercentage annotation to the
+// provided value.
+func WithPanicThresholdPercentageAnnotation(percentage string) PodAutoscalerOption {
+	return withAnnotationValue(autoscaling.PanicThresholdPercentageAnnotationKey, percentage)
+}
+
+// WithPanicWindowPercentageAnnotation returns a PodAutoscalerOption
+// which sets the PodAutoscaler
+// autoscaling.knative.dev/windowPanicPercentage annotation to the
+// provided value.
+func WithPanicWindowPercentageAnnotation(percentage string) PodAutoscalerOption { + return withAnnotationValue(autoscaling.PanicWindowPercentageAnnotationKey, percentage) +} + +// WithMetricAnnotation adds a metric annotation to the PA. +func WithMetricAnnotation(metric string) PodAutoscalerOption { + return withAnnotationValue(autoscaling.MetricAnnotationKey, metric) +} + +// WithMetricOwnersRemoved clears the owner references of this PodAutoscaler. +func WithMetricOwnersRemoved(m *asv1a1.Metric) { + m.OwnerReferences = nil +} + +// WithUpperScaleBound sets maxScale to the given number. +func WithUpperScaleBound(i int) PodAutoscalerOption { + return withAnnotationValue(autoscaling.MaxScaleAnnotationKey, strconv.Itoa(i)) +} + +// WithLowerScaleBound sets minScale to the given number. +func WithLowerScaleBound(i int) PodAutoscalerOption { + return withAnnotationValue(autoscaling.MinScaleAnnotationKey, strconv.Itoa(i)) +} + +// K8sServiceOption enables further configuration of the Kubernetes Service. +type K8sServiceOption func(*corev1.Service) + +// OverrideServiceName changes the name of the Kubernetes Service. +func OverrideServiceName(name string) K8sServiceOption { + return func(svc *corev1.Service) { + svc.Name = name + } +} + +// MutateK8sService changes the service in a way that must be reconciled. +func MutateK8sService(svc *corev1.Service) { + // An effective hammer ;-P + svc.Spec = corev1.ServiceSpec{} +} + +// WithClusterIP assigns a ClusterIP to the K8s Service. +func WithClusterIP(ip string) K8sServiceOption { + return func(svc *corev1.Service) { + svc.Spec.ClusterIP = ip + } +} + +// WithExternalName gives external name to the K8s Service. +func WithExternalName(name string) K8sServiceOption { + return func(svc *corev1.Service) { + svc.Spec.ExternalName = name + } +} + +// WithK8sSvcOwnersRemoved clears the owner references of this Service. 
+func WithK8sSvcOwnersRemoved(svc *corev1.Service) {
+	svc.OwnerReferences = nil
+}
+
+// EndpointsOption enables further configuration of the Kubernetes Endpoints.
+type EndpointsOption func(*corev1.Endpoints)
+
+// WithSubsets adds subsets to the body of a Revision, enabling us to infer readiness.
+func WithSubsets(ep *corev1.Endpoints) {
+	ep.Subsets = []corev1.EndpointSubset{{
+		Addresses: []corev1.EndpointAddress{{IP: "127.0.0.1"}},
+		Ports:     []corev1.EndpointPort{{Port: 8012}, {Port: 8013}},
+	}}
+}
+
+// WithEndpointsOwnersRemoved clears the owner references of this Endpoints resource.
+func WithEndpointsOwnersRemoved(eps *corev1.Endpoints) {
+	eps.OwnerReferences = nil
+}
+
+// PodOption enables further configuration of a Pod.
+type PodOption func(*corev1.Pod)
+
+// WithFailingContainer sets the .Status.ContainerStatuses on the pod to
+// include a container named accordingly to fail with the given state.
+func WithFailingContainer(name string, exitCode int, message string) PodOption {
+	return func(pod *corev1.Pod) {
+		pod.Status.ContainerStatuses = []corev1.ContainerStatus{{
+			Name: name,
+			LastTerminationState: corev1.ContainerState{
+				Terminated: &corev1.ContainerStateTerminated{
+					ExitCode: int32(exitCode),
+					Message:  message,
+				},
+			},
+		}}
+	}
+}
+
+// WithUnschedulableContainer sets the .Status.Conditions on the pod to
+// include a `PodScheduled` condition set to `False` with the given message and reason.
+func WithUnschedulableContainer(reason, message string) PodOption {
+	return func(pod *corev1.Pod) {
+		pod.Status.Conditions = []corev1.PodCondition{{
+			Type:    corev1.PodScheduled,
+			Reason:  reason,
+			Message: message,
+			Status:  corev1.ConditionFalse,
+		}}
+	}
+}
+
+// WithWaitingContainer sets the .Status.ContainerStatuses on the pod to
+// include a container named accordingly to wait with the given state.
+func WithWaitingContainer(name, reason, message string) PodOption { + return func(pod *corev1.Pod) { + pod.Status.ContainerStatuses = []corev1.ContainerStatus{{ + Name: name, + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: reason, + Message: message, + }, + }, + }} + } +} + +// IngressOption enables further configuration of the Ingress. +type IngressOption func(*netv1alpha1.Ingress) + +// WithHosts sets the Hosts of the ingress rule specified index +func WithHosts(index int, hosts ...string) IngressOption { + return func(ingress *netv1alpha1.Ingress) { + ingress.Spec.Rules[index].Hosts = hosts + } +} + +// SKSOption is a callback type for decorate SKS objects. +type SKSOption func(sks *netv1alpha1.ServerlessService) + +// WithPubService annotates SKS status with the given service name. +func WithPubService(sks *netv1alpha1.ServerlessService) { + sks.Status.ServiceName = sks.Name +} + +// WithDeployRef annotates SKS with a deployment objectRef +func WithDeployRef(name string) SKSOption { + return func(sks *netv1alpha1.ServerlessService) { + sks.Spec.ObjectRef = corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name, + } + } +} + +// WithSKSReady marks SKS as ready. +func WithSKSReady(sks *netv1alpha1.ServerlessService) { + WithPrivateService(sks) + WithPubService(sks) + sks.Status.MarkEndpointsReady() +} + +// WithPrivateService annotates SKS status with the private service name. +func WithPrivateService(sks *netv1alpha1.ServerlessService) { + sks.Status.PrivateServiceName = kmeta.ChildName(sks.Name, "-private") +} + +// WithSKSOwnersRemoved clears the owner references of this SKS resource. +func WithSKSOwnersRemoved(sks *netv1alpha1.ServerlessService) { + sks.OwnerReferences = nil +} + +// WithProxyMode puts SKS into proxy mode. +func WithProxyMode(sks *netv1alpha1.ServerlessService) { + sks.Spec.Mode = netv1alpha1.SKSOperationModeProxy +} + +// SKS creates a generic ServerlessService object. 
+func SKS(ns, name string, so ...SKSOption) *netv1alpha1.ServerlessService { + s := &netv1alpha1.ServerlessService{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + UID: "test-uid", + }, + Spec: netv1alpha1.ServerlessServiceSpec{ + Mode: netv1alpha1.SKSOperationModeServe, + ProtocolType: networking.ProtocolHTTP1, + ObjectRef: corev1.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "foo-deployment", + }, + }, + } + // By default for tests we can presume happy-serve path. + s.Status.MarkActivatorEndpointsRemoved() + for _, opt := range so { + opt(s) + } + return s +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1/configuration.go b/test/vendor/knative.dev/serving/pkg/testing/v1/configuration.go new file mode 100644 index 0000000000..5b14cf8f9d --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1/configuration.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// ConfigOption enables further configuration of a Configuration. +type ConfigOption func(*v1.Configuration) + +// WithConfigReadinessProbe sets the provided probe to be the readiness +// probe on the configuration. 
+func WithConfigReadinessProbe(p *corev1.Probe) ConfigOption { + return func(cfg *v1.Configuration) { + cfg.Spec.Template.Spec.Containers[0].ReadinessProbe = p + } +} + +// WithConfigImage sets the container image to be the provided string. +func WithConfigImage(img string) ConfigOption { + return func(cfg *v1.Configuration) { + cfg.Spec.Template.Spec.Containers[0].Image = img + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1/route.go b/test/vendor/knative.dev/serving/pkg/testing/v1/route.go new file mode 100644 index 0000000000..2cfc3745b0 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1/route.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// RouteOption enables further configuration of a Route. +type RouteOption func(*v1.Route) diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1/service.go b/test/vendor/knative.dev/serving/pkg/testing/v1/service.go new file mode 100644 index 0000000000..d759f44b2f --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1/service.go @@ -0,0 +1,203 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + presources "knative.dev/serving/pkg/resources" +) + +// ServiceOption enables further configuration of a Service. +type ServiceOption func(*v1.Service) + +// Service creates a service with ServiceOptions +func Service(name, namespace string, so ...ServiceOption) *v1.Service { + s := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// ServiceWithoutNamespace creates a service with ServiceOptions but without a specific namespace +func ServiceWithoutNamespace(name string, so ...ServiceOption) *v1.Service { + s := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// WithInlineConfigSpec confgures the Service to use the given config spec +func WithInlineConfigSpec(config v1.ConfigurationSpec) ServiceOption { + return func(svc *v1.Service) { + svc.Spec.ConfigurationSpec = config + } +} + +// WithNamedPort sets the name on the Service's port to the provided name +func WithNamedPort(name string) ServiceOption { + return func(svc *v1.Service) { + c := &svc.Spec.Template.Spec.Containers[0] + if len(c.Ports) == 1 { + c.Ports[0].Name = name + } else { + c.Ports = []corev1.ContainerPort{{ + Name: name, + }} + } + } +} + +// WithNumberedPort sets the Service's port number to what's provided. 
+func WithNumberedPort(number int32) ServiceOption {
+	return func(svc *v1.Service) {
+		c := &svc.Spec.Template.Spec.Containers[0]
+		if len(c.Ports) == 1 {
+			c.Ports[0].ContainerPort = number
+		} else {
+			c.Ports = []corev1.ContainerPort{{
+				ContainerPort: number,
+			}}
+		}
+	}
+}
+
+// WithServiceDefaults will set the default values on the service.
+func WithServiceDefaults(svc *v1.Service) {
+	svc.SetDefaults(context.Background())
+}
+
+// WithResourceRequirements attaches resource requirements to the service
+func WithResourceRequirements(resourceRequirements corev1.ResourceRequirements) ServiceOption {
+	return func(svc *v1.Service) {
+		svc.Spec.Template.Spec.Containers[0].Resources = resourceRequirements
+	}
+}
+
+// WithServiceAnnotation adds the given annotation to the service.
+func WithServiceAnnotation(k, v string) ServiceOption {
+	return func(svc *v1.Service) {
+		svc.Annotations = presources.UnionMaps(svc.Annotations, map[string]string{
+			k: v,
+		})
+	}
+}
+
+// WithServiceAnnotationRemoved removes the given annotation from the service.
+func WithServiceAnnotationRemoved(k string) ServiceOption {
+	return func(svc *v1.Service) {
+		svc.Annotations = presources.FilterMap(svc.Annotations, func(s string) bool {
+			return k == s
+		})
+	}
+}
+
+// WithServiceImage sets the container image to be the provided string.
+func WithServiceImage(img string) ServiceOption {
+	return func(svc *v1.Service) {
+		svc.Spec.Template.Spec.Containers[0].Image = img
+	}
+}
+
+// WithServiceTemplateMeta sets the Service template's ObjectMeta to the provided value.
+func WithServiceTemplateMeta(m metav1.ObjectMeta) ServiceOption { + return func(svc *v1.Service) { + svc.Spec.Template.ObjectMeta = m + } +} + +// WithRevisionTimeoutSeconds sets revision timeout +func WithRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ServiceOption { + return func(service *v1.Service) { + service.Spec.Template.Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) + } +} + +// WithServiceAccountName sets revision service account name +func WithServiceAccountName(serviceAccountName string) ServiceOption { + return func(service *v1.Service) { + service.Spec.Template.Spec.ServiceAccountName = serviceAccountName + } +} + +// WithContainerConcurrency sets the given Service's concurrency. +func WithContainerConcurrency(cc int64) ServiceOption { + return func(svc *v1.Service) { + svc.Spec.Template.Spec.ContainerConcurrency = &cc + } +} + +// WithVolume adds a volume to the service +func WithVolume(name, mountPath string, volumeSource corev1.VolumeSource) ServiceOption { + return func(svc *v1.Service) { + rt := &svc.Spec.ConfigurationSpec.Template.Spec + + rt.Containers[0].VolumeMounts = append(rt.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + }) + + rt.Volumes = append(rt.Volumes, corev1.Volume{ + Name: name, + VolumeSource: volumeSource, + }) + } +} + +// WithEnv configures the Service to use the provided environment variables. +func WithEnv(evs ...corev1.EnvVar) ServiceOption { + return func(s *v1.Service) { + s.Spec.Template.Spec.Containers[0].Env = evs + } +} + +// WithEnvFrom configures the Service to use the provided environment variables. +func WithEnvFrom(evs ...corev1.EnvFromSource) ServiceOption { + return func(s *v1.Service) { + s.Spec.Template.Spec.Containers[0].EnvFrom = evs + } +} + +// WithSecurityContext configures the Service to use the provided security context. 
+func WithSecurityContext(sc *corev1.SecurityContext) ServiceOption { + return func(s *v1.Service) { + s.Spec.Template.Spec.Containers[0].SecurityContext = sc + } +} + +// WithWorkingDir configures the Service to use the provided working directory. +func WithWorkingDir(wd string) ServiceOption { + return func(s *v1.Service) { + s.Spec.Template.Spec.Containers[0].WorkingDir = wd + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/configuration.go b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/configuration.go new file mode 100644 index 0000000000..b59aac77e1 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/configuration.go @@ -0,0 +1,122 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// ConfigOption enables further configuration of a Configuration. +type ConfigOption func(*v1alpha1.Configuration) + +// WithConfigDeletionTimestamp will set the DeletionTimestamp on the Config. +func WithConfigDeletionTimestamp(r *v1alpha1.Configuration) { + t := metav1.NewTime(time.Unix(1e9, 0)) + r.ObjectMeta.SetDeletionTimestamp(&t) +} + +// WithConfigOwnersRemoved clears the owner references of this Configuration. 
+func WithConfigOwnersRemoved(cfg *v1alpha1.Configuration) { + cfg.OwnerReferences = nil +} + +// WithConfigContainerConcurrency sets the given Configuration's concurrency. +func WithConfigContainerConcurrency(cc int64) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Spec.GetTemplate().Spec.ContainerConcurrency = &cc + } +} + +// WithGeneration sets the generation of the Configuration. +func WithGeneration(gen int64) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Generation = gen + } +} + +// WithObservedGen sets the observed generation of the Configuration. +func WithObservedGen(cfg *v1alpha1.Configuration) { + cfg.Status.ObservedGeneration = cfg.Generation +} + +// WithCreatedAndReady sets the latest{Created,Ready}RevisionName on the Configuration. +func WithCreatedAndReady(created, ready string) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Status.SetLatestCreatedRevisionName(created) + cfg.Status.SetLatestReadyRevisionName(ready) + } +} + +// WithLatestCreated initializes the .status.latestCreatedRevisionName to be the name +// of the latest revision that the Configuration would have created. +func WithLatestCreated(name string) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Status.SetLatestCreatedRevisionName(name) + } +} + +// WithLatestReady initializes the .status.latestReadyRevisionName to be the name +// of the latest revision that the Configuration would have created. +func WithLatestReady(name string) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Status.SetLatestReadyRevisionName(name) + } +} + +// MarkRevisionCreationFailed calls .Status.MarkRevisionCreationFailed. +func MarkRevisionCreationFailed(msg string) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Status.MarkRevisionCreationFailed(msg) + } +} + +// MarkLatestCreatedFailed calls .Status.MarkLatestCreatedFailed. 
+func MarkLatestCreatedFailed(msg string) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Status.MarkLatestCreatedFailed(cfg.Status.LatestCreatedRevisionName, msg) + } +} + +// WithConfigLabel attaches a particular label to the configuration. +func WithConfigLabel(key, value string) ConfigOption { + return func(config *v1alpha1.Configuration) { + if config.Labels == nil { + config.Labels = make(map[string]string) + } + config.Labels[key] = value + } +} + +// WithConfigReadinessProbe sets the provided probe to be the readiness +// probe on the configuration. +func WithConfigReadinessProbe(p *corev1.Probe) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Spec.Template.Spec.Containers[0].ReadinessProbe = p + } +} + +// WithConfigRevisionTimeoutSeconds sets revision timeout. +func WithConfigRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ConfigOption { + return func(cfg *v1alpha1.Configuration) { + cfg.Spec.Template.Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/functional.go b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/functional.go new file mode 100644 index 0000000000..22618a6df6 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/functional.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +var ( + // configSpec is the spec used for the different styles of Service rollout. + configSpec = v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: "busybox", + }, + RevisionSpec: v1.RevisionSpec{ + TimeoutSeconds: ptr.Int64(60), + }, + }, + }, + } +) diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/revision.go b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/revision.go new file mode 100644 index 0000000000..53b7733174 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/revision.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// RevisionOption enables further configuration of a Revision. +type RevisionOption func(*v1alpha1.Revision) + +// WithRevisionDeletionTimestamp will set the DeletionTimestamp on the Revision. 
+func WithRevisionDeletionTimestamp(r *v1alpha1.Revision) { + t := metav1.NewTime(time.Unix(1e9, 0)) + r.ObjectMeta.SetDeletionTimestamp(&t) +} + +// WithInitRevConditions calls .Status.InitializeConditions() on a Revision. +func WithInitRevConditions(r *v1alpha1.Revision) { + r.Status.InitializeConditions() +} + +// WithRevName sets the name of the revision +func WithRevName(name string) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Name = name + } +} + +// WithServiceName propagates the given service name to the revision status. +func WithServiceName(sn string) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Status.ServiceName = sn + } +} + +// MarkResourceNotOwned calls the function of the same name on the Revision's status. +func MarkResourceNotOwned(kind, name string) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Status.MarkResourcesAvailableFalse(v1alpha1.NotOwned, v1alpha1.ResourceNotOwnedMessage(kind, name)) + } +} + +// WithRevContainerConcurrency sets the given Revision's concurrency. +func WithRevContainerConcurrency(cc int64) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Spec.ContainerConcurrency = &cc + } +} + +// WithLogURL sets the .Status.LogURL to the expected value. +func WithLogURL(r *v1alpha1.Revision) { + r.Status.LogURL = "http://logger.io/test-uid" +} + +// WithCreationTimestamp sets the Revision's timestamp to the provided time. +// TODO(mattmoor): Ideally this could be a more generic Option and use meta.Accessor, +// but unfortunately Go's type system cannot support that. +func WithCreationTimestamp(t time.Time) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.ObjectMeta.CreationTimestamp = metav1.Time{Time: t} + } +} + +// WithLastPinned updates the "last pinned" annotation to the provided timestamp. 
+func WithLastPinned(t time.Time) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.SetLastPinned(t) + } +} + +// WithRevStatus is a generic escape hatch for creating hard-to-craft +// status orientations. +func WithRevStatus(st v1alpha1.RevisionStatus) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Status = st + } +} + +// WithImagePullSecrets updates the revision spec ImagePullSecrets to +// the provided secrets +func WithImagePullSecrets(secretName string) RevisionOption { + return func(rev *v1alpha1.Revision) { + rev.Spec.ImagePullSecrets = []corev1.LocalObjectReference{{ + Name: secretName, + }} + } +} + +// MarkActive calls .Status.MarkActive on the Revision. +func MarkActive(r *v1alpha1.Revision) { + r.Status.MarkActiveTrue() +} + +// MarkInactive calls .Status.MarkInactive on the Revision. +func MarkInactive(reason, message string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkActiveFalse(reason, message) + } +} + +// MarkActivating calls .Status.MarkActivating on the Revision. +func MarkActivating(reason, message string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkActiveUnknown(reason, message) + } +} + +// MarkDeploying calls .Status.MarkDeploying on the Revision. +func MarkDeploying(reason string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkResourcesAvailableUnknown(reason, "") + r.Status.MarkContainerHealthyUnknown(reason, "") + } +} + +// MarkProgressDeadlineExceeded calls the method of the same name on the Revision +// with the message we expect the Revision Reconciler to pass. +func MarkProgressDeadlineExceeded(message string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkResourcesAvailableFalse(v1alpha1.ProgressDeadlineExceeded, message) + } +} + +// MarkContainerMissing calls .Status.MarkContainerMissing on the Revision. 
+func MarkContainerMissing(rev *v1alpha1.Revision) { + rev.Status.MarkContainerHealthyFalse(v1alpha1.ContainerMissing, "It's the end of the world as we know it") +} + +// MarkContainerExiting calls .Status.MarkContainerExiting on the Revision. +func MarkContainerExiting(exitCode int32, message string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkContainerHealthyFalse(v1alpha1.ExitCodeReason(exitCode), message) + } +} + +// MarkResourcesUnavailable calls .Status.MarkResourcesUnavailable on the Revision. +func MarkResourcesUnavailable(reason, message string) RevisionOption { + return func(r *v1alpha1.Revision) { + r.Status.MarkResourcesAvailableFalse(reason, message) + } +} + +// MarkRevisionReady calls the necessary helpers to make the Revision Ready=True. +func MarkRevisionReady(r *v1alpha1.Revision) { + WithInitRevConditions(r) + MarkActive(r) + r.Status.MarkResourcesAvailableTrue() + r.Status.MarkContainerHealthyTrue() +} + +// WithRevisionLabel attaches a particular label to the revision. +func WithRevisionLabel(key, value string) RevisionOption { + return func(config *v1alpha1.Revision) { + if config.Labels == nil { + config.Labels = make(map[string]string) + } + config.Labels[key] = value + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/route.go b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/route.go new file mode 100644 index 0000000000..53cab53e01 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/route.go @@ -0,0 +1,252 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/networking" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + routenames "knative.dev/serving/pkg/reconciler/route/resources/names" +) + +// RouteOption enables further configuration of a Route. +type RouteOption func(*v1alpha1.Route) + +// WithSpecTraffic sets the Route's traffic block to the specified traffic targets. +func WithSpecTraffic(traffic ...v1alpha1.TrafficTarget) RouteOption { + return func(r *v1alpha1.Route) { + r.Spec.Traffic = traffic + } +} + +// WithRouteUID sets the Route's UID +func WithRouteUID(uid types.UID) RouteOption { + return func(r *v1alpha1.Route) { + r.ObjectMeta.UID = uid + } +} + +// WithRouteFinalizer adds the Route finalizer to the Route. +func WithRouteFinalizer(r *v1alpha1.Route) { + r.ObjectMeta.Finalizers = append(r.ObjectMeta.Finalizers, "routes.serving.knative.dev") +} + +// WithConfigTarget sets the Route's traffic block to point at a particular Configuration. 
+func WithConfigTarget(config string) RouteOption { + return WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + ConfigurationName: config, + Percent: ptr.Int64(100), + }, + }) +} + +// WithRevTarget sets the Route's traffic block to point at a particular Revision. +func WithRevTarget(revision string) RouteOption { + return WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: revision, + Percent: ptr.Int64(100), + }, + }) +} + +// WithStatusTraffic sets the Route's status traffic block to the specified traffic targets. +func WithStatusTraffic(traffic ...v1alpha1.TrafficTarget) RouteOption { + return func(r *v1alpha1.Route) { + r.Status.Traffic = traffic + } +} + +// WithRouteOwnersRemoved clears the owner references of this Route. +func WithRouteOwnersRemoved(r *v1alpha1.Route) { + r.OwnerReferences = nil +} + +// MarkServiceNotOwned calls the function of the same name on the Service's status. +func MarkServiceNotOwned(r *v1alpha1.Route) { + r.Status.MarkServiceNotOwned(routenames.K8sService(r)) +} + +// WithURL sets the .Status.Domain field to the prototypical domain. +func WithURL(r *v1alpha1.Route) { + r.Status.URL = &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.example.com", r.Name, r.Namespace), + } +} + +func WithHTTPSDomain(r *v1alpha1.Route) { + r.Status.URL = &apis.URL{ + Scheme: "https", + Host: fmt.Sprintf("%s.%s.example.com", r.Name, r.Namespace), + } +} + +// WithAddress sets the .Status.Address field to the prototypical internal hostname. +func WithAddress(r *v1alpha1.Route) { + r.Status.Address = &duckv1alpha1.Addressable{ + Addressable: duckv1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.svc.cluster.local", r.Name, r.Namespace), + }, + }, + } +} + +// WithAnotherDomain sets the .Status.Domain field to an atypical domain. 
+func WithAnotherDomain(r *v1alpha1.Route) { + r.Status.URL = &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.another-example.com", r.Name, r.Namespace), + } +} + +// WithLocalDomain sets the .Status.Domain field to use `svc.cluster.local` suffix. +func WithLocalDomain(r *v1alpha1.Route) { + r.Status.URL = &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.svc.cluster.local", r.Name, r.Namespace), + } +} + +// WithInitRouteConditions initializes the Service's conditions. +func WithInitRouteConditions(rt *v1alpha1.Route) { + rt.Status.InitializeConditions() +} + +// MarkTrafficAssigned calls the method of the same name on .Status +func MarkTrafficAssigned(r *v1alpha1.Route) { + r.Status.MarkTrafficAssigned() +} + +// MarkCertificateNotReady calls the method of the same name on .Status +func MarkCertificateNotReady(r *v1alpha1.Route) { + r.Status.MarkCertificateNotReady(routenames.Certificate(r)) +} + +// MarkCertificateNotOwned calls the method of the same name on .Status +func MarkCertificateNotOwned(r *v1alpha1.Route) { + r.Status.MarkCertificateNotOwned(routenames.Certificate(r)) +} + +// MarkCertificateReady calls the method of the same name on .Status +func MarkCertificateReady(r *v1alpha1.Route) { + r.Status.MarkCertificateReady(routenames.Certificate(r)) +} + +// WithReadyCertificateName marks the certificate specified by name as ready. +func WithReadyCertificateName(name string) func(*v1alpha1.Route) { + return func(r *v1alpha1.Route) { + r.Status.MarkCertificateReady(name) + } +} + +// MarkIngressReady propagates a Ready=True Ingress status to the Route. 
+func MarkIngressReady(r *v1alpha1.Route) { + r.Status.PropagateIngressStatus(netv1alpha1.IngressStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + }) +} + +// MarkIngressNotConfigured calls the method of the same name on .Status +func MarkIngressNotConfigured(r *v1alpha1.Route) { + r.Status.MarkIngressNotConfigured() +} + +// MarkMissingTrafficTarget calls the method of the same name on .Status +func MarkMissingTrafficTarget(kind, revision string) RouteOption { + return func(r *v1alpha1.Route) { + r.Status.MarkMissingTrafficTarget(kind, revision) + } +} + +// MarkConfigurationNotReady calls the method of the same name on .Status +func MarkConfigurationNotReady(name string) RouteOption { + return func(r *v1alpha1.Route) { + r.Status.MarkConfigurationNotReady(name) + } +} + +// MarkConfigurationFailed calls the method of the same name on .Status +func MarkConfigurationFailed(name string) RouteOption { + return func(r *v1alpha1.Route) { + r.Status.MarkConfigurationFailed(name) + } +} + +// WithRouteLabel sets the specified label on the Route. +func WithRouteLabel(labels map[string]string) RouteOption { + return func(r *v1alpha1.Route) { + if r.Labels == nil { + r.Labels = make(map[string]string) + } + r.Labels = labels + } +} + +// WithIngressClass sets the ingress class annotation on the Route. +func WithIngressClass(ingressClass string) RouteOption { + return func(r *v1alpha1.Route) { + if r.Annotations == nil { + r.Annotations = make(map[string]string) + } + r.Annotations[networking.IngressClassAnnotationKey] = ingressClass + } +} + +// WithRouteAnnotation sets the specified annotation on the Route. 
+func WithRouteAnnotation(annotation map[string]string) RouteOption { + return func(r *v1alpha1.Route) { + if r.Annotations == nil { + r.Annotations = make(map[string]string) + } + r.Annotations = annotation + } +} + +// Route creates a route with RouteOptions +func Route(namespace, name string, ro ...RouteOption) *v1alpha1.Route { + r := &v1alpha1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + for _, opt := range ro { + opt(r) + } + r.SetDefaults(context.Background()) + return r +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/service.go b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/service.go new file mode 100644 index 0000000000..1cc87d31b8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1alpha1/service.go @@ -0,0 +1,519 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/reconciler/route/domains" + servicenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/pkg/resources" +) + +// ServiceOption enables further configuration of a Service. +type ServiceOption func(*v1alpha1.Service) + +// Service creates a service with ServiceOptions +func Service(name, namespace string, so ...ServiceOption) *v1alpha1.Service { + s := &v1alpha1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// ServiceWithoutNamespace creates a service with ServiceOptions but without a specific namespace +func ServiceWithoutNamespace(name string, so ...ServiceOption) *v1alpha1.Service { + s := &v1alpha1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// DefaultService creates a service with ServiceOptions and with default values set +func DefaultService(name, namespace string, so ...ServiceOption) *v1alpha1.Service { + return Service(name, namespace, append(so, WithServiceDefaults)...) +} + +// WithServiceDefaults will set the default values on the service. +func WithServiceDefaults(svc *v1alpha1.Service) { + svc.SetDefaults(context.Background()) +} + +// WithServiceDeletionTimestamp will set the DeletionTimestamp on the Service. 
+func WithServiceDeletionTimestamp(r *v1alpha1.Service) { + t := metav1.NewTime(time.Unix(1e9, 0)) + r.ObjectMeta.SetDeletionTimestamp(&t) +} + +// WithEnv configures the Service to use the provided environment variables. +func WithEnv(evs ...corev1.EnvVar) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].Env = evs + } +} + +// WithEnvFrom configures the Service to use the provided environment variables. +func WithEnvFrom(evs ...corev1.EnvFromSource) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].EnvFrom = evs + } +} + +// WithInlineRollout configures the Service to be "run latest" via inline +// Route/Configuration +func WithInlineRollout(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + ConfigurationSpec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + }, + }, + }, + }, + RouteSpec: v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }}, + }, + } +} + +// WithInlineNamedRevision configures the Service to use BYO Revision in the +// template spec and reference that same revision name in the route spec. 
+func WithInlineNamedRevision(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + ConfigurationSpec: v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name + "-byo", + }, + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: "busybox", + }}, + }, + TimeoutSeconds: ptr.Int64(60), + }, + }, + }, + }, + RouteSpec: v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: s.Name + "-byo", + Percent: ptr.Int64(100), + }, + }}, + }, + } +} + +// WithRunLatestRollout configures the Service to use a "runLatest" rollout. +func WithRunLatestRollout(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + DeprecatedRunLatest: &v1alpha1.RunLatestType{ + Configuration: *configSpec.DeepCopy(), + }, + } +} + +// WithInlineConfigSpec configures the Service to use the given config spec +func WithInlineConfigSpec(config v1alpha1.ConfigurationSpec) ServiceOption { + return func(svc *v1alpha1.Service) { + svc.Spec.ConfigurationSpec = config + } +} + +// WithInlineRouteSpec configures the Service to use the given route spec +func WithInlineRouteSpec(config v1alpha1.RouteSpec) ServiceOption { + return func(svc *v1alpha1.Service) { + svc.Spec.RouteSpec = config + } +} + +// WithRunLatestConfigSpec configures the Service to use a "runLatest" configuration +func WithRunLatestConfigSpec(config v1alpha1.ConfigurationSpec) ServiceOption { + return func(svc *v1alpha1.Service) { + svc.Spec = v1alpha1.ServiceSpec{ + DeprecatedRunLatest: &v1alpha1.RunLatestType{ + Configuration: config, + }, + } + } +} + +// WithServiceLabel attaches a particular label to the service. 
+func WithServiceLabel(key, value string) ServiceOption { + return func(service *v1alpha1.Service) { + if service.Labels == nil { + service.Labels = make(map[string]string) + } + service.Labels[key] = value + } +} + +// WithNumberedPort sets the Service's port number to what's provided. +func WithNumberedPort(number int32) ServiceOption { + return func(svc *v1alpha1.Service) { + c := &svc.Spec.Template.Spec.Containers[0] + if len(c.Ports) == 1 { + c.Ports[0].ContainerPort = number + } else { + c.Ports = []corev1.ContainerPort{{ + ContainerPort: number, + }} + } + } +} + +// WithNamedPort sets the Service's port name to what's provided. +func WithNamedPort(name string) ServiceOption { + return func(svc *v1alpha1.Service) { + c := &svc.Spec.Template.Spec.Containers[0] + if len(c.Ports) == 1 { + c.Ports[0].Name = name + } else { + c.Ports = []corev1.ContainerPort{{ + Name: name, + }} + } + } +} + +// WithResourceRequirements attaches resource requirements to the service +func WithResourceRequirements(resourceRequirements corev1.ResourceRequirements) ServiceOption { + return func(svc *v1alpha1.Service) { + if svc.Spec.DeprecatedRunLatest != nil { + svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.GetContainer().Resources = resourceRequirements + } else { + svc.Spec.ConfigurationSpec.Template.Spec.Containers[0].Resources = resourceRequirements + } + } +} + +// WithVolume adds a volume to the service +func WithVolume(name, mountPath string, volumeSource corev1.VolumeSource) ServiceOption { + return func(svc *v1alpha1.Service) { + var rt *v1alpha1.RevisionSpec + if svc.Spec.DeprecatedRunLatest != nil { + rt = &svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec + } else { + rt = &svc.Spec.ConfigurationSpec.Template.Spec + } + + rt.GetContainer().VolumeMounts = append(rt.GetContainer().VolumeMounts, + corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + }) + + rt.Volumes = append(rt.Volumes, corev1.Volume{ + Name: name, + VolumeSource: 
volumeSource, + }) + } +} + +func WithServiceAnnotations(annotations map[string]string) ServiceOption { + return func(service *v1alpha1.Service) { + service.Annotations = resources.UnionMaps(service.Annotations, annotations) + } +} + +// WithContainerConcurrency setss the container concurrency on the resource. +func WithContainerConcurrency(cc int) ServiceOption { + return func(s *v1alpha1.Service) { + if s.Spec.DeprecatedRunLatest != nil { + s.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.ContainerConcurrency = + ptr.Int64(int64(cc)) + } else { + s.Spec.ConfigurationSpec.Template.Spec.ContainerConcurrency = + ptr.Int64(int64(cc)) + } + } +} + +// WithConfigAnnotations assigns config annotations to a service +func WithConfigAnnotations(annotations map[string]string) ServiceOption { + return func(service *v1alpha1.Service) { + if service.Spec.DeprecatedRunLatest != nil { + service.Spec.DeprecatedRunLatest.Configuration.GetTemplate().ObjectMeta.Annotations = resources.UnionMaps( + service.Spec.DeprecatedRunLatest.Configuration.GetTemplate().ObjectMeta.Annotations, annotations) + } else { + service.Spec.ConfigurationSpec.Template.ObjectMeta.Annotations = resources.UnionMaps( + service.Spec.ConfigurationSpec.Template.ObjectMeta.Annotations, annotations) + } + } +} + +// WithRevisionTimeoutSeconds sets revision timeout +func WithRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ServiceOption { + return func(service *v1alpha1.Service) { + if service.Spec.DeprecatedRunLatest != nil { + service.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) + } else { + service.Spec.ConfigurationSpec.Template.Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) + } + } +} + +// MarkConfigurationNotReconciled calls the function of the same name on the Service's status. 
+func MarkConfigurationNotReconciled(service *v1alpha1.Service) { + service.Status.MarkConfigurationNotReconciled() +} + +// MarkConfigurationNotOwned calls the function of the same name on the Service's status. +func MarkConfigurationNotOwned(service *v1alpha1.Service) { + service.Status.MarkConfigurationNotOwned(servicenames.Configuration(service)) +} + +// MarkRouteNotOwned calls the function of the same name on the Service's status. +func MarkRouteNotOwned(service *v1alpha1.Service) { + service.Status.MarkRouteNotOwned(servicenames.Route(service)) +} + +// WithPinnedRollout configures the Service to use a "pinned" rollout, +// which is pinned to the named revision. +// Deprecated, since PinnedType is deprecated. +func WithPinnedRollout(name string) ServiceOption { + return WithPinnedRolloutConfigSpec(name, *configSpec.DeepCopy()) +} + +// WithPinnedRolloutConfigSpec WithPinnedRollout2 +func WithPinnedRolloutConfigSpec(name string, config v1alpha1.ConfigurationSpec) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + DeprecatedPinned: &v1alpha1.PinnedType{ + RevisionName: name, + Configuration: config, + }, + } + } +} + +// WithReleaseRolloutAndPercentage configures the Service to use a "release" rollout, +// which spans the provided revisions. +func WithReleaseRolloutAndPercentage(percentage int, names ...string) ServiceOption { + return WithReleaseRolloutAndPercentageConfigSpec(percentage, *configSpec.DeepCopy(), + names...) +} + +// WithReleaseRolloutAndPercentageConfigSpec configures the Service to use a "release" rollout, +// which spans the provided revisions. 
+func WithReleaseRolloutAndPercentageConfigSpec(percentage int, config v1alpha1.ConfigurationSpec, names ...string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + DeprecatedRelease: &v1alpha1.ReleaseType{ + Revisions: names, + RolloutPercent: percentage, + Configuration: config, + }, + } + } +} + +// WithReleaseRollout configures the Service to use a "release" rollout, +// which spans the provided revisions. +func WithReleaseRollout(names ...string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec = v1alpha1.ServiceSpec{ + DeprecatedRelease: &v1alpha1.ReleaseType{ + Revisions: names, + Configuration: *configSpec.DeepCopy(), + }, + } + } +} + +// WithInitSvcConditions initializes the Service's conditions. +func WithInitSvcConditions(s *v1alpha1.Service) { + s.Status.InitializeConditions() +} + +// WithReadyRoute reflects the Route's readiness in the Service resource. +func WithReadyRoute(s *v1alpha1.Service) { + s.Status.PropagateRouteStatus(&v1alpha1.RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + }) +} + +// WithSvcStatusDomain propagates the domain name to the status of the Service. +func WithSvcStatusDomain(s *v1alpha1.Service) { + n, ns := s.GetName(), s.GetNamespace() + s.Status.URL = &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.example.com", n, ns), + } +} + +// WithSvcStatusAddress updates the service's status with the address. +func WithSvcStatusAddress(s *v1alpha1.Service) { + s.Status.Address = &duckv1alpha1.Addressable{ + Addressable: duckv1beta1.Addressable{ + URL: &apis.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s.%s.svc.cluster.local", s.Name, s.Namespace), + }, + }, + } +} + +// WithSvcStatusTraffic sets the Service's status traffic block to the specified traffic targets. 
+func WithSvcStatusTraffic(targets ...v1alpha1.TrafficTarget) ServiceOption { + return func(r *v1alpha1.Service) { + // Automatically inject URL into TrafficTarget status + for _, tt := range targets { + tt.URL = domains.URL(domains.HTTPScheme, tt.Tag+".example.com") + } + r.Status.Traffic = targets + } +} + +// WithFailedRoute reflects a Route's failure in the Service resource. +func WithFailedRoute(reason, message string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Status.PropagateRouteStatus(&v1alpha1.RouteStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "False", + Reason: reason, + Message: message, + }}, + }, + }) + } +} + +// WithOutOfDateConfig reflects the Configuration's readiness in the Service +// resource. +func WithOutOfDateConfig(s *v1alpha1.Service) { + s.Status.MarkConfigurationNotReconciled() +} + +// WithReadyConfig reflects the Configuration's readiness in the Service +// resource. This must coincide with the setting of Latest{Created,Ready} +// to the provided revision name. +func WithReadyConfig(name string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Status.PropagateConfigurationStatus(&v1alpha1.ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "True", + }}, + }, + ConfigurationStatusFields: v1alpha1.ConfigurationStatusFields{ + LatestCreatedRevisionName: name, + LatestReadyRevisionName: name, + }, + }) + } +} + +// WithFailedConfig reflects the Configuration's failure in the Service +// resource. The failing revision's name is reflected in LatestCreated. 
+func WithFailedConfig(name, reason, message string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Status.PropagateConfigurationStatus(&v1alpha1.ConfigurationStatus{ + Status: duckv1.Status{ + Conditions: duckv1.Conditions{{ + Type: "Ready", + Status: "False", + Reason: reason, + Message: fmt.Sprintf("Revision %q failed with message: %s.", + name, message), + }}, + }, + ConfigurationStatusFields: v1alpha1.ConfigurationStatusFields{ + LatestCreatedRevisionName: name, + }, + }) + } +} + +// WithServiceLatestReadyRevision sets the latest ready revision on the Service's status. +func WithServiceLatestReadyRevision(lrr string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Status.LatestReadyRevisionName = lrr + } +} + +// WithServiceStatusRouteNotReady sets the `RoutesReady` condition on the service to `Unknown`. +func WithServiceStatusRouteNotReady(s *v1alpha1.Service) { + s.Status.MarkRouteNotYetReady() +} + +// WithSecurityContext configures the Service to use the provided security context. +func WithSecurityContext(sc *corev1.SecurityContext) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].SecurityContext = sc + } +} + +// WithWorkingDir configures the Service to use the provided working directory. +func WithWorkingDir(wd string) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].WorkingDir = wd + } +} + +// WithReadinessProbe sets the provided probe to be the readiness +// probe on the service. 
+func WithReadinessProbe(p *corev1.Probe) ServiceOption { + return func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].ReadinessProbe = p + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1beta1/configuration.go b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/configuration.go new file mode 100644 index 0000000000..bb8ec26125 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/configuration.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// ConfigOption enables further configuration of a Configuration. +type ConfigOption func(*v1beta1.Configuration) + +// WithConfigReadinessProbe sets the provided probe to be the readiness +// probe on the configuration. +func WithConfigReadinessProbe(p *corev1.Probe) ConfigOption { + return func(cfg *v1beta1.Configuration) { + cfg.Spec.Template.Spec.Containers[0].ReadinessProbe = p + } +} + +// WithConfigImage sets the container image to be the provided string. 
+func WithConfigImage(img string) ConfigOption { + return func(cfg *v1beta1.Configuration) { + cfg.Spec.Template.Spec.Containers[0].Image = img + } +} diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1beta1/route.go b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/route.go new file mode 100644 index 0000000000..858eae0392 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/route.go @@ -0,0 +1,24 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// RouteOption enables further configuration of a Route. +type RouteOption func(*v1beta1.Route) diff --git a/test/vendor/knative.dev/serving/pkg/testing/v1beta1/service.go b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/service.go new file mode 100644 index 0000000000..d3f5093fb8 --- /dev/null +++ b/test/vendor/knative.dev/serving/pkg/testing/v1beta1/service.go @@ -0,0 +1,204 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + presources "knative.dev/serving/pkg/resources" +) + +// ServiceOption enables further configuration of a Service. +type ServiceOption func(*v1beta1.Service) + +// Service creates a service with ServiceOptions +func Service(name, namespace string, so ...ServiceOption) *v1beta1.Service { + s := &v1beta1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// ServiceWithoutNamespace creates a service with ServiceOptions but without a specific namespace +func ServiceWithoutNamespace(name string, so ...ServiceOption) *v1beta1.Service { + s := &v1beta1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, opt := range so { + opt(s) + } + return s +} + +// WithServiceDefaults will set the default values on the service. +func WithServiceDefaults(svc *v1beta1.Service) { + svc.SetDefaults(context.Background()) +} + +// WithInlineConfigSpec confgures the Service to use the given config spec +func WithInlineConfigSpec(config v1.ConfigurationSpec) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Spec.ConfigurationSpec = config + } +} + +// WithNamedPort sets the name on the Service's port to the provided name +func WithNamedPort(name string) ServiceOption { + return func(svc *v1beta1.Service) { + c := &svc.Spec.Template.Spec.Containers[0] + if len(c.Ports) == 1 { + c.Ports[0].Name = name + } else { + c.Ports = []corev1.ContainerPort{{ + Name: name, + }} + } + } +} + +// WithNumberedPort sets the Service's port number to what's provided. 
+func WithNumberedPort(number int32) ServiceOption { + return func(svc *v1beta1.Service) { + c := &svc.Spec.Template.Spec.Containers[0] + if len(c.Ports) == 1 { + c.Ports[0].ContainerPort = number + } else { + c.Ports = []corev1.ContainerPort{{ + ContainerPort: number, + }} + } + } +} + +// WithResourceRequirements attaches resource requirements to the service +func WithResourceRequirements(resourceRequirements corev1.ResourceRequirements) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Spec.Template.Spec.Containers[0].Resources = resourceRequirements + } +} + +// WithServiceAnnotation adds the given annotation to the service. +func WithServiceAnnotation(k, v string) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Annotations = presources.UnionMaps(svc.Annotations, map[string]string{ + k: v, + }) + } +} + +// WithServiceAnnotationRemoved adds the given annotation to the service. +func WithServiceAnnotationRemoved(k string) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Annotations = presources.FilterMap(svc.Annotations, func(s string) bool { + return k == s + }) + } +} + +// WithServiceImage sets the container image to be the provided string. +func WithServiceImage(img string) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Spec.Template.Spec.Containers[0].Image = img + } +} + +// WithServiceTemplateMeta sets the container image to be the provided string. 
+func WithServiceTemplateMeta(m metav1.ObjectMeta) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Spec.Template.ObjectMeta = m + } +} + +// WithRevisionTimeoutSeconds sets revision timeout +func WithRevisionTimeoutSeconds(revisionTimeoutSeconds int64) ServiceOption { + return func(service *v1beta1.Service) { + service.Spec.Template.Spec.TimeoutSeconds = ptr.Int64(revisionTimeoutSeconds) + } +} + +// WithServiceAccountName sets revision service account name +func WithServiceAccountName(serviceAccountName string) ServiceOption { + return func(service *v1beta1.Service) { + service.Spec.Template.Spec.ServiceAccountName = serviceAccountName + } +} + +// WithContainerConcurrency sets the given Service's concurrency. +func WithContainerConcurrency(cc int64) ServiceOption { + return func(svc *v1beta1.Service) { + svc.Spec.Template.Spec.ContainerConcurrency = &cc + } +} + +// WithVolume adds a volume to the service +func WithVolume(name, mountPath string, volumeSource corev1.VolumeSource) ServiceOption { + return func(svc *v1beta1.Service) { + rt := &svc.Spec.ConfigurationSpec.Template.Spec + + rt.Containers[0].VolumeMounts = append(rt.Containers[0].VolumeMounts, + corev1.VolumeMount{ + Name: name, + MountPath: mountPath, + }) + + rt.Volumes = append(rt.Volumes, corev1.Volume{ + Name: name, + VolumeSource: volumeSource, + }) + } +} + +// WithEnv configures the Service to use the provided environment variables. +func WithEnv(evs ...corev1.EnvVar) ServiceOption { + return func(s *v1beta1.Service) { + s.Spec.Template.Spec.Containers[0].Env = evs + } +} + +// WithEnvFrom configures the Service to use the provided environment variables. +func WithEnvFrom(evs ...corev1.EnvFromSource) ServiceOption { + return func(s *v1beta1.Service) { + s.Spec.Template.Spec.Containers[0].EnvFrom = evs + } +} + +// WithSecurityContext configures the Service to use the provided security context. 
+func WithSecurityContext(sc *corev1.SecurityContext) ServiceOption { + return func(s *v1beta1.Service) { + s.Spec.Template.Spec.Containers[0].SecurityContext = sc + } +} + +// WithWorkingDir configures the Service to use the provided working directory. +func WithWorkingDir(wd string) ServiceOption { + return func(s *v1beta1.Service) { + s.Spec.Template.Spec.Containers[0].WorkingDir = wd + } +} diff --git a/test/vendor/knative.dev/serving/sample/README.md b/test/vendor/knative.dev/serving/sample/README.md new file mode 100644 index 0000000000..9581e97013 --- /dev/null +++ b/test/vendor/knative.dev/serving/sample/README.md @@ -0,0 +1,4 @@ +# Serving Samples + +Samples for Knative Serving are available in the +[Knative Docs repo](https://github.com/knative/docs/tree/master/docs/serving/samples). diff --git a/test/vendor/knative.dev/serving/test/OWNERS b/test/vendor/knative.dev/serving/test/OWNERS new file mode 100644 index 0000000000..b222981031 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/OWNERS @@ -0,0 +1,12 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-approvers +- serving-api-approvers + +reviewers: +- productivity-reviewers +- serving-api-approvers + +labels: +- area/test-and-release diff --git a/test/vendor/knative.dev/serving/test/README.md b/test/vendor/knative.dev/serving/test/README.md new file mode 100644 index 0000000000..7c873f898b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/README.md @@ -0,0 +1,239 @@ +# Test + +This directory contains tests and testing docs for `Knative Serving`: + +- [Unit tests](#running-unit-tests) currently reside in the codebase alongside + the code they test +- [End-to-end tests](#running-end-to-end-tests), of which there are two types: + - Conformance tests in [`/test/conformance`](./conformance) + - Other end-to-end tests in [`/test/e2e`](./e2e) +- [Performance tests](#running-performance-tests) reside in + [`/test/performance`](./performance) + +The conformance tests are a subset of the end to end test with +[more strict requirements](./conformance/README.md#requirements) around what can +be tested. + +If you want to add more tests, see [adding_tests.md](./adding_tests.md). + +## Presubmit tests + +[`presubmit-tests.sh`](./presubmit-tests.sh) is the entry point for both the +[end-to-end tests](./e2e) and the [conformance tests](./conformance) + +This script, and consequently, the e2e and conformance tests will be run before +every code submission. You can run these tests manually with: + +```shell +test/presubmit-tests.sh +``` + +_Note that to run `presubmit-tests.sh` or `e2e-tests.sh` scripts, you'll need +kubernetes `kubetest` installed:_ + +```bash +go get -u k8s.io/test-infra/kubetest +``` + +## Running unit tests + +To run all unit tests: + +```bash +go test ./... 
+``` + +_By default `go test` will not run [the e2e tests](#running-end-to-end-tests), +which need [`-tags=e2e`](#running-end-to-end-tests) to be enabled._ + +## Running end to end tests + +To run [the e2e tests](./e2e) and [the conformance tests](./conformance), you +need to have a running environment that meets +[the e2e test environment requirements](#environment-requirements), and you need +to specify the build tag `e2e`. + +```bash +go test -v -tags=e2e -count=1 ./test/conformance/... +go test -v -tags=e2e -count=1 ./test/e2e +``` + +## Running performance tests + +Each performance test case in Knative serving is a benchmark, to run these +benchmarks, please follow +[dev.md](https://github.com/knative/serving/blob/master/test/performance/dev.md). + +> As of now, only Googlers can run these benchmarks due to one issue of +> [Mako](https://github.com/google/mako) - the benchmarking tool we use. Details +> can be found in the [issue report](https://github.com/google/mako/issues/2). + +### Running a single test case + +To run one e2e test case, e.g. TestAutoscaleUpDownUp, use +[the `-run` flag with `go test`](https://golang.org/cmd/go/#hdr-Testing_flags): + +```bash +go test -v -tags=e2e -count=1 ./test/e2e -run ^TestAutoscaleUpDownUp$ +``` + +### Running tests in short mode + +Running tests in short mode excludes some large-scale E2E tests and saves +time/resources required for running the test suite. To run the tests in short +mode, use +[the `-short` flag with `go test`](https://golang.org/cmd/go/#hdr-Testing_flags) + +```bash +go test -v -tags=e2e -count=1 -short ./test/e2e +``` + +To get a better idea where the flag is used, search for `testing.Short()` +throughout the test source code. + +### Environment requirements + +These tests require: + +1. [A running `Knative Serving` cluster.](../DEVELOPMENT.md#prerequisites) +1. The `knative-testing` resources: + + ```bash + ko apply -f test/config + ``` + +1. 
A docker repo containing [the test images](#test-images)
+
+### Common Flags
+
+- By default the e2e tests run against the current cluster in `~/.kube/config` using
+ the environment specified in
+ [your environment variables](../DEVELOPMENT.md#setup-your-environment).
+- Since these tests are fairly slow, running them with logging enabled is
+ recommended (`-v`).
+- Using [`--logverbose`](#output-verbose-log) to see the verbose log output from
+ test as well as from k8s libraries.
+- Using `-count=1` is
+ [the idiomatic way to disable test caching](https://golang.org/doc/go1.10#test)
+
+You can [use test flags](#flags) to control the environment your tests run
+against, i.e. override
+[your environment variables](../DEVELOPMENT.md#setup-your-environment):
+
+```bash
+go test -v -tags=e2e -count=1 ./test/conformance/... --kubeconfig ~/special/kubeconfig --cluster myspecialcluster --dockerrepo myspecialdockerrepo
+go test -v -tags=e2e -count=1 ./test/e2e --kubeconfig ~/special/kubeconfig --cluster myspecialcluster --dockerrepo myspecialdockerrepo
+```
+
+## Test images
+
+### Building the test images
+
+Note: this is only required when you run conformance/e2e tests locally with
+`go test` commands.
+
+The [`upload-test-images.sh`](./upload-test-images.sh) script can be used to
+build and push the test images used by the conformance and e2e tests. The script
+expects your environment to be setup as described in
+[DEVELOPMENT.md](../DEVELOPMENT.md#install-requirements).
+
+To run the script for all end to end test images:
+
+```bash
+./test/upload-test-images.sh
+```
+
+A docker tag may be passed as an optional parameter. This can be useful on
+Minikube in tandem with the `--tag` [flag](#using-a-docker-tag):
+
+```bash
+eval $(minikube docker-env)
+./test/upload-test-images.sh any-old-tag
+```
+
+### Adding new test images
+
+New test images should be placed in `./test/test_images`.
+
+## Flags
+
+These flags are useful for running against an existing cluster, making use of
+your existing [environment setup](../DEVELOPMENT.md#setup-your-environment).
+
+Tests importing [`knative.dev/serving/test`](#test-library) recognize these
+flags:
+
+- [All flags added by `knative/pkg/test`](https://github.com/knative/pkg/tree/master/test#flags)
+ such as:
+ - [`--dockerrepo`](#overriding-docker-repo)
+ - [`--tag`](#using-a-docker-tag)
+ - [`--ingressendpoint`](#using-a-custom-ingress-endpoint)
+- [`--resolvabledomain`](#using-a-resolvable-domain)
+- [`--https`](#using-https)
+- [`--ingressClass`](#using-ingress-class)
+
+### Overriding docker repo
+
+The `--dockerrepo` argument lets you specify the docker repo from which images
+used by your tests should be pulled. This will default to the value of your
+[`KO_DOCKER_REPO` environment variable](../DEVELOPMENT.md#setup-your-environment)
+if not specified.
+
+```bash
+go test -v -tags=e2e -count=1 ./test/conformance/... --dockerrepo gcr.myhappyproject
+go test -v -tags=e2e -count=1 ./test/e2e --dockerrepo gcr.myhappyproject
+```
+
+### Using a docker tag
+
+The default docker tag used for the test images is `latest`, which can be
+problematic on Minikube. To avoid having to configure a remote container
+registry to support the `Always` pull policy for `latest` tags, you can have the
+tests use a specific tag:
+
+```bash
+go test -v -tags=e2e -count=1 ./test/conformance/... --tag any-old-tag
+go test -v -tags=e2e -count=1 ./test/e2e --tag any-old-tag
+```
+
+Of course, this implies that you tagged the images when you
+[uploaded them](#building-the-test-images).
+
+### Using a custom ingress endpoint
+
+Some environments (like minikube) do not support a Loadbalancer to make Knative
+services externally available. These environments usually rely on rewriting the
+Loadbalancer to a NodePort.
The external address of such a NodePort is usually +not easily obtained within the cluster automatically, but can be provided from +the outside through the `--ingressendpoint` flag. For a minikube setup for +example, you'd want to run tests against the default `ingressgateway` (port +number 31380) running on the minikube node: + +``` +go test -v -tags=e2e -count=1 ./test/conformance/... --ingressendpoint "$(minikube ip):31380" +go test -v -tags=e2e -count=1 ./test/e2e --ingressendpoint "$(minikube ip):31380" +``` + +### Using a resolvable domain + +If you set up your cluster using +[the getting started docs](../DEVELOPMENT.md#prerequisites), Routes created in +the test will use the domain `example.com`, unless the route has label +`app=prod` in which case they will use the domain `prod-domain.com`. Since these +domains will not be resolvable to deployments in your test cluster, in order to +make a request against the endpoint, the test use the IP assigned to the service +`istio-ingressgateway` in the namespace `istio-system` and spoof the `Host` in +the header. + +If you have configured your cluster to use a resolvable domain, you can use the +`--resolvabledomain` flag to indicate that the test should make requests +directly against `Route.Status.Domain` and does not need to spoof the `Host`. + +### Using https + +You can use the `--https` flag to have all tests run with https. + +### Using ingress class + +The `--ingressClass` argument lets you specify the ingress class. The default +value is `istio.ingress.networking.knative.dev`. 
diff --git a/test/vendor/knative.dev/serving/test/adding_tests.md b/test/vendor/knative.dev/serving/test/adding_tests.md new file mode 100644 index 0000000000..28ac7b9b18 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/adding_tests.md @@ -0,0 +1,287 @@ +# Adding tests + +If you are [developing knative](../DEVELOPMENT.md) you may need to add or +change: + +- [e2e tests](./e2e) +- [Conformance tests](./conformance) + +Both tests can use our [test library](#test-library). + +Reviewers of conformance and e2e tests (i.e. [OWNERS](./OWNERS)) are responsible +for the style and quality of the resulting tests. In order to not discourage +contributions, when style change are required, the reviewers can make the +changes themselves. + +All e2e and conformance tests _must_ be marked with the `e2e` +[build constraint](https://golang.org/pkg/go/build/) so that `go test ./...` can +be used to run only [the unit tests](README.md#running-unit-tests), i.e.: + +```go +// +build e2e +``` + +## Test library + +In the [`test`](.) dir you will find several libraries in the `test` package you +can use in your tests. + +This library exists partially in this directory and partially in +[`knative/pkg/test`](https://github.com/knative/pkg/tree/master/test). 
+ +The libs in this dir can: + +- [Get access to client objects](#get-access-to-client-objects) +- [Make requests against deployed services](#make-requests-against-deployed-services) +- [Check Knative Serving resources](#check-knative-serving-resources) +- [Verify resource state transitions](#verify-resource-state-transitions) +- [Generate boilerplate CRDs](#generate-boilerplate-crds) + +See [`knative/pkg/test`](https://github.com/knative/pkg/tree/master/test) to: + +- [Use common test flags](#use-common-test-flags) +- Output logs +- Emit metrics +- Ensure test cleanup + +### Use common test flags + +These flags are useful for running against an existing cluster, making use of +your existing [environment setup](../DEVELOPMENT.md#setup-your-environment). + +By importing `knative.dev/pkg/test` you get access to a global variable called +`test.Flags` which holds the values of +[the command line flags](./README.md#flags). + +```go +imagePath := strings.Join([]string{test.Flags.DockerRepo, image}, "/")) +``` + +_See +[e2e_flags.go](https://github.com/knative/pkg/blob/master/test/e2e_flags.go)._ + +### Get access to client objects + +To initialize client objects that you can use the command line flags that +describe the environment: + +```go +import ( + testing + + knative.dev/serving/test + pkgTest "knative.dev/pkg/test" +) + +func Setup(t *testing.T) *test.Clients { + clients, err := test.NewClients(pkgTest.Flags.Kubeconfig, pkgTest.Flags.Cluster, namespaceName) + if err != nil { + t.Fatalf("Couldn't initialize clients: %v", err) + } + return clients +} +``` + +The `Clients` struct contains initialized clients for accessing: + +- `Kubernetes objects` +- `Services` +- `Routes` +- `Configurations` +- `Revisions` +- `Knative ingress` +- `ServerlessServices` +- `Istio objects` + +For example, to create a `Route`: + +```go +_, err = clients.ServingClient.Routes.Create(v1test.Route( + test.ResourceNames{ + Route: routeName, + Config: configName, + })) +``` + +_v1test is 
alias for package `knative.dev/serving/pkg/testing/v1`_ + +And you can use the client to clean up `Route` and `Configuration` resources +created by your test: + +```go +import "knative.dev/serving/test" + +func tearDown(clients *test.Clients) { + if clients != nil { + clients.ServingClient.Routes.Delete(routeName, nil) + clients.ServingClient.Configs.Delete(configName, nil) + } +} +``` + +_See [clients.go](./clients.go)._ + +### Make requests against deployed services + +After deploying (i.e. creating a `Route` and a `Configuration`) an endpoint will +not be ready to serve requests right away. To poll a deployed endpoint and wait +for it to be in the state you want it to be in (or timeout) use +`WaitForEndpointState` by importing `knative.dev/pkg/test` with alias `pkgTest`: + +```go +_, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + logger, + updatedRoute.Status.URL.URL(), + pkgTest.EventuallyMatchesBody(expectedText), + "SomeDescription", + test.ServingFlags.ResolvableDomain) +if err != nil { + t.Fatalf("The endpoint for Route %s at domain %s didn't serve the expected text \"%s\": %v", routeName, updatedRoute.Status.Domain, expectedText, err) +} +``` + +This function makes use of +[the environment flag `resolvableDomain`](README.md#using-a-resolvable-domain) +to determine if the ingress should be used or the domain should be used +directly. + +_See [request.go](https://github.com/knative/pkg/blob/master/test/request.go)._ + +If you need more low-level access to the http request or response against a +deployed service, you can directly use the `SpoofingClient` that +`WaitForEndpointState` wraps. + +```go +// Error handling elided for brevity, but you know better. +client, err := pkgTest.NewSpoofingClient(clients.KubeClient.Kube, logger, route.Status.Domain, test.ServingFlags.ResolvableDomain) +req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s", route.Status.Domain), nil) + +// Single request. 
+resp, err := client.Do(req) + +// Polling until we meet some condition. +resp, err := client.Poll(req, test.BodyMatches(expectedText)) +``` + +_See +[spoof.go](https://github.com/knative/pkg/blob/master/test/spoof/spoof.go)._ + +### Check Knative Serving resources + +After creating Knative Serving resources or making changes to them, you will +need to wait for the system to realize those changes. You can use the Knative +Serving CRD check and polling methods to check the resources are either in or +reach the desired state. + +The `WaitFor*` functions use the kubernetes +[`wait` package](https://godoc.org/k8s.io/apimachinery/pkg/util/wait). To poll +they use +[`PollImmediate`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate) +and the return values of the function you provide behave the same as +[`ConditionFunc`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc): +a `bool` to indicate if the function should stop or continue polling, and an +`error` to indicate if there has been an error. 
+ +For example, you can poll a `Configuration` object to find the name of the +`Revision` that was created for it: + +```go +var revisionName string +err := v1alpha1testing.WaitForConfigurationState(clients.ServingClient, configName, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != "" { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil +}, "ConfigurationUpdatedWithRevision") +``` + +_v1alpha1testing is alias for package +`knative.dev/serving/pkg/testing/v1alpha1`_ + +We also have `Check*` variants of many of these methods with identical +signatures, same example: + +```go +var revisionName string +err := v1alpha1testing.CheckConfigurationState(clients.ServingClient, configName, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != "" { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil +}) +``` + +_v1alpha1testing is alias for package +`knative.dev/serving/pkg/testing/v1alpha1`_ + +_For knative crd state, for example `Config`. You can see the code in +[configuration.go](./v1alpha1/configuration.go). For kubernetes objects see +[kube_checks.go](https://github.com/knative/pkg/blob/master/test/kube_checks.go)._ + +### Verify resource state transitions + +To use the [check functions](#check-knative-serving-resources) you must provide +a function to check the state. Some of the expected transition states (as +defined in +[the Knative Serving spec](https://github.com/knative/docs/blob/master/docs/serving/spec/knative-api-specification-1.0.md)) +, for example `v1alpha1/Revision` state, are expressed in function in +[revision.go](./v1alpha1/revision.go). + +For example when a `Revision` has been created, the system will start the +resources required to actually serve it, and then the `Revision` object will be +updated to indicate it is ready. 
This can be polled with +`v1alpha1testing.IsRevisionReady`: + +```go +err := v1alpha1testing.WaitForRevisionState(clients.ServingAlphaClient, revName, v1alpha1testing.IsRevisionReady, "RevisionIsReady") +if err != nil { + t.Fatalf("The Revision %q did not become ready: %v", revName, err) +} +``` + +_v1alpha1testing is alias for package +`knative.dev/serving/pkg/testing/v1alpha1`_ + +Once the `Revision` is created, all traffic for a `Route` should be routed to +it. This can be polled with `v1alpha1testing.AllRouteTrafficAtRevision`: + +```go +err := v1alpha1testing.CheckRouteState(clients.ServingAlphaClient, names.Route, v1alpha1testing.AllRouteTrafficAtRevision(names)) +if err != nil { + t.Fatalf("The Route %s was not updated to route traffic to the Revision %s: %v", names.Route, names.Revision, err) +} +``` + +_See [route.go](./v1alpha1/route.go)._ + +### Generate boilerplate CRDs + +Your tests will probably need to create `Route` and `Configuration` objects. You +can use the existing boilerplate to describe them. + +You can also use the function `AppendRandomString` to create a random name for +your `crd` so that your tests can use unique names each time they run. + +For example to create a `Configuration` object that uses a certain docker image +with a randomized name: + +```go +func TestSomeAwesomeFeature(t *testing.T) { + var names test.ResourceNames + names.Config := test.ObjectNameForTest(t) + _, err := clients.ServingClient.Create(test.Configuration(namespaceName, names, imagePath)) + if err != nil { + // handle error case + } + // more testing +} +``` + +_test is package `knative.dev/serving/test`_ + +Please expand these functions as more use cases are tested. 
+ +_See [crd.go](./crd.go)._ diff --git a/test/vendor/knative.dev/serving/test/apicoverage.sh b/test/vendor/knative.dev/serving/test/apicoverage.sh new file mode 100755 index 0000000000..c28dbba7bc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests against Knative Serving built from source. +# It is started by prow for each PR. For convenience, it can also be executed manually. + +# If you already have the *_OVERRIDE environment variables set, call +# this script with the --run-tests arguments and it will start knative in +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, start knative in it, run the tests and delete the +# cluster. + +source $(dirname $0)/e2e-common.sh + +readonly SERVING_TEST_DIR=$(dirname $0) +readonly APICOVERAGE_IMAGE="${SERVING_TEST_DIR}/apicoverage/image" +readonly APICOVERAGE_TOOL="${SERVING_TEST_DIR}/apicoverage/tools" + +function knative_setup() { + install_knative_serving +} + +# Wrapper to fail_test to write apicoverage junit result file. +# Parameters: $1 - Error message to pass onto fail_test +function fail_apicoverage_run() { + go run "${APICOVERAGE_TOOL}/main.go" --build_failed=true + fail_test $1 +} + +# Script entry point. 
+initialize $@ --skip-istio-addon + +header "Setting up API Coverage Webhook" +kubectl apply -f "${APICOVERAGE_IMAGE}/service-account.yaml" || fail_apicoverage_run "Failed setting up service account for apicoverage-webhook" +ko apply -f "${APICOVERAGE_IMAGE}/apicoverage-webhook.yaml" || fail_apicoverage_run "Failed setting up apicoverage-webhook" + +header "Running tests" +# Run conformance tests and e2e tests +go_test_e2e -timeout=30m ./test/conformance/api/v1alpha1 ./test/conformance/api/v1beta1 ./test/conformance/runtime ./test/e2e || fail_apicoverage_run "Failed in executing Tests" + +header "Retrieving API Coverage values" +go run "${APICOVERAGE_TOOL}/main.go" || fail_test "Failed retrieving API coverage values" + +success diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/README.md b/test/vendor/knative.dev/serving/test/apicoverage/image/README.md new file mode 100644 index 0000000000..98a0ad7430 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/README.md @@ -0,0 +1,7 @@ +# Serving Webhook based API Coverage Image + +This directory contains the HTTP Server image used in Webhook based API coverage +tool. Core infra pieces for the tool comes from +[knative.dev/pkg](https://github.com/knative/pkg/tree/master/test/webhook-apicoverage). +Knative serving specific pieces of the tool (such as rules, ignored fields) +resides in this directory. diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/apicoverage-webhook.yaml b/test/vendor/knative.dev/serving/test/apicoverage/image/apicoverage-webhook.yaml new file mode 100755 index 0000000000..7746ac9c55 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/apicoverage-webhook.yaml @@ -0,0 +1,71 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: apicoverage-webhook + namespace: knative-serving + labels: + name: apicoverage-webhook +spec: + type: LoadBalancer + ports: + - name: webhook + protocol: TCP + port: 443 + targetPort: 8443 + selector: + name: apicoverage-webhook +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: apicoverage-webhook + namespace: knative-serving + labels: + name: apicoverage-webhook +spec: + replicas: 1 + template: + metadata: + name: apicoverage-webhook + annotations: + sidecar.istio.io/inject: "true" + labels: + name: apicoverage-webhook + spec: + serviceAccountName: apicoverage-webhook + containers: + - name: apicoverage-webhook + image: knative.dev/serving/test/apicoverage/image + env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + limits: + memory: 200Mi + cpu: 200m + requests: + memory: 20Mi + cpu: 20m + volumeMounts: + - name: config-logging + mountPath: /etc/config-logging + volumes: + - name: config-logging + configMap: + name: config-logging diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/common/common.go b/test/vendor/knative.dev/serving/test/apicoverage/image/common/common.go new file mode 100644 index 0000000000..69527564bf --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/common/common.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/system" + "knative.dev/pkg/webhook/resourcesemantics" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +var ( + ResourceMap = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ + v1alpha1.SchemeGroupVersion.WithKind("Revision"): &v1alpha1.Revision{}, + v1alpha1.SchemeGroupVersion.WithKind("Configuration"): &v1alpha1.Configuration{}, + v1alpha1.SchemeGroupVersion.WithKind("Route"): &v1alpha1.Route{}, + v1alpha1.SchemeGroupVersion.WithKind("Service"): &v1alpha1.Service{}, + } + WebhookNamespace = system.Namespace() +) + +const ( + CommonComponentName = "apicoverage-webhook" +) diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/kodata/ignoredfields.yaml b/test/vendor/knative.dev/serving/test/apicoverage/image/kodata/ignoredfields.yaml new file mode 100644 index 0000000000..0bc326461f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/kodata/ignoredfields.yaml @@ -0,0 +1,120 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# ignoredfields.yaml contains fields that are ignored for apicoverage calculations. +- package: core/v1 + type: Container + fields: + - Name + - VolumeDevices + - Stdin + - Lifecycle + - TTY + - StdinOnce +- package: meta/v1 + type: ObjectMeta + fields: + - Initializers + - ClusterName + - DeletionGracePeriodSeconds +- package: core/v1 + type: VolumeSource + fields: + - FlexVolume + - PhotonPersistentDisk + - Flocker + - DownwardAPI + - RBD + - AzureDisk + - GCEPersistentDisk + - EmptyDir + - Cinder + - FC + - StorageOS + - VsphereVolume + - CephFS + - GitRepo + - ScaleIO + - PortworxVolume + - Glusterfs + - NFS + - Quobyte + - Projected + - AzureFile + - HostPath + - AWSElasticBlockStore + - ISCSI + - PersistentVolumeClaim +- package: core/v1 + type: SecurityContext + fields: + - Capabilities + - Privileged + - SELinuxOptions + - RunAsGroup + - RunAsNonRoot + - ReadOnlyRootFilesystem + - AllowPrivilegeEscalation + - ProcMount +- package: core/v1 + type: EnvVarSource + fields: + - FieldRef + - ResourceFieldRef +- package: core/v1 + type: ContainerPort + fields: + - HostIP + - HostPort +- package: core/v1 + type: VolumeMount + fields: + - SubPath + - MountPropagation +- package: core/v1 + type: PodSpec + fields: + - InitContainers + - RestartPolicy + - TerminationGracePeriodSeconds + - ActiveDeadlineSeconds + - DNSPolicy + - NodeSelector + - AutomountServiceAccountToken + - NodeName + - HostNetwork + - HostPID + - HostIPC + - ShareProcessNamespace + - SecurityContext + - Hostname + - Subdomain + - Affinity + - SchedulerName + - Tolerations + - HostAliases + - PriorityClassName + - Priority + - DNSConfig + - ReadinessGates + - RuntimeClassName +- package: pkg/apis + type: URL + fields: + - RawPath + - RawQuery + - Opaque + - Path + - Fragment + - User diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/main.go 
b/test/vendor/knative.dev/serving/test/apicoverage/image/main.go new file mode 100644 index 0000000000..a05cb5f8f3 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/main.go @@ -0,0 +1,25 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "knative.dev/serving/test/apicoverage/image/webhook" +) + +func main() { + webhook.SetupWebhookServer() +} diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/rules/coverage_rules.go b/test/vendor/knative.dev/serving/test/apicoverage/image/rules/coverage_rules.go new file mode 100644 index 0000000000..d05d4c9331 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/rules/coverage_rules.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rules + +import ( + "strings" + + "knative.dev/pkg/test/webhook-apicoverage/resourcetree" +) + +// TODO(https://github.com/knative/test-infra/issues/448): evaluate refactoring common, shared code. +// coverage_rules.go contains all the apicoverage rules specified for knative serving. + +// IgnoreLowerLevelMetaFields rule ignores metadata nodes that are at a level lower than 2. +// This is done to ensure we only have ObjectMeta and TypeMeta coverage details for higherlevel +// types like Service, Route and not for nodes which appear in spec. +func IgnoreLowerLevelMetaFields(node resourcetree.NodeInterface) bool { + lowerCaseFieldName := strings.ToLower(node.GetData().Field) + return !((strings.Contains(lowerCaseFieldName, "objectmeta") || strings.Contains(lowerCaseFieldName, "typemeta")) && + len(strings.Split(node.GetData().NodePath, ".")) > 2) +} + +// NodeRules contains all resourcetree.NodeRules specified for knative serving. +var NodeRules = resourcetree.NodeRules{ + Rules: []func(node resourcetree.NodeInterface) bool{ + IgnoreLowerLevelMetaFields, + }, +} + +// IgnoreDeprecatedFields ignores fields that are prefixed with the word "deprecated" +func IgnoreDeprecatedFields(fieldName string) bool { + return !strings.HasPrefix(strings.ToLower(fieldName), "deprecated") +} + +// FieldRules represent all resourcetree.FieldRules specified for knative serving. 
+var FieldRules = resourcetree.FieldRules{ + Rules: []func(fieldName string) bool{ + IgnoreDeprecatedFields, + }, +} diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/rules/display_rules.go b/test/vendor/knative.dev/serving/test/apicoverage/image/rules/display_rules.go new file mode 100644 index 0000000000..2b6f1cb1e6 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/rules/display_rules.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rules + +import ( + "strings" + + "knative.dev/pkg/test/webhook-apicoverage/view" +) + +// display_rules.go contains all the display rules specified by knative serving to display json type like result display. + +// PackageDisplayRule rule specifies how package name needs to be displayed for json type like result display +func PackageDisplayRule(packageName string) string { + if packageName != "" { + tokens := strings.Split(packageName, "/") + if len(tokens) >= 2 { + // As package names are built using reflect.Type.PackagePath, they are long. + // For better readability displaying only last two words of the package path. e.g. serving.v1alpha1 + return strings.Join(tokens[len(tokens)-2:], "/") + } + } + return packageName +} + +// GetDisplayRules returns the view.DisplayRules for knative serving. 
+func GetDisplayRules() view.DisplayRules { + return view.DisplayRules{ + PackageNameRule: PackageDisplayRule, + } +} diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/service-account.yaml b/test/vendor/knative.dev/serving/test/apicoverage/image/service-account.yaml new file mode 100644 index 0000000000..1652edecd2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/service-account.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: apicoverage-webhook + namespace: knative-serving +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: apicoverage-webhook-role +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +- apiGroups: ["extensions"] + resources: ["ingresses","deployments"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: apicoverage-webhook-clusterrolebinding + namespace: knative-serving +subjects: +- kind: ServiceAccount + name: apicoverage-webhook + namespace: knative-serving +roleRef: + kind: ClusterRole + name: apicoverage-webhook-role + apiGroup: rbac.authorization.k8s.io diff --git a/test/vendor/knative.dev/serving/test/apicoverage/image/webhook/webhook_server.go b/test/vendor/knative.dev/serving/test/apicoverage/image/webhook/webhook_server.go new file mode 100644 index 0000000000..89709a4489 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/image/webhook/webhook_server.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "container/list" + "log" + "net/http" + + "knative.dev/pkg/signals" + "knative.dev/pkg/test/webhook-apicoverage/resourcetree" + "knative.dev/pkg/test/webhook-apicoverage/webhook" + "knative.dev/serving/test/apicoverage/image/common" + "knative.dev/serving/test/apicoverage/image/rules" +) + +// SetupWebhookServer builds the necessary webhook configuration, HTTPServer and starts the webhook. +func SetupWebhookServer() { + namespace := common.WebhookNamespace + if len(namespace) == 0 { + log.Fatal("Namespace value to used by the webhook is not set") + } + + webhookConf := webhook.BuildWebhookConfiguration(common.CommonComponentName, common.CommonComponentName+".knative.serving.dev", common.WebhookNamespace) + ac := webhook.APICoverageRecorder{ + Logger: webhookConf.Logger, + ResourceForest: resourcetree.ResourceForest{ + Version: "v1alpha1", + ConnectedNodes: make(map[string]*list.List), + TopLevelTrees: make(map[string]resourcetree.ResourceTree), + }, + ResourceMap: common.ResourceMap, + NodeRules: rules.NodeRules, + FieldRules: rules.FieldRules, + DisplayRules: rules.GetDisplayRules(), + } + ac.Init() + + m := http.NewServeMux() + m.HandleFunc("/", ac.RecordResourceCoverage) + m.HandleFunc(webhook.ResourceCoverageEndPoint, ac.GetResourceCoverage) + m.HandleFunc(webhook.TotalCoverageEndPoint, ac.GetTotalCoverage) + m.HandleFunc(webhook.ResourcePercentageCoverageEndPoint, ac.GetResourceCoveragePercentages) + + err := webhookConf.SetupWebhook(m, ac.ResourceMap, namespace, signals.SetupSignalHandler()) + if err != nil { + 
log.Fatalf("Encountered error setting up Webhook: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/apicoverage/tools/main.go b/test/vendor/knative.dev/serving/test/apicoverage/tools/main.go new file mode 100644 index 0000000000..f5ec541f28 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/apicoverage/tools/main.go @@ -0,0 +1,108 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "log" + "os" + "path" + "strings" + + "knative.dev/pkg/test/webhook-apicoverage/coveragecalculator" + "knative.dev/pkg/test/webhook-apicoverage/tools" + "knative.dev/serving/test/apicoverage/image/common" + "knative.dev/serving/test/apicoverage/image/rules" + "knative.dev/test-infra/shared/prow" +) + +var buildFailed = flag.Bool("build_failed", false, + "Flag indicating if the apicoverage build failed.") + +// Helper method to produce failed coverage results. 
+func getFailedResourceCoverages() *coveragecalculator.CoveragePercentages { + percentCoverages := make(map[string]float64) + for resourceKind := range common.ResourceMap { + percentCoverages[resourceKind.Kind] = 0.0 + } + percentCoverages["Overall"] = 0.0 + return &coveragecalculator.CoveragePercentages{ + ResourceCoverages: percentCoverages} +} + +func main() { + var ( + kubeConfigPath string + serviceIP string + err error + ) + + flag.Parse() + // Ensure artifactsDir exist, in case not invoked from this script + artifactsDir := prow.GetLocalArtifactsDir() + if _, err := os.Stat(artifactsDir); os.IsNotExist(err) { + if err = os.MkdirAll(artifactsDir, 0777); err != nil { + log.Fatalf("Failed to create directory: %v", err) + } + } + tools.CleanupJunitFiles(artifactsDir) + + if *buildFailed { + if err := tools.WriteResourcePercentages(path.Join( + artifactsDir, "junit_bazel.xml"), + getFailedResourceCoverages()); err != nil { + log.Fatalf("Failed writing resource coverage percentages: %v", + err) + } + return + } + + if kubeConfigPath, err = tools.GetDefaultKubePath(); err != nil { + log.Fatalf("Error retrieving kubeConfig path: %v", err) + } + + if serviceIP, err = tools.GetWebhookServiceIP(kubeConfigPath, "", + common.WebhookNamespace, common.CommonComponentName); err != nil { + log.Fatalf("Error retrieving Service IP: %v", err) + } + + for resource := range common.ResourceMap { + err = tools.GetAndWriteResourceCoverage(serviceIP, resource.Kind, + path.Join(artifactsDir, strings.ToLower(resource.Kind)+".html"), + rules.GetDisplayRules()) + if err != nil { + log.Printf("Failed retrieving resource coverage for"+ + " resource %s: %v ", resource.Kind, err) + } + } + + if err := tools.GetAndWriteTotalCoverage(serviceIP, path.Join(artifactsDir, + "totalcoverage.html")); err != nil { + log.Fatalf("total coverage retrieval failed: %v", err) + } + + if coverage, err := tools.GetResourcePercentages(serviceIP); err != nil { + log.Fatalf("Failed retrieving resource coverage 
percentages: %v", + err) + } else { + if err = tools.WriteResourcePercentages(path.Join( + artifactsDir, "junit_bazel.xml"), coverage); err != nil { + log.Fatalf("Failed writing resource coverage percentages: %v", + err) + } + } +} diff --git a/test/vendor/knative.dev/serving/test/cleanup.go b/test/vendor/knative.dev/serving/test/cleanup.go new file mode 100644 index 0000000000..b98abe932a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/cleanup.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// cleanup allows you to define a cleanup function that will be executed +// if your test is interrupted. + +package test + +import ( + "os" + "os/signal" +) + +// CleanupOnInterrupt will execute the function cleanup if an interrupt signal is caught +func CleanupOnInterrupt(cleanup func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + for range c { + cleanup() + os.Exit(1) + } + }() +} + +// TearDown will delete created names using clients. 
+func TearDown(clients *Clients, names ResourceNames) { + if clients != nil && clients.ServingBetaClient != nil { + clients.ServingAlphaClient.Delete( + []string{names.Route}, + []string{names.Config}, + []string{names.Service}, + ) + } +} diff --git a/test/vendor/knative.dev/serving/test/clients.go b/test/vendor/knative.dev/serving/test/clients.go new file mode 100644 index 0000000000..7b3ee8e2bc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/clients.go @@ -0,0 +1,250 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains an object which encapsulates k8s clients which are useful for e2e tests. 
+ +package test + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "knative.dev/pkg/test" + "knative.dev/serving/pkg/client/clientset/versioned" + networkingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/networking/v1alpha1" + servingv1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1" + servingv1alpha1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1" + servingv1beta1 "knative.dev/serving/pkg/client/clientset/versioned/typed/serving/v1beta1" + istioclientset "knative.dev/serving/pkg/client/istio/clientset/versioned" +) + +// Clients holds instances of interfaces for making requests to Knative Serving. +type Clients struct { + KubeClient *test.KubeClient + ServingAlphaClient *ServingAlphaClients + ServingBetaClient *ServingBetaClients + ServingClient *ServingClients + NetworkingClient *NetworkingClients + Dynamic dynamic.Interface + IstioClient istioclientset.Interface +} + +// ServingAlphaClients holds instances of interfaces for making requests to knative serving clients +type ServingAlphaClients struct { + Routes servingv1alpha1.RouteInterface + Configs servingv1alpha1.ConfigurationInterface + Revisions servingv1alpha1.RevisionInterface + Services servingv1alpha1.ServiceInterface +} + +// ServingBetaClients holds instances of interfaces for making requests to knative serving clients +type ServingBetaClients struct { + Routes servingv1beta1.RouteInterface + Configs servingv1beta1.ConfigurationInterface + Revisions servingv1beta1.RevisionInterface + Services servingv1beta1.ServiceInterface +} + +// ServingClients holds instances of interfaces for making requests to knative serving clients +type ServingClients struct { + Routes servingv1.RouteInterface + Configs servingv1.ConfigurationInterface + Revisions 
servingv1.RevisionInterface + Services servingv1.ServiceInterface +} + +// NetworkingClients holds instances of interfaces for making requests to Knative +// networking clients. +type NetworkingClients struct { + ServerlessServices networkingv1alpha1.ServerlessServiceInterface + Ingresses networkingv1alpha1.IngressInterface + Certificates networkingv1alpha1.CertificateInterface +} + +// NewClients instantiates and returns several clientsets required for making request to the +// Knative Serving cluster specified by the combination of clusterName and configPath. Clients can +// make requests within namespace. +func NewClients(configPath string, clusterName string, namespace string) (*Clients, error) { + cfg, err := BuildClientConfig(configPath, clusterName) + if err != nil { + return nil, err + } + + // We poll, so set our limits high. + cfg.QPS = 100 + cfg.Burst = 200 + + return NewClientsFromConfig(cfg, namespace) +} + +// NewClientsFromConfig instantiates and returns several clientsets required for making request to the +// Knative Serving cluster specified by the rest Config. Clients can make requests within namespace. 
+func NewClientsFromConfig(cfg *rest.Config, namespace string) (*Clients, error) { + clients := &Clients{} + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + clients.KubeClient = &test.KubeClient{Kube: kubeClient} + + clients.ServingAlphaClient, err = newServingAlphaClients(cfg, namespace) + if err != nil { + return nil, err + } + + clients.ServingBetaClient, err = newServingBetaClients(cfg, namespace) + if err != nil { + return nil, err + } + + clients.ServingClient, err = newServingClients(cfg, namespace) + if err != nil { + return nil, err + } + + clients.Dynamic, err = dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + + clients.IstioClient, err = istioclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + + clients.NetworkingClient, err = newNetworkingClients(cfg, namespace) + if err != nil { + return nil, err + } + + return clients, nil +} + +// newNetworkingClients instantiates and returns the networking clientset required to make requests +// to Networking resources on the Knative service cluster +func newNetworkingClients(cfg *rest.Config, namespace string) (*NetworkingClients, error) { + cs, err := versioned.NewForConfig(cfg) + if err != nil { + return nil, err + } + return &NetworkingClients{ + ServerlessServices: cs.NetworkingV1alpha1().ServerlessServices(namespace), + Ingresses: cs.NetworkingV1alpha1().Ingresses(namespace), + Certificates: cs.NetworkingV1alpha1().Certificates(namespace), + }, nil +} + +// newServingAlphaClients instantiates and returns the serving clientset required to make requests to the +// knative serving cluster. 
+func newServingAlphaClients(cfg *rest.Config, namespace string) (*ServingAlphaClients, error) { + cs, err := versioned.NewForConfig(cfg) + if err != nil { + return nil, err + } + + return &ServingAlphaClients{ + Configs: cs.ServingV1alpha1().Configurations(namespace), + Revisions: cs.ServingV1alpha1().Revisions(namespace), + Routes: cs.ServingV1alpha1().Routes(namespace), + Services: cs.ServingV1alpha1().Services(namespace), + }, nil +} + +// newServingBetaClients instantiates and returns the serving clientset required to make requests to the +// knative serving cluster. +func newServingBetaClients(cfg *rest.Config, namespace string) (*ServingBetaClients, error) { + cs, err := versioned.NewForConfig(cfg) + if err != nil { + return nil, err + } + + return &ServingBetaClients{ + Configs: cs.ServingV1beta1().Configurations(namespace), + Revisions: cs.ServingV1beta1().Revisions(namespace), + Routes: cs.ServingV1beta1().Routes(namespace), + Services: cs.ServingV1beta1().Services(namespace), + }, nil +} + +// newServingClients instantiates and returns the serving clientset required to make requests to the +// knative serving cluster. +func newServingClients(cfg *rest.Config, namespace string) (*ServingClients, error) { + cs, err := versioned.NewForConfig(cfg) + if err != nil { + return nil, err + } + + return &ServingClients{ + Configs: cs.ServingV1().Configurations(namespace), + Revisions: cs.ServingV1().Revisions(namespace), + Routes: cs.ServingV1().Routes(namespace), + Services: cs.ServingV1().Services(namespace), + }, nil +} + +// Delete will delete all Routes and Configs with the names routes and configs, if clients +// has been successfully initialized. 
+func (clients *ServingAlphaClients) Delete(routes []string, configs []string, services []string) error { + deletions := []struct { + client interface { + Delete(name string, options *v1.DeleteOptions) error + } + items []string + }{ + {clients.Routes, routes}, + {clients.Configs, configs}, + {clients.Services, services}, + } + + propPolicy := v1.DeletePropagationForeground + dopt := &v1.DeleteOptions{ + PropagationPolicy: &propPolicy, + } + + for _, deletion := range deletions { + if deletion.client == nil { + continue + } + + for _, item := range deletion.items { + if item == "" { + continue + } + + if err := deletion.client.Delete(item, dopt); err != nil { + return err + } + } + } + + return nil +} + +// BuildClientConfig builds client config for testing. +func BuildClientConfig(kubeConfigPath string, clusterName string) (*rest.Config, error) { + overrides := clientcmd.ConfigOverrides{} + // Override the cluster name if provided. + if clusterName != "" { + overrides.Context.Cluster = clusterName + } + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath}, + &overrides).ClientConfig() +} diff --git a/test/vendor/knative.dev/serving/test/config/100-namespace.yaml b/test/vendor/knative.dev/serving/test/config/100-namespace.yaml new file mode 100644 index 0000000000..d763ceee88 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/100-namespace.yaml @@ -0,0 +1,23 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Namespace +metadata: + name: serving-tests +--- +apiVersion: v1 +kind: Namespace +metadata: + name: serving-tests-alt diff --git a/test/vendor/knative.dev/serving/test/config/300-configmap.yaml b/test/vendor/knative.dev/serving/test/config/300-configmap.yaml new file mode 100644 index 0000000000..0fb9f5a220 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/300-configmap.yaml @@ -0,0 +1,21 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: conformance-test-configmap + namespace: serving-tests +data: + testKey: testValue diff --git a/test/vendor/knative.dev/serving/test/config/300-secret.yaml b/test/vendor/knative.dev/serving/test/config/300-secret.yaml new file mode 100644 index 0000000000..7ada4c46cd --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/300-secret.yaml @@ -0,0 +1,22 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: conformance-test-secret + namespace: serving-tests +type: Opaque +data: + testKey: dGVzdFZhbHVl diff --git a/test/vendor/knative.dev/serving/test/config/config-logging.yaml b/test/vendor/knative.dev/serving/test/config/config-logging.yaml new file mode 100644 index 0000000000..12562d939d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/config-logging.yaml @@ -0,0 +1,103 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: knative-serving + labels: + serving.knative.dev/release: devel + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. 
+ # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # Common configuration for all Knative codebase + zap-logger-config: | + { + "level": "info", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + + # Log level overrides + # For all components except the autoscaler and queue proxy, + # changes are be picked up immediately. + # For autoscaler and queue proxy, changes require recreation of the pods. + loglevel.controller: "info" + loglevel.autoscaler: "info" + loglevel.queueproxy: "info" + loglevel.webhook: "info" + loglevel.activator: "info" + + zap-logger-config: | + { + "level": "debug", + "development": false, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + + # Log level overrides + # For all components except the autoscaler and queue proxy, + # changes are be picked up immediately. + # For autoscaler and queue proxy, changes require recreation of the pods. 
+ loglevel.controller: "debug" + loglevel.autoscaler: "debug" + loglevel.queueproxy: "debug" + loglevel.webhook: "debug" + loglevel.activator: "debug" diff --git a/test/vendor/knative.dev/serving/test/config/mtls/destinationrule.yaml b/test/vendor/knative.dev/serving/test/config/mtls/destinationrule.yaml new file mode 100644 index 0000000000..736f6a6a02 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/mtls/destinationrule.yaml @@ -0,0 +1,35 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "mtls-services" + namespace: "serving-tests" +spec: + host: "*.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL +--- +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "mtls-services" + namespace: "serving-tests-alt" +spec: + host: "*.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL diff --git a/test/vendor/knative.dev/serving/test/config/mtls/policy.yaml b/test/vendor/knative.dev/serving/test/config/mtls/policy.yaml new file mode 100644 index 0000000000..3766d690ba --- /dev/null +++ b/test/vendor/knative.dev/serving/test/config/mtls/policy.yaml @@ -0,0 +1,33 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: "authentication.istio.io/v1alpha1" +kind: "Policy" +metadata: + name: "default" + namespace: "serving-tests" +spec: + peers: + - mtls: + mode: STRICT +--- +apiVersion: "authentication.istio.io/v1alpha1" +kind: "Policy" +metadata: + name: "default" + namespace: "knative-serving" +spec: + peers: + - mtls: + mode: PERMISSIVE diff --git a/test/vendor/knative.dev/serving/test/conformance.go b/test/vendor/knative.dev/serving/test/conformance.go new file mode 100644 index 0000000000..4ede42a2db --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + pkgTest "knative.dev/pkg/test" + + // Mysteriously required to support GCP auth (required by k8s libs). Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +// Constants for test images located in test/test_images. 
+const ( + // Test image names + Autoscale = "autoscale" + Failing = "failing" + HelloVolume = "hellovolume" + HelloWorld = "helloworld" + HTTPProxy = "httpproxy" + InvalidHelloWorld = "invalidhelloworld" // Not a real image + PizzaPlanet1 = "pizzaplanetv1" + PizzaPlanet2 = "pizzaplanetv2" + Protocols = "protocols" + Runtime = "runtime" + SingleThreadedImage = "singlethreaded" + Timeout = "timeout" + WorkingDir = "workingdir" + + // Constants for test image output. + PizzaPlanetText1 = "What a spaceport!" + PizzaPlanetText2 = "Re-energize yourself with a slice of pepperoni!" + HelloWorldText = "Hello World! How about some tasty noodles?" + + ConcurrentRequests = 50 + // We expect to see 100% of requests succeed for traffic sent directly to revisions. + // This might be a bad assumption. + MinDirectPercentage = 1 + // We expect to see at least 25% of either response since we're routing 50/50. + // This might be a bad assumption. + MinSplitPercentage = 0.25 +) + +// Setup creates client to run Knative Service requests +func Setup(t pkgTest.TLegacy) *Clients { + t.Helper() + clients, err := NewClients(pkgTest.Flags.Kubeconfig, pkgTest.Flags.Cluster, ServingNamespace) + if err != nil { + t.Fatal("Couldn't initialize clients", "error", err.Error()) + } + return clients +} diff --git a/test/vendor/knative.dev/serving/test/conformance/README.md b/test/vendor/knative.dev/serving/test/conformance/README.md new file mode 100644 index 0000000000..62a07cfdd7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/README.md @@ -0,0 +1,34 @@ +# Conformance tests + +- [Running conformance tests](../README.md#running-conformance-tests) + +## Adding conformance tests + +Knative Serving conformance tests +[can be run against any implementation of the Knative Serving API](#requirements) +to ensure the API has been implemented consistently. 
Passing these tests +indicates that apps and functions deployed to this implementation could be +ported to other implementations as well. + +_The precedent for these tests is +[the k8s conformance tests](https://github.com/cncf/k8s-conformance)._ + +These tests use [the test library](../adding_tests.md#test-library). + +### Requirements + +The conformance tests should **ONLY** cover functionality that applies to any +implementation of the API. + +The conformance tests **MUST**: + +1. Provide frequent output describing what actions they are undertaking, + especially before performing long running operations. Please see the + [Log section](../adding_tests.md#output-log) for detailed instructions. +2. Follow Golang best practices. +3. Not require any specific file system permissions to run or require any + additional binaries to be installed in the target environment before the + tests run. +4. Not depend on any k8s resources outside of those added by Knative Serving OR + they should provide flags that allow the test to run without access to those + resources. diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/blue_green_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/blue_green_test.go new file mode 100644 index 0000000000..082e5696ed --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/blue_green_test.go @@ -0,0 +1,164 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "math" + "net/url" + "testing" + + "golang.org/x/sync/errgroup" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + rtesting "knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" +) + +const ( + // This test uses the two pizza planet test images for the blue and green deployment. + expectedBlue = test.PizzaPlanetText1 + expectedGreen = test.PizzaPlanetText2 +) + +// TestBlueGreenRoute verifies that a route configured with a 50/50 traffic split +// between two revisions will (approximately) route traffic evenly between them. +// Also, traffic that targets revisions *directly* will be routed to the correct +// revision 100% of the time. +func TestBlueGreenRoute(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + imagePaths := []string{ + pkgTest.ImagePath(test.PizzaPlanet1), + pkgTest.ImagePath(test.PizzaPlanet2), + } + + // Set Service and Image for names to create the initial service + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Setup Initial Service + t.Log("Creating a new Service in runLatest") + objects, err := v1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // The first revision created is "blue" + blue := names + blue.TrafficTarget = "blue" + green := names + green.TrafficTarget = "green" + + t.Log("Updating the Service to use a different image") + service, err := v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(imagePaths[1])) + if err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, imagePaths[1], err) + } + 
objects.Service = service + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + green.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for image %s: %v", names.Service, imagePaths[1], err) + } + + t.Log("Updating RouteSpec") + if _, err := v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: blue.TrafficTarget, + RevisionName: blue.Revision, + Percent: ptr.Int64(50), + }, { + Tag: green.TrafficTarget, + RevisionName: green.Revision, + Percent: ptr.Int64(50), + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err = clients.ServingClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var blueURL, greenURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == blue.TrafficTarget { + blueURL = tt.URL.URL() + } + if tt.Tag == green.TrafficTarget { + greenURL = tt.URL.URL() + } + } + if blueURL == nil || greenURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + tealURL := service.Status.URL.URL() + + // Istio network programming takes some time to be effective. Currently Istio + // does not expose a Status, so we rely on probes to know when they are effective. + // Since we are updating the service the teal domain probe will succeed before our changes + // take effect so we probe the green domain. 
+ t.Logf("Probing %s", greenURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + greenURL, + v1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", greenURL, err) + } + + // Send concurrentRequests to blueDomain, greenDomain, and tealDomain. + g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinSplitPercentage)) + return checkDistribution(t, clients, tealURL, test.ConcurrentRequests, min, []string{expectedBlue, expectedGreen}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, blueURL, test.ConcurrentRequests, min, []string{expectedBlue}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, greenURL, test.ConcurrentRequests, min, []string{expectedGreen}) + }) + if err := g.Wait(); err != nil { + t.Fatalf("Error sending requests: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/configuration_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/configuration_test.go new file mode 100644 index 0000000000..f89eba1def --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/configuration_test.go @@ -0,0 +1,168 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" +) + +func TestUpdateConfigurationMetadata(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating new configuration %s", names.Config) + if _, err := v1test.CreateConfiguration(t, clients, names); err != nil { + t.Fatalf("Failed to create configuration %s", names.Config) + } + + // Wait for the configuration to actually be ready to not race in the updates below. + if err := v1test.WaitForConfigurationState(clients.ServingClient, names.Config, v1test.IsConfigurationReady, "ConfigurationIsReady"); err != nil { + t.Fatalf("Configuration %s did not become ready: %v", names.Config, err) + } + + cfg := fetchConfiguration(names.Config, clients, t) + names.Revision = cfg.Status.LatestReadyRevisionName + + t.Logf("Updating labels of Configuration %s", names.Config) + newLabels := map[string]string{ + "labelX": "abc", + "labelY": "def", + } + // Copy over new labels. 
+ if cfg.Labels == nil { + cfg.Labels = newLabels + } else { + for k, v := range newLabels { + cfg.Labels[k] = v + } + } + cfg, err := clients.ServingClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update labels for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationLabelsUpdate(clients, names, cfg.Labels); err != nil { + t.Fatalf("The labels for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + expected := names.Revision + actual := cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating labels for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating labels were not propagated to Revision %s", names.Revision) + err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, func(r *v1.Revision) (bool, error) { + // Labels we placed on Configuration should _not_ appear on Revision. + return checkNoKeysPresent(newLabels, r.Labels, t), nil + }) + if err != nil { + t.Errorf("The labels for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } + + t.Logf("Updating annotations of Configuration %s", names.Config) + newAnnotations := map[string]string{ + "annotationA": "123", + "annotationB": "456", + } + if cfg.Annotations == nil { + cfg.Annotations = newAnnotations + } else { + // Copy over new annotations. 
+ for k, v := range newAnnotations { + cfg.Annotations[k] = v + } + } + cfg, err = clients.ServingClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update annotations for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationAnnotationsUpdate(clients, names, cfg.Annotations); err != nil { + t.Fatalf("The annotations for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + actual = cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating annotations for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating annotations were not propagated to Revision %s", names.Revision) + err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, func(r *v1.Revision) (bool, error) { + // Annotations we placed on Configuration should _not_ appear on Revision. 
+ return checkNoKeysPresent(newAnnotations, r.Annotations, t), nil + }) + if err != nil { + t.Errorf("The annotations for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } +} + +func fetchConfiguration(name string, clients *test.Clients, t *testing.T) *v1.Configuration { + cfg, err := clients.ServingClient.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get configuration %s: %v", name, err) + } + return cfg +} + +func waitForConfigurationLabelsUpdate(clients *test.Clients, names test.ResourceNames, labels map[string]string) error { + return v1test.WaitForConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Labels, labels) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithLabels") +} + +func waitForConfigurationAnnotationsUpdate(clients *test.Clients, names test.ResourceNames, annotations map[string]string) error { + return v1test.WaitForConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Annotations, annotations) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithAnnotations") +} + +// checkNoKeysPresent returns true if _no_ keys from `expected`, are present in `actual`. +// checkNoKeysPresent will log the offending keys to t.Log. 
+func checkNoKeysPresent(expected map[string]string, actual map[string]string, t *testing.T) bool { + t.Helper() + present := []string{} + for k := range expected { + if _, ok := actual[k]; ok { + present = append(present, k) + } + } + if len(present) != 0 { + t.Logf("Unexpected keys: %v", present) + } + return len(present) == 0 +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/errorcondition_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/errorcondition_test.go new file mode 100644 index 0000000000..a030c44428 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/errorcondition_test.go @@ -0,0 +1,231 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ptest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + rtesting "knative.dev/serving/pkg/testing/v1" +) + +const ( + containerMissing = "ContainerMissing" +) + +// TestContainerErrorMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container image missing scenario. +func TestContainerErrorMsg(t *testing.T) { + t.Parallel() + if strings.HasSuffix(strings.Split(ptest.Flags.DockerRepo, "/")[0], ".local") { + t.Skip("Skipping for local docker repo") + } + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.InvalidHelloWorld, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Specify an invalid image path + // A valid DockerRepo is still needed, otherwise will get UNAUTHORIZED instead of container missing error + t.Logf("Creating a new Service %s", names.Service) + svc, err := createService(t, clients, names, 2) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + + names.Config = serviceresourcenames.Configuration(svc) + names.Route = serviceresourcenames.Route(svc) + + manifestUnknown := string(transport.ManifestUnknownErrorCode) + t.Log("When the imagepath is invalid, the Configuration should have error status.") + + // Wait for ServiceState becomes NotReady. It also waits for the creation of Configuration. 
+ if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceNotReady, "ServiceIsNotReady"); err != nil { + t.Fatalf("The Service %s was unexpected state: %v", names.Service, err) + } + + // Checking for "Container image not present in repository" scenario defined in error condition spec + err = v1test.WaitForConfigurationState(clients.ServingClient, names.Config, func(r *v1.Configuration) (bool, error) { + cond := r.Status.GetCondition(v1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, manifestUnknown) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, manifestUnknown, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ContainerImageNotPresent") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the imagepath is invalid, the revision should have error status.") + err = v1test.WaitForRevisionState(clients.ServingClient, revisionName, func(r *v1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1.RevisionConditionReady) + if cond != nil { + if cond.Reason == containerMissing && strings.Contains(cond.Message, manifestUnknown) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, containerMissing, manifestUnknown, cond.Reason, cond.Message) + } + return false, nil + }, "ImagePathInvalid") + + if err 
!= nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + + t.Log("Checking to ensure Route is in desired state") + err = v1test.CheckRouteState(clients.ServingClient, names.Route, v1test.IsRouteNotReady) + if err != nil { + t.Fatalf("the Route %s was not desired state: %v", names.Route, err) + } +} + +// TestContainerExitingMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container crashing scenario. +func TestContainerExitingMsg(t *testing.T) { + t.Parallel() + const ( + // The given image will always exit with an exit code of 5 + exitCodeReason = "ExitCode5" + // ... and will print "Crashed..." before it exits + errorLog = "Crashed..." + ) + + tests := []struct { + Name string + ReadinessProbe *corev1.Probe + }{{ + Name: "http", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + }, { + Name: "tcp", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.Name, func(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.Failing, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Configuration %s", names.Config) + + if _, err := v1test.CreateConfiguration(t, clients, names, rtesting.WithConfigReadinessProbe(tt.ReadinessProbe)); err != nil { + t.Fatalf("Failed to create configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the Configuration should have error status.") + + err := v1test.WaitForConfigurationState(clients.ServingClient, names.Config, func(r *v1.Configuration) (bool, error) { + cond := r.Status.GetCondition(v1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if 
strings.Contains(cond.Message, errorLog) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, errorLog, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ConfigContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the revision should have error status.") + err = v1test.WaitForRevisionState(clients.ServingClient, revisionName, func(r *v1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1.RevisionConditionReady) + if cond != nil { + if cond.Reason == exitCodeReason && strings.Contains(cond.Message, errorLog) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, exitCodeReason, errorLog, cond.Reason, cond.Message) + } + return false, nil + }, "RevisionContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + }) + } +} + +// Get revision name from configuration. 
+func getRevisionFromConfiguration(clients *test.Clients, configName string) (string, error) { + config, err := clients.ServingClient.Configs.Get(configName, metav1.GetOptions{}) + if err != nil { + return "", err + } + if config.Status.LatestCreatedRevisionName != "" { + return config.Status.LatestCreatedRevisionName, nil + } + return "", fmt.Errorf("No valid revision name found in configuration %s", configName) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/generatename_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/generatename_test.go new file mode 100644 index 0000000000..6971e4b5cf --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/generatename_test.go @@ -0,0 +1,204 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "net/url" + "regexp" + "testing" + + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + rtesting "knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" +) + +func setServiceGenerateName(generateName string) rtesting.ServiceOption { + return func(service *v1.Service) { + service.ObjectMeta.GenerateName = generateName + } +} + +func setConfigurationGenerateName(generateName string) rtesting.ConfigOption { + return func(config *v1.Configuration) { + config.ObjectMeta.GenerateName = generateName + } +} + +func setRouteGenerateName(generateName string) rtesting.RouteOption { + return func(route *v1.Route) { + route.ObjectMeta.GenerateName = generateName + } +} + +// generateNamePrefix returns the object name to be used for testing, shorted to +// 44 characters to avoid #3236, as generateNames longer than 44 characters may cause +// some knative resources to never become ready. +func generateNamePrefix(t *testing.T) string { + generateName := test.ObjectNameForTest(t) + "-" + + generateNameLength := len(generateName) + if generateNameLength > 44 { + generateNameLength = 44 + } + return generateName[0:generateNameLength] +} + +// validateName checks that a name generated using a generateName is valid. It checks +// 1. The generateName is a prefix of the name, but they are not equal +// 2. Any number of valid name characters (alphanumeric, -, and .) are added togenerateName to +// create the value of name. 
+func validateName(generateName, name string) error { + r := regexp.MustCompile("^" + regexp.QuoteMeta(generateName) + "[a-zA-Z0-9\\-.]+$") + + if !r.MatchString(name) { + return fmt.Errorf("generated name = %q, want to match %q", name, r.String()) + } + return nil +} + +func canServeRequests(t *testing.T, clients *test.Clients, route *v1.Route) error { + t.Logf("Route %s has a domain set in its status", route.Name) + var url *url.URL + err := v1test.WaitForRouteState( + clients.ServingClient, + route.Name, + func(r *v1.Route) (bool, error) { + url = r.Status.URL.URL() + return url != nil, nil + }, + "RouteDomain", + ) + if err != nil { + return fmt.Errorf("route did not get assigned an URL: %w", err) + } + + t.Logf("Route %s can serve the expected data at %s", route.Name, url) + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", route.Name, url, test.HelloWorldText, err) + } + + return nil +} + +// TestServiceGenerateName checks that knative Services MAY request names generated by +// the system using metadata.generateName. It ensures that knative Services created this way can become ready +// and serve requests. +func TestServiceGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + // Cleanup on test failure. + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + // Create the service using the generate name field. If the service does not become ready this will fail. 
+ t.Logf("Creating new service with generateName %s", generateName) + resources, err := v1test.CreateServiceReady(t, clients, &names, setServiceGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create service with generateName %s: %v", generateName, err) + } + + // Ensure that the name given to the service is generated from the generateName field. + t.Log("When the service is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Service); err != nil { + t.Errorf("Illegal name generated for service %s: %v", names.Service, err) + } + + // Ensure that the service can serve requests + err = canServeRequests(t, clients, resources.Route) + if err != nil { + t.Errorf("Service %s could not serve requests: %v", names.Service, err) + } +} + +// TestRouteAndConfiguration checks that both routes and configurations MAY request names generated by +// the system using metadata.generateName. It ensures that routes and configurations created this way both: +// 1. Become ready +// 2. Can serve requests. +func TestRouteAndConfigGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + t.Logf("Creating new configuration with generateName %s", generateName) + config, err := v1test.CreateConfiguration(t, clients, names, setConfigurationGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create configuration with generateName %s: %v", generateName, err) + } + names.Config = config.Name + + // Ensure the associated revision is created. This also checks that the configuration becomes ready. 
+ t.Log("The configuration will be updated with the name of the associated Revision once it is created.") + names.Revision, err = v1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + // Ensure that the name given to the configuration is generated from the generate name field. + t.Log("When the configuration is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Config); err != nil { + t.Errorf("Illegal name generated for configuration %s: %v", names.Config, err) + } + + // Create a route that maps to the revision created by the configuration above + t.Logf("Create new Route with generateName %s", generateName) + route, err := v1test.CreateRoute(t, clients, names, setRouteGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create route with generateName %s: %v", generateName, err) + } + names.Route = route.Name + + t.Log("When the route is created, it will become ready") + if err := v1test.WaitForRouteState(clients.ServingClient, names.Route, v1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("Error waiting for the route %s to become ready: %v", names.Route, err) + } + + // Ensure that the name given to the route is generated from the generate name field + t.Log("When the route is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Route); err != nil { + t.Errorf("Illegal name generated for route %s: %v", names.Route, err) + } + + // Ensure that the generated route endpoint can serve requests + if err := canServeRequests(t, clients, route); err != nil { + t.Errorf("Configuration %s with Route %s could not serve requests: %v", names.Config, names.Route, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/main_test.go 
b/test/vendor/knative.dev/serving/test/conformance/api/v1/main_test.go new file mode 100644 index 0000000000..c4604c647a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/main_test.go @@ -0,0 +1,33 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "flag" + "os" + "testing" + + pkgTest "knative.dev/pkg/test" +) + +func TestMain(m *testing.M) { + flag.Parse() + pkgTest.SetupLoggingFlags() + os.Exit(m.Run()) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/migration_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/migration_test.go new file mode 100644 index 0000000000..ffa3f3b270 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/migration_test.go @@ -0,0 +1,117 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "testing" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "knative.dev/pkg/test/logstream" + v1a1test "knative.dev/serving/test/v1alpha1" + + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" +) + +func TestTranslation(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + // Create a legacy RunLatest service. This should perform conversion during the webhook + // and return back a converted service resource. + service, err := v1a1test.CreateLatestServiceLegacy(t, clients, names) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // Access the service over the v1 endpoint. + v1b1, err := clients.ServingClient.Services.Get(service.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get v1.Service: %v: %v", names.Service, err) + } + + // Access the service over the v1 endpoint. 
+ v1, err := clients.ServingClient.Services.Get(service.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get v1.Service: %v: %v", names.Service, err) + } + + // Check that all PodSpecs match + if !equality.Semantic.DeepEqual(v1b1.Spec.Template.Spec.PodSpec, service.Spec.Template.Spec.PodSpec) { + t.Fatalf("Failed to parse unstructured as v1.Service: %v: %v", names.Service, err) + } + if !equality.Semantic.DeepEqual(v1.Spec.Template.Spec.PodSpec, service.Spec.Template.Spec.PodSpec) { + t.Fatalf("Failed to parse unstructured as v1.Service: %v: %v", names.Service, err) + } +} + +func TestV1beta1Rejection(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + // Create a legacy RunLatest service, but give it the TypeMeta of v1. + service := v1a1test.LatestServiceLegacy(names) + service.APIVersion = v1.SchemeGroupVersion.String() + service.Kind = "Service" + + // Turn it into an unstructured resource for sending through the dynamic client. + b, err := json.Marshal(service) + if err != nil { + t.Fatalf("Failed to marshal v1alpha1.Service: %v: %v", names.Service, err) + } + u := &unstructured.Unstructured{} + if err := json.Unmarshal(b, u); err != nil { + t.Fatalf("Failed to unmarshal as unstructured: %v: %v", names.Service, err) + } + + // Try to create the "run latest" service through v1. + gvr := v1.SchemeGroupVersion.WithResource("services") + svc, err := clients.Dynamic.Resource(gvr).Namespace(service.Namespace). 
+ Create(u, metav1.CreateOptions{}) + if err == nil { + t.Fatalf("Unexpected success creating %#v", svc) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/resources_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/resources_test.go new file mode 100644 index 0000000000..5631b26982 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/resources_test.go @@ -0,0 +1,117 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + rtesting "knative.dev/serving/pkg/testing/v1" +) + +func TestCustomResourcesLimits(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + t.Log("Creating a new Route and Configuration") + withResources := rtesting.WithResourceRequirements(corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + }) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Autoscale, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, err := v1test.CreateServiceReady(t, clients, &names, withResources) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + endpoint := objects.Route.Status.URL.URL() + + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + endpoint, + v1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK)), + "ResourceTestServesText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error probing %s: %v", endpoint, err) + } + + sendPostRequest := func(resolvableDomain bool, url *url.URL) (*spoof.Response, error) { + t.Logf("Request %s", url) + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), resolvableDomain) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url.String(), nil) + if err != nil { + return nil, err + } + return client.Do(req) + } + + pokeCowForMB := func(mb int) error { + u, _ := url.Parse(endpoint.String()) + q := u.Query() + q.Set("bloat", 
fmt.Sprintf("%d", mb)) + u.RawQuery = q.Encode() + response, err := sendPostRequest(test.ServingFlags.ResolvableDomain, u) + if err != nil { + return err + } + if response.StatusCode != http.StatusOK { + return fmt.Errorf("StatusCode = %d, want %d", response.StatusCode, http.StatusOK) + } + return nil + } + + t.Log("Querying the application to see if the memory limits are enforced.") + if err := pokeCowForMB(100); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 100, err) + } + + if err := pokeCowForMB(200); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 200, err) + } + + if err := pokeCowForMB(500); err == nil { + t.Fatalf("We shouldn't have got a response from bloating cow with %d MBs of Memory: %v", 500, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/revision_timeout_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/revision_timeout_test.go new file mode 100644 index 0000000000..1570c8a7a8 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/revision_timeout_test.go @@ -0,0 +1,241 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/mattbaird/jsonpatch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + . "knative.dev/serving/pkg/testing/v1" +) + +// createService creates a service in namespace with the name names.Service +// that uses the image specified by names.Image +func createService(t *testing.T, clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int64) (*v1.Service, error) { + service := v1test.Service(names, WithRevisionTimeoutSeconds(revisionTimeoutSeconds)) + v1test.LogResourceObject(t, v1test.ResourceObjects{Service: service}) + svc, err := clients.ServingClient.Services.Create(service) + return svc, err +} + +func updateServiceWithTimeout(clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int) error { + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/spec/template/spec/timeoutSeconds", + Value: revisionTimeoutSeconds, + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + return err + } + _, err = clients.ServingClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "") + if err != nil { + return err + } + return nil +} + +// sendRequests send a request to "endpoint", returns error if unexpected response code, nil otherwise. 
+func sendRequest(t *testing.T, clients *test.Clients, endpoint *url.URL, initialSleepSeconds int, sleepSeconds int, expectedResponseCode int) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, endpoint.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Logf("Spoofing client failed: %v", err) + return err + } + + initialSleepMs := initialSleepSeconds * 1000 + sleepMs := sleepSeconds * 1000 + + start := time.Now().UnixNano() + defer func() { + end := time.Now().UnixNano() + t.Logf("URL: %v, initialSleep: %v, sleep: %v, request elapsed %.2f ms", endpoint, initialSleepMs, sleepMs, float64(end-start)/1e6) + }() + u, _ := url.Parse(endpoint.String()) + q := u.Query() + q.Set("initialTimeout", fmt.Sprintf("%d", initialSleepMs)) + q.Set("timeout", fmt.Sprintf("%d", sleepMs)) + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + t.Logf("Failed new request: %v", err) + return err + } + + resp, err := client.Do(req) + if err != nil { + t.Logf("Failed request err: %v", err) + return err + } + + t.Logf("Response status code: %v, expected: %v", resp.StatusCode, expectedResponseCode) + if expectedResponseCode != resp.StatusCode { + return fmt.Errorf("got response status code %v, wanted %v", resp.StatusCode, expectedResponseCode) + } + return nil +} + +func TestRevisionTimeout(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + var rev2s, rev5s test.ResourceNames + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Timeout, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service ") + svc, err := createService(t, clients, names, 2) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + t.Log("The Service will be updated with the 
name of the Revision once it is created") + revisionName, err := v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the new revision: %v", names.Service, err) + } + rev2s.Revision = revisionName + + t.Log("When the Service reports as Ready, everything should be ready") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic to Revision %s: %v", names.Service, names.Revision, err) + } + + t.Log("Updating the Service to use a different revision timeout") + err = updateServiceWithTimeout(clients, names, 5) + if err != nil { + t.Fatalf("Patch update for Service %s with new timeout 5s failed: %v", names.Service, err) + } + + // getNextRevisionName waits for names.Revision to change, so we set it to the rev2s revision and wait for the (new) rev5s revision. + names.Revision = rev2s.Revision + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + rev5s.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision with timeout 5s: %v", names.Service, err) + } + + t.Logf("Waiting for revision %q to be ready", rev2s.Revision) + if err := v1test.WaitForRevisionState(clients.ServingClient, rev2s.Revision, v1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev2s.Revision, err) + } + t.Logf("Waiting for revision %q to be ready", rev5s.Revision) + if err := v1test.WaitForRevisionState(clients.ServingClient, rev5s.Revision, v1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev5s.Revision, err) + } + + // Set names for traffic targets to make them directly routable. 
+ rev2s.TrafficTarget = "rev2s" + rev5s.TrafficTarget = "rev5s" + + t.Log("Updating RouteSpec") + if _, err := v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: rev2s.TrafficTarget, + RevisionName: rev2s.Revision, + Percent: ptr.Int64(50), + }, { + Tag: rev5s.TrafficTarget, + RevisionName: rev5s.Revision, + Percent: ptr.Int64(50), + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err := clients.ServingClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var rev2sURL, rev5sURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == rev2s.TrafficTarget { + rev2sURL = tt.URL.URL() + } + if tt.Tag == rev5s.TrafficTarget { + rev5sURL = tt.URL.URL() + } + } + if rev2sURL == nil || rev5sURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + + t.Logf("Probing %s", rev5sURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + rev5sURL, + v1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", rev5sURL, err) + } + + // Quick sanity check + if err := sendRequest(t, clients, rev2sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 0s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 0s with revision timeout 5s: %v", err) + } + + // Fail by surpassing the initial timeout. 
+ if err := sendRequest(t, clients, rev2sURL, 5, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 5s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 7, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 7s with revision timeout 5s: %v", err) + } + + // Not fail by not surpassing in the initial timeout, but in the overall request duration. + if err := sendRequest(t, clients, rev2sURL, 1, 3, http.StatusOK); err != nil { + t.Errorf("Did not fail request with sleep 1s/3s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 3, 3, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 3s/3s with revision timeout 5s: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/route_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/route_test.go new file mode 100644 index 0000000000..d138348338 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/route_test.go @@ -0,0 +1,157 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "net/url" + "testing" + + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + rtesting "knative.dev/serving/pkg/testing/v1" +) + +func assertResourcesUpdatedWhenRevisionIsReady(t *testing.T, clients *test.Clients, names test.ResourceNames, url *url.URL, expectedGeneration, expectedText string) { + t.Log("When the Route reports as Ready, everything should be ready.") + if err := v1test.WaitForRouteState(clients.ServingClient, names.Route, v1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("The Route %s was not marked as Ready to serve traffic to Revision %s: %v", names.Route, names.Revision, err) + } + + // TODO(#1178): Remove "Wait" from all checks below this point. + t.Log("Serves the expected data at the endpoint") + + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, url, expectedText, err) + } + + // We want to verify that the endpoint works as soon as Ready: True, but there are a bunch of other pieces of state that we validate for conformance. 
+ t.Log("The Revision will be marked as Ready when it can serve traffic") + err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, v1test.IsRevisionReady) + if err != nil { + t.Fatalf("Revision %s did not become ready to serve traffic: %v", names.Revision, err) + } + t.Log("The Revision will be annotated with the generation") + err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, v1test.IsRevisionAtExpectedGeneration(expectedGeneration)) + if err != nil { + t.Fatalf("Revision %s did not have an expected annotation with generation %s: %v", names.Revision, expectedGeneration, err) + } + t.Log("Updates the Configuration that the Revision is ready") + err = v1test.CheckConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + return c.Status.LatestReadyRevisionName == names.Revision, nil + }) + if err != nil { + t.Fatalf("The Configuration %s was not updated indicating that the Revision %s was ready: %v", names.Config, names.Revision, err) + } + t.Log("Updates the Route to route traffic to the Revision") + err = v1test.CheckRouteState(clients.ServingClient, names.Route, v1test.AllRouteTrafficAtRevision(names)) + if err != nil { + t.Fatalf("The Route %s was not updated to route traffic to the Revision %s: %v", names.Route, names.Revision, err) + } +} + +func getRouteURL(clients *test.Clients, names test.ResourceNames) (*url.URL, error) { + var url *url.URL + + err := v1test.WaitForRouteState( + clients.ServingClient, + names.Route, + func(r *v1.Route) (bool, error) { + if r.Status.URL == nil { + return false, nil + } + url = r.Status.URL.URL() + return url != nil, nil + }, + "RouteURL", + ) + + return url, err +} + +func TestRouteCreation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + var objects v1test.ResourceObjects + svcName := test.ObjectNameForTest(t) + names := test.ResourceNames{ + Config: svcName, + Route: svcName, + TrafficTarget: svcName, + Image: test.PizzaPlanet1, + } 
+ + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Route and Configuration") + config, err := v1test.CreateConfiguration(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Configuration: %v", err) + } + objects.Config = config + + route, err := v1test.CreateRoute(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Route: %v", err) + } + objects.Route = route + + t.Log("The Configuration will be updated with the name of the Revision") + names.Revision, err = v1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + url, err := getRouteURL(clients, names) + if err != nil { + t.Fatalf("Failed to get URL from route %s: %v", names.Route, err) + } + + t.Logf("The Route URL is: %s", url) + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "1", test.PizzaPlanetText1) + + // We start a prober at background thread to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, url) + defer test.AssertProberDefault(t, prober) + + t.Log("Updating the Configuration to use a different image") + objects.Config, err = v1test.PatchConfig(t, clients, objects.Config, rtesting.WithConfigImage(pkgTest.ImagePath(test.PizzaPlanet2))) + if err != nil { + t.Fatalf("Patch update for Configuration %s with new image %s failed: %v", names.Config, test.PizzaPlanet2, err) + } + + t.Log("Since the Configuration was updated a new Revision will be created and the Configuration will be updated") + names.Revision, err = v1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the Revision for image %s: %v", names.Config, test.PizzaPlanet2, err) + } + + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "2", test.PizzaPlanetText2) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/service_account_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/service_account_test.go new file mode 100644 index 0000000000..1462b195bb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/service_account_test.go @@ -0,0 +1,57 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "strings" + "testing" + + . 
"knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" +) + +const ( + invalidServiceAccountName = "foo@bar.baz" +) + +func TestServiceAccountValidation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Service %s", names.Service) + service := v1test.Service(names, WithServiceAccountName(invalidServiceAccountName)) + v1test.LogResourceObject(t, v1test.ResourceObjects{Service: service}) + + _, err := clients.ServingClient.Services.Create(service) + if err == nil { + t.Fatal("Expected Service creation to fail") + } + if got, want := err.Error(), "serviceAccountName: spec.template.spec."+invalidServiceAccountName; !strings.Contains(got, want) { + t.Errorf("Error = %q, want to contain = %q", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/service_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/service_test.go new file mode 100644 index 0000000000..30dcf5ad0d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/service_test.go @@ -0,0 +1,586 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + rtesting "knative.dev/serving/pkg/testing/v1" +) + +// TestService tests both Creation and Update paths for a service. The test performs a series of Update/Validate steps to ensure that +// the service transitions as expected during each step. +// Currently the test performs the following updates: +// 1. Update Container Image +// 2. Update Metadata +// a. Update Labels +// b. Update Annotations +func TestService(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, err := v1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate State after Image Update + if err = validateControlPlane(t, clients, names, "2"); err != nil { + t.Error(err) + } + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } + + // Update Metadata (Labels) + t.Logf("Updating labels of the RevisionTemplateSpec for service %s.", names.Service) + metadata := metav1.ObjectMeta{ + Labels: map[string]string{ + "labelX": "abc", + "labelY": "def", + }, + } + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceTemplateMeta(metadata)); err != nil { + t.Fatalf("Service %s was not updated with labels in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + if names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s after updating labels in its RevisionTemplateSpec: %v", 
names.Service, names.Revision, err) + } + + // Update Metadata (Annotations) + t.Logf("Updating annotations of RevisionTemplateSpec for service %s", names.Service) + metadata = metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotationA": "123", + "annotationB": "456", + }, + } + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceTemplateMeta(metadata)); err != nil { + t.Fatalf("Service %s was not updated with annotation in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("The new revision has not become ready in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate the Service shape. + if err = validateControlPlane(t, clients, names, "4"); err != nil { + t.Error(err) + } + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } +} + +func waitForDesiredTrafficShape(t *testing.T, sName string, want map[string]v1.TrafficTarget, clients *test.Clients) error { + return v1test.WaitForServiceState( + clients.ServingClient, sName, func(s *v1.Service) (bool, error) { + // IsServiceReady never returns an error. + if ok, _ := v1test.IsServiceReady(s); !ok { + return false, nil + } + // Match the traffic shape. 
+ got := map[string]v1.TrafficTarget{} + for _, tt := range s.Status.Traffic { + got[tt.Tag] = tt + } + ignoreURLs := cmpopts.IgnoreFields(v1.TrafficTarget{}, "URL") + if !cmp.Equal(got, want, ignoreURLs) { + t.Logf("For service %s traffic shape mismatch: (-got, +want) %s", + sName, cmp.Diff(got, want, ignoreURLs)) + return false, nil + } + return true, nil + }, "Verify Service Traffic Shape", + ) +} + +func TestServiceBYOName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + revName := names.Service + "-byoname" + + // Setup initial Service + objects, err := v1test.CreateServiceReady(t, clients, &names, func(svc *v1.Service) { + svc.Spec.Template.Name = revName + }) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + if got, want := names.Revision, revName; got != want { + t.Errorf("CreateServiceReady() = %s, wanted %s", got, want) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err == nil { + t.Fatalf("Patch update for Service %s didn't fail.", names.Service) + } +} + +// TestServiceWithTrafficSplit creates a Service with a variety of "release"-like traffic shapes. +// Currently tests for the following combinations: +// 1. One Revision Specified, current == latest +// 2. One Revision Specified, current != latest +// 3. Two Revisions Specified, 50% rollout, candidate == latest +// 4. Two Revisions Specified, 50% rollout, candidate != latest +// 5. Two Revisions Specified, 50% rollout, candidate != latest, candidate is configurationName. +func TestServiceWithTrafficSplit(t *testing.T) { + t.Parallel() + // Create Initial Service + clients := test.Setup(t) + releaseImagePath2 := pkgTest.ImagePath(test.PizzaPlanet2) + releaseImagePath3 := pkgTest.ImagePath(test.HelloWorld) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Expected Text for different revisions. 
+ const ( + expectedFirstRev = test.PizzaPlanetText1 + expectedSecondRev = test.PizzaPlanetText2 + expectedThirdRev = test.HelloWorldText + ) + + // Setup initial Service + objects, err := v1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + t.Log("Validating service shape.") + if err := validateReleaseServiceShape(objects); err != nil { + t.Fatalf("Release shape is incorrect: %v", err) + } + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + firstRevision := names.Revision + + // 1. One Revision Specified, current == latest. + t.Log("1. Updating Service to ReleaseType using lastCreatedRevision") + objects.Service, err = v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(100), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape := map[string]v1.TrafficTarget{ + "current": { + Tag: "current", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + "latest": { + Tag: "latest", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Service traffic should go to the first revision and be available on two names traffic targets: 'current' and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedFirstRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 2. 
One Revision Specified, current != latest. + t.Log("2. Updating the Service Spec with a new image") + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(releaseImagePath2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath2, err) + } + + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + secondRevision := names.Revision + + // Also verify traffic is in the correct shape. + desiredTrafficShape["latest"] = v1.TrafficTarget{ + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Since the Service is using release the Route will not be updated, but new revision will be available at 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 3. Two Revisions Specified, 50% rollout, candidate == latest. + t.Log("3. 
Updating Service to split traffic between two revisions using Release mode") + objects.Service, err = v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, { + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape = map[string]v1.TrafficTarget{ + "current": { + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + "candidate": { + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + "latest": { + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should be split between the two revisions and available on three named traffic targets, 'current', 'candidate', and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"candidate", "latest", "current"}, + []string{expectedSecondRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 4. Two Revisions Specified, 50% rollout, candidate != latest. + t.Log("4. 
Updating the Service Spec with a new image") + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(releaseImagePath3)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath3, err) + } + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + thirdRevision := names.Revision + + desiredTrafficShape["latest"] = v1.TrafficTarget{ + Tag: "latest", + RevisionName: thirdRevision, + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should remain between the two images, and the new revision should be available on the named traffic target 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // Now update the service to use `@latest` as candidate. + t.Log("5. Updating Service to split traffic between two `current` and `@latest`") + + objects.Service, err = v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, { + Tag: "candidate", + Percent: ptr.Int64(50), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + // Verify in the end it's still the case. 
+ if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // `candidate` now points to the latest. + desiredTrafficShape["candidate"] = v1.TrafficTarget{ + Tag: "candidate", + RevisionName: thirdRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedThirdRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedThirdRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } +} + +func TestAnnotationPropagation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, err := v1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, + rtesting.WithServiceAnnotation("juicy", "jamba")); err != nil { + t.Fatalf("Service %s was not updated with new annotation: %v", names.Service, err) + } + + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. 
+ t.Log("Updating the Service to use a different image.") + image2 := pkgTest.ImagePath(test.PizzaPlanet2) + if _, err := v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. + if err := validateAnnotations(objects, "juicy"); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + if objects.Service, err = v1test.PatchService(t, clients, objects.Service, + rtesting.WithServiceAnnotationRemoved("juicy")); err != nil { + t.Fatalf("Service %s was not updated with annotation deleted: %v", names.Service, err) + } + + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. 
+ t.Log("Updating the Service to use a different image.") + image3 := pkgTest.ImagePath(test.HelloWorld) + if _, err := v1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image3)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image3, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1test.WaitForServiceState(clients.ServingClient, names.Service, v1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. + if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + if _, ok := objects.Config.Annotations["juicy"]; ok { + t.Error("Config still has `juicy` annotation") + } + if _, ok := objects.Route.Annotations["juicy"]; ok { + t.Error("Route still has `juicy` annotation") + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/single_threaded_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/single_threaded_test.go new file mode 100644 index 0000000000..6237364890 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/single_threaded_test.go @@ -0,0 +1,107 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + "time" + + "golang.org/x/sync/errgroup" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + rtesting "knative.dev/serving/pkg/testing/v1" +) + +func TestSingleConcurrency(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.SingleThreadedImage, + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, err := v1test.CreateServiceReady(t, clients, &names, rtesting.WithContainerConcurrency(1)) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + url := objects.Service.Status.URL.URL() + + // Ready does not actually mean Ready for a Route just yet. 
+ // See https://github.com/knative/serving/issues/1582 + t.Logf("Probing %s", url) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", url, err) + } + + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + concurrency := 5 + duration := 20 * time.Second + t.Logf("Maintaining %d concurrent requests for %v.", concurrency, duration) + group, _ := errgroup.WithContext(context.Background()) + for i := 0; i < concurrency; i++ { + group.Go(func() error { + done := time.After(duration) + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return fmt.Errorf("error creating http request: %w", err) + } + + for { + select { + case <-done: + return nil + default: + res, err := client.Do(req) + if err != nil { + return fmt.Errorf("error making request %w", err) + } + if res.StatusCode == http.StatusInternalServerError { + return errors.New("detected concurrent requests") + } else if res.StatusCode != http.StatusOK { + return fmt.Errorf("non 200 response %v", res.StatusCode) + } + } + } + }) + } + t.Log("Waiting for all requests to complete.") + if err := group.Wait(); err != nil { + t.Fatalf("Error making requests for single threaded test: %v.", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/util.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/util.go new file mode 100644 index 0000000000..4fdb095df2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/util.go @@ -0,0 +1,339 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "math" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + "golang.org/x/sync/errgroup" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" +) + +func waitForExpectedResponse(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, expectedResponse string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + _, err = client.Poll(req, v1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedResponse)))) + return err +} + +func validateDomains(t pkgTest.TLegacy, clients *test.Clients, baseDomain *url.URL, + baseExpected, trafficTargets, targetsExpected []string) error { + var subdomains []*url.URL + for _, target := range trafficTargets { + subdomain, _ := url.Parse(baseDomain.String()) + subdomain.Host = target + "-" + baseDomain.Host + subdomains = append(subdomains, subdomain) + } + + g, _ := errgroup.WithContext(context.Background()) + // We don't have a good way to check if the route is updated so we will wait until a subdomain has + // started returning at least one expected result to key that we should validate percentage 
splits. + // In order for tests to succeed reliably, we need to make sure that all domains succeed. + for _, resp := range baseExpected { + // Check for each of the responses we expect from the base domain. + resp := resp + g.Go(func() error { + t.Logf("Waiting for route to update %s", baseDomain) + return waitForExpectedResponse(t, clients, baseDomain, resp) + }) + } + for i, s := range subdomains { + i, s := i, s + g.Go(func() error { + t.Logf("Waiting for route to update %s", s) + return waitForExpectedResponse(t, clients, s, targetsExpected[i]) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error with initial domain probing: %w", err) + } + + g.Go(func() error { + minBasePercentage := test.MinSplitPercentage + if len(baseExpected) == 1 { + minBasePercentage = test.MinDirectPercentage + } + min := int(math.Floor(test.ConcurrentRequests * minBasePercentage)) + return checkDistribution(t, clients, baseDomain, test.ConcurrentRequests, min, baseExpected) + }) + for i, subdomain := range subdomains { + i, subdomain := i, subdomain + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, subdomain, test.ConcurrentRequests, min, []string{targetsExpected[i]}) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error checking routing distribution: %w", err) + } + return nil +} + +// checkDistribution sends "num" requests to "domain", then validates that +// we see each body in "expectedResponses" at least "min" times. 
+func checkDistribution(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, num, min int, expectedResponses []string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + + t.Logf("Performing %d concurrent requests to %s", num, url) + actualResponses, err := sendRequests(client, url, num) + if err != nil { + return err + } + + return checkResponses(t, num, min, url.Hostname(), expectedResponses, actualResponses) +} + +// checkResponses verifies that each "expectedResponse" is present in "actualResponses" at least "min" times. +func checkResponses(t pkgTest.TLegacy, num int, min int, domain string, expectedResponses []string, actualResponses []string) error { + // counts maps the expected response body to the number of matching requests we saw. + counts := make(map[string]int) + // badCounts maps the unexpected response body to the number of matching requests we saw. + badCounts := make(map[string]int) + + // counts := eval( + // SELECT body, count(*) AS total + // FROM $actualResponses + // WHERE body IN $expectedResponses + // GROUP BY body + // ) + for _, ar := range actualResponses { + expected := false + for _, er := range expectedResponses { + if strings.Contains(ar, er) { + counts[er]++ + expected = true + } + } + if !expected { + badCounts[ar]++ + } + } + + // Verify that we saw each entry in "expectedResponses" at least "min" times. + // check(SELECT body FROM $counts WHERE total < $min) + totalMatches := 0 + for _, er := range expectedResponses { + count := counts[er] + if count < min { + return fmt.Errorf("domain %s failed: want at least %d, got %d for response %q", domain, min, count, er) + } + + t.Logf("For domain %s: wanted at least %d, got %d requests.", domain, min, count) + totalMatches += count + } + // Verify that the total expected responses match the number of requests made. 
+ for badResponse, count := range badCounts { + t.Logf("Saw unexpected response %q %d times.", badResponse, count) + } + if totalMatches < num { + return fmt.Errorf("domain %s: saw expected responses %d times, wanted %d", domain, totalMatches, num) + } + // If we made it here, the implementation conforms. Congratulations! + return nil +} + +// sendRequests sends "num" requests to "url", returning a string for each spoof.Response.Body. +func sendRequests(client spoof.Interface, url *url.URL, num int) ([]string, error) { + responses := make([]string, num) + + // Launch "num" requests, recording the responses we get in "responses". + g, _ := errgroup.WithContext(context.Background()) + for i := 0; i < num; i++ { + // We don't index into "responses" inside the goroutine to avoid a race, see #1545. + result := &responses[i] + g.Go(func() error { + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + + resp, err := client.Do(req) + if err != nil { + return err + } + + *result = string(resp.Body) + return nil + }) + } + return responses, g.Wait() +} + +// Validates service health and vended content match for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. 
+func validateDataPlane(t pkgTest.TLegacy, clients *test.Clients, names test.ResourceNames, expectedText string) error { + t.Logf("Checking that the endpoint vends the expected text: %s", expectedText) + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + names.URL, + v1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", names.Route, names.URL, expectedText, err) + } + + return nil +} + +// Validates the state of Configuration, Revision, and Route objects for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. +func validateControlPlane(t pkgTest.T, clients *test.Clients, names test.ResourceNames, expectedGeneration string) error { + t.Log("Checking to ensure Revision is in desired state with", "generation", expectedGeneration) + err := v1test.CheckRevisionState(clients.ServingClient, names.Revision, func(r *v1.Revision) (bool, error) { + if ready, err := v1test.IsRevisionReady(r); !ready { + return false, fmt.Errorf("revision %s did not become ready to serve traffic: %w", names.Revision, err) + } + if r.Status.ImageDigest == "" { + return false, fmt.Errorf("imageDigest not present for revision %s", names.Revision) + } + if validDigest, err := validateImageDigest(names.Image, r.Status.ImageDigest); !validDigest { + return false, fmt.Errorf("imageDigest %s is not valid for imageName %s: %w", r.Status.ImageDigest, names.Image, err) + } + return true, nil + }) + if err != nil { + return err + } + err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, v1test.IsRevisionAtExpectedGeneration(expectedGeneration)) + if err != nil { + return fmt.Errorf("revision %s 
did not have an expected annotation with generation %s: %w", names.Revision, expectedGeneration, err) + } + + t.Log("Checking to ensure Configuration is in desired state.") + err = v1test.CheckConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was created: %w", names.Config, names.Revision, err) + } + if c.Status.LatestReadyRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was ready: %w", names.Config, names.Revision, err) + } + return true, nil + }) + if err != nil { + return err + } + + t.Log("Checking to ensure Route is in desired state with", "generation", expectedGeneration) + err = v1test.CheckRouteState(clients.ServingClient, names.Route, v1test.AllRouteTrafficAtRevision(names)) + if err != nil { + return fmt.Errorf("the Route %s was not updated to route traffic to the Revision %s: %w", names.Route, names.Revision, err) + } + + return nil +} + +// Validates labels on Revision, Configuration, and Route objects when created by a Service +// see spec here: https://github.com/knative/serving/blob/master/docs/spec/spec.md#revision +func validateLabelsPropagation(t pkgTest.T, objects v1test.ResourceObjects, names test.ResourceNames) error { + t.Log("Validate Labels on Revision Object") + revision := objects.Revision + + if revision.Labels["serving.knative.dev/configuration"] != names.Config { + return fmt.Errorf("expect Confguration name in Revision label %q but got %q ", names.Config, revision.Labels["serving.knative.dev/configuration"]) + } + if revision.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Revision label %q but got %q ", names.Service, revision.Labels["serving.knative.dev/service"]) + } + + t.Log("Validate Labels on 
Configuration Object") + config := objects.Config + if config.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Configuration label %q but got %q ", names.Service, config.Labels["serving.knative.dev/service"]) + } + if config.Labels["serving.knative.dev/route"] != names.Route { + return fmt.Errorf("expect Route name in Configuration label %q but got %q ", names.Route, config.Labels["serving.knative.dev/route"]) + } + + t.Log("Validate Labels on Route Object") + route := objects.Route + if route.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Route label %q but got %q ", names.Service, route.Labels["serving.knative.dev/service"]) + } + return nil +} + +func validateAnnotations(objs *v1test.ResourceObjects, extraKeys ...string) error { + // This checks whether the annotations are set on the resources that + // expect them to have. + // List of issues listing annotations that we check: #1642. + + anns := objs.Service.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("service expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Route.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("route expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Config.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("config expected %s annotation to be set, but was empty", a) + } + } + return nil +} + +func validateReleaseServiceShape(objs *v1test.ResourceObjects) error { + // Traffic should be routed to the lastest created revision. 
+ if got, want := objs.Service.Status.Traffic[0].RevisionName, objs.Config.Status.LatestReadyRevisionName; got != want { + return fmt.Errorf("Status.Traffic[0].RevisionsName = %s, want: %s", got, want) + } + return nil +} + +func validateImageDigest(imageName string, imageDigest string) (bool, error) { + ref, err := name.ParseReference(pkgTest.ImagePath(imageName)) + if err != nil { + return false, err + } + + digest, err := name.NewDigest(imageDigest) + if err != nil { + return false, err + } + + return ref.Context().String() == digest.Context().String(), nil +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1/volumes_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1/volumes_test.go new file mode 100644 index 0000000000..f1e0c0d500 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1/volumes_test.go @@ -0,0 +1,419 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "path" + "path/filepath" + "testing" + + "knative.dev/pkg/ptr" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" + v1test "knative.dev/serving/test/v1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "knative.dev/serving/pkg/testing/v1" +) + +// TestConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. 
+func TestConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-exist", + }, + Optional: ptr.Bool(true), + }, + }) + + // Setup initial Service + if _, err := v1test.CreateServiceReady(t, clients, &names, withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// 
TestProjectedConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. +func TestProjectedConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-matter", + }, + Optional: ptr.Bool(true), + }, + }}, + }, + }) + + // Setup initial Service + if _, err := v1test.CreateServiceReady(t, clients, &names, withVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = 
validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "does-not-exist", + Optional: ptr.Bool(true), + }, + }) + + // Setup initial Service + if _, err := v1test.CreateServiceReady(t, clients, &names, withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } 
+} + +// TestProjectedSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestProjectedSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + withSubpath := func(svc *v1.Service) { + vm := &svc.Spec.Template.Spec.Containers[0].VolumeMounts[0] + vm.MountPath = test.HelloVolumePath + vm.SubPath = filepath.Base(test.HelloVolumePath) + } + + // Setup initial Service + if _, err := v1test.CreateServiceReady(t, clients, &names, withVolume, withSubpath); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = 
validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedComplex tests that we echo back the appropriate text from the complex Projected volume. +func TestProjectedComplex(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text1 := test.ObjectNameForTest(t) + text2 := test.ObjectNameForTest(t) + text3 := test.ObjectNameForTest(t) + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text1, + "other": text2, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. 
+ }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text3, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Items: []corev1.KeyToPath{{ + Key: "other", + Path: "another", + }}, + Optional: ptr.Bool(false), + }, + }, { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Items: []corev1.KeyToPath{{ + Key: filepath.Base(test.HelloVolumePath), + Path: filepath.Base(test.HelloVolumePath), + }}, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + + // Setup initial Service + if _, err := v1test.CreateServiceReady(t, clients, &names, withVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + // Observation shows that when keys collide, the last source listed wins, + // so for the main key, we should get back text3 (vs. text1) + if err = validateDataPlane(t, clients, names, text3); err != nil { + t.Error(err) + } + + // Verify that we get multiple files mounted in, in this case from the + // second source, which was partially shadowed in our check above. 
+ names.URL.Path = path.Join(names.URL.Path, "another") + if err = validateDataPlane(t, clients, names, text2); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/blue_green_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/blue_green_test.go new file mode 100644 index 0000000000..567cfa0ef0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/blue_green_test.go @@ -0,0 +1,169 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "math" + "net/url" + "testing" + + "golang.org/x/sync/errgroup" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +const ( + // This test uses the two pizza planet test images for the blue and green deployment. + expectedBlue = test.PizzaPlanetText1 + expectedGreen = test.PizzaPlanetText2 +) + +// TestBlueGreenRoute verifies that a route configured with a 50/50 traffic split +// between two revisions will (approximately) route traffic evenly between them. +// Also, traffic that targets revisions *directly* will be routed to the correct +// revision 100% of the time. 
+func TestBlueGreenRoute(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + imagePaths := []string{ + pkgTest.ImagePath(test.PizzaPlanet1), + pkgTest.ImagePath(test.PizzaPlanet2), + } + + // Set Service and Image for names to create the initial service + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Setup Initial Service + t.Log("Creating a new Service in runLatest") + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // The first revision created is "blue" + blue := names + blue.TrafficTarget = "blue" + green := names + green.TrafficTarget = "green" + + t.Log("Updating the Service to use a different image") + service, err := v1a1test.PatchServiceImage(t, clients, objects.Service, imagePaths[1]) + if err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, imagePaths[1], err) + } + objects.Service = service + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + green.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for image %s: %v", names.Service, imagePaths[1], err) + } + + t.Log("Updating RouteSpec") + if _, err := v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: blue.TrafficTarget, + RevisionName: blue.Revision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: green.TrafficTarget, + RevisionName: green.Revision, + Percent: ptr.Int64(50), + }, + }}, + }); 
err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err = clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var blueURL, greenURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == blue.TrafficTarget { + blueURL = tt.URL.URL() + } + if tt.Tag == green.TrafficTarget { + greenURL = tt.URL.URL() + } + } + if blueURL == nil || greenURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + tealURL := service.Status.URL.URL() + + // Istio network programming takes some time to be effective. Currently Istio + // does not expose a Status, so we rely on probes to know when they are effective. + // Since we are updating the service the teal domain probe will succeed before our changes + // take effect so we probe the green domain. + t.Logf("Probing %s", greenURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + greenURL, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", greenURL, err) + } + + // Send concurrentRequests to blueDomain, greenDomain, and tealDomain. 
+ g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinSplitPercentage)) + return checkDistribution(t, clients, tealURL, test.ConcurrentRequests, min, []string{expectedBlue, expectedGreen}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, blueURL, test.ConcurrentRequests, min, []string{expectedBlue}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, greenURL, test.ConcurrentRequests, min, []string{expectedGreen}) + }) + if err := g.Wait(); err != nil { + t.Fatalf("Error sending requests: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/configuration_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/configuration_test.go new file mode 100644 index 0000000000..e817248894 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/configuration_test.go @@ -0,0 +1,168 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "reflect" + "testing" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestUpdateConfigurationMetadata(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating new configuration %s", names.Config) + if _, err := v1a1test.CreateConfiguration(t, clients, names); err != nil { + t.Fatalf("Failed to create configuration %s", names.Config) + } + + // Wait for the configuration to actually be ready to not race in the updates below. + if err := v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, v1a1test.IsConfigurationReady, "ConfigurationIsReady"); err != nil { + t.Fatalf("Configuration %s did not become ready: %v", names.Config, err) + } + + cfg := fetchConfiguration(names.Config, clients, t) + names.Revision = cfg.Status.LatestReadyRevisionName + + t.Logf("Updating labels of Configuration %s", names.Config) + newLabels := map[string]string{ + "labelX": "abc", + "labelY": "def", + } + // Copy over new labels. 
+ if cfg.Labels == nil { + cfg.Labels = newLabels + } else { + for k, v := range newLabels { + cfg.Labels[k] = v + } + } + cfg, err := clients.ServingAlphaClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update labels for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationLabelsUpdate(clients, names, cfg.Labels); err != nil { + t.Fatalf("The labels for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + expected := names.Revision + actual := cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating labels for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating labels were not propagated to Revision %s", names.Revision) + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, func(r *v1alpha1.Revision) (bool, error) { + // Labels we placed on Configuration should _not_ appear on Revision. + return checkNoKeysPresent(newLabels, r.Labels, t), nil + }) + if err != nil { + t.Errorf("The labels for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } + + t.Logf("Updating annotations of Configuration %s", names.Config) + newAnnotations := map[string]string{ + "annotationA": "123", + "annotationB": "456", + } + if cfg.Annotations == nil { + cfg.Annotations = newAnnotations + } else { + // Copy over new annotations. 
+ for k, v := range newAnnotations { + cfg.Annotations[k] = v + } + } + cfg, err = clients.ServingAlphaClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update annotations for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationAnnotationsUpdate(clients, names, cfg.Annotations); err != nil { + t.Fatalf("The annotations for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + actual = cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating annotations for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating annotations were not propagated to Revision %s", names.Revision) + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, func(r *v1alpha1.Revision) (bool, error) { + // Annotations we placed on Configuration should _not_ appear on Revision. 
+ return checkNoKeysPresent(newAnnotations, r.Annotations, t), nil + }) + if err != nil { + t.Errorf("The annotations for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } +} + +func fetchConfiguration(name string, clients *test.Clients, t *testing.T) *v1alpha1.Configuration { + cfg, err := clients.ServingAlphaClient.Configs.Get(name, v1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get configuration %s: %v", name, err) + } + return cfg +} + +func waitForConfigurationLabelsUpdate(clients *test.Clients, names test.ResourceNames, labels map[string]string) error { + return v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Labels, labels) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithLabels") +} + +func waitForConfigurationAnnotationsUpdate(clients *test.Clients, names test.ResourceNames, annotations map[string]string) error { + return v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Annotations, annotations) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithAnnotations") +} + +// checkNoKeysPresent returns true if _no_ keys from `expected`, are present in `actual`. +// checkNoKeysPresent will log the offending keys to t.Log. 
+func checkNoKeysPresent(expected map[string]string, actual map[string]string, t *testing.T) bool { + t.Helper() + present := []string{} + for k := range expected { + if _, ok := actual[k]; ok { + present = append(present, k) + } + } + if len(present) != 0 { + t.Logf("Unexpected keys: %v", present) + } + return len(present) == 0 +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/errorcondition_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/errorcondition_test.go new file mode 100644 index 0000000000..179d5d4bda --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/errorcondition_test.go @@ -0,0 +1,229 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ptest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +const ( + containerMissing = "ContainerMissing" +) + +// TestContainerErrorMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container image missing scenario. +func TestContainerErrorMsg(t *testing.T) { + t.Parallel() + if strings.HasSuffix(strings.Split(ptest.Flags.DockerRepo, "/")[0], ".local") { + t.Skip("Skipping for local docker repo") + } + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.InvalidHelloWorld, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Specify an invalid image path + // A valid DockerRepo is still needed, otherwise will get UNAUTHORIZED instead of container missing error + t.Logf("Creating a new Service %s", names.Service) + svc, err := v1a1test.CreateLatestService(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + names.Config = serviceresourcenames.Configuration(svc) + names.Route = serviceresourcenames.Route(svc) + + manifestUnknown := string(transport.ManifestUnknownErrorCode) + t.Log("When the imagepath is invalid, the Configuration should have error status.") + + // Wait for ServiceState becomes NotReady. It also waits for the creation of Configuration. 
+ if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceNotReady, "ServiceIsNotReady"); err != nil { + t.Fatalf("The Service %s was unexpected state: %v", names.Service, err) + } + + // Checking for "Container image not present in repository" scenario defined in error condition spec + err = v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(r *v1alpha1.Configuration) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, manifestUnknown) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, manifestUnknown, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ContainerImageNotPresent") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the imagepath is invalid, the revision should have error status.") + err = v1a1test.WaitForRevisionState(clients.ServingAlphaClient, revisionName, func(r *v1alpha1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.RevisionConditionReady) + if cond != nil { + if cond.Reason == containerMissing && strings.Contains(cond.Message, manifestUnknown) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, containerMissing, manifestUnknown, cond.Reason, cond.Message) + } + return 
false, nil + }, "ImagePathInvalid") + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + + t.Log("When the revision has error condition, route should not be ready.") + err = v1a1test.CheckRouteState(clients.ServingAlphaClient, names.Route, v1a1test.IsRouteNotReady) + if err != nil { + t.Fatalf("the Route %s was not desired state: %v", names.Route, err) + } +} + +// TestContainerExitingMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container crashing scenario. +func TestContainerExitingMsg(t *testing.T) { + t.Parallel() + const ( + // The given image will always exit with an exit code of 5 + exitCodeReason = "ExitCode5" + // ... and will print "Crashed..." before it exits + errorLog = "Crashed..." + ) + + tests := []struct { + Name string + ReadinessProbe *corev1.Probe + }{{ + Name: "http", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + }, { + Name: "tcp", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.Name, func(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.Failing, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Configuration %s", names.Config) + + if _, err := v1a1test.CreateConfiguration(t, clients, names, v1a1opts.WithConfigReadinessProbe(tt.ReadinessProbe)); err != nil { + t.Fatalf("Failed to create configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the Configuration should have error status.") + + err := v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(r *v1alpha1.Configuration) (bool, error) { + cond := 
r.Status.GetCondition(v1alpha1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, errorLog) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, errorLog, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ConfigContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the revision should have error status.") + err = v1a1test.WaitForRevisionState(clients.ServingAlphaClient, revisionName, func(r *v1alpha1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.RevisionConditionReady) + if cond != nil { + if cond.Reason == exitCodeReason && strings.Contains(cond.Message, errorLog) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, exitCodeReason, errorLog, cond.Reason, cond.Message) + } + return false, nil + }, "RevisionContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + }) + } +} + +// Get revision name from configuration. 
+func getRevisionFromConfiguration(clients *test.Clients, configName string) (string, error) { + config, err := clients.ServingAlphaClient.Configs.Get(configName, metav1.GetOptions{}) + if err != nil { + return "", err + } + if config.Status.LatestCreatedRevisionName != "" { + return config.Status.LatestCreatedRevisionName, nil + } + return "", fmt.Errorf("No valid revision name found in configuration %s", configName) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/generatename_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/generatename_test.go new file mode 100644 index 0000000000..39504f0e83 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/generatename_test.go @@ -0,0 +1,206 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + "net/url" + "regexp" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func setServiceGenerateName(generateName string) rtesting.ServiceOption { + return func(service *v1alpha1.Service) { + service.ObjectMeta.GenerateName = generateName + } +} + +func setConfigurationGenerateName(generateName string) rtesting.ConfigOption { + return func(config *v1alpha1.Configuration) { + config.ObjectMeta.GenerateName = generateName + } +} + +func setRouteGenerateName(generateName string) rtesting.RouteOption { + return func(route *v1alpha1.Route) { + route.ObjectMeta.GenerateName = generateName + } +} + +// generateNamePrefix returns the object name to be used for testing, shorted to +// 44 characters to avoid #3236, as generateNames longer than 44 characters may cause +// some knative resources to never become ready. +func generateNamePrefix(t *testing.T) string { + generateName := test.ObjectNameForTest(t) + "-" + + generateNameLength := len(generateName) + if generateNameLength > 44 { + generateNameLength = 44 + } + return generateName[0:generateNameLength] +} + +// validateName checks that a name generated using a generateName is valid. It checks +// 1. The generateName is a prefix of the name, but they are not equal +// 2. Any number of valid name characters (alphanumeric, -, and .) are added togenerateName to +// create the value of name. 
+func validateName(generateName, name string) error { + r := regexp.MustCompile("^" + regexp.QuoteMeta(generateName) + "[a-zA-Z0-9\\-.]+$") + + if !r.MatchString(name) { + return fmt.Errorf("generated name = %q, want to match %q", name, r.String()) + } + return nil +} + +func canServeRequests(t *testing.T, clients *test.Clients, route *v1alpha1.Route) error { + t.Logf("Route %s has a domain set in its status", route.Name) + var url *url.URL + err := v1a1test.WaitForRouteState( + clients.ServingAlphaClient, + route.Name, + func(r *v1alpha1.Route) (bool, error) { + url = r.Status.URL.URL() + return url != nil, nil + }, + "RouteDomain", + ) + if err != nil { + return fmt.Errorf("route did not get assigned an URL : %w", err) + } + + t.Logf("Route %s can serve the expected data at %s", route.Name, url) + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", route.Name, url, test.HelloWorldText, err) + } + + return nil +} + +// TestServiceGenerateName checks that knative Services MAY request names generated by +// the system using metadata.generateName. It ensures that knative Services created this way can become ready +// and serve requests. +func TestServiceGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + // Cleanup on test failure. + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + // Create the service using the generate name field. If the service does not become ready this will fail. 
+ t.Logf("Creating new service with generateName %s", generateName) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + setServiceGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create service with generateName %s: %v", generateName, err) + } + + // Ensure that the name given to the service is generated from the generateName field. + t.Log("When the service is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Service); err != nil { + t.Errorf("Illegal name generated for service %s: %v", names.Service, err) + } + + // Ensure that the service can serve requests + err = canServeRequests(t, clients, resources.Route) + if err != nil { + t.Errorf("Service %s could not serve requests: %v", names.Service, err) + } +} + +// TestRouteAndConfiguration checks that both routes and configurations MAY request names generated by +// the system using metadata.generateName. It ensures that routes and configurations created this way both: +// 1. Become ready +// 2. Can serve requests. +func TestRouteAndConfigGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + t.Logf("Creating new configuration with generateName %s", generateName) + config, err := v1a1test.CreateConfiguration(t, clients, names, setConfigurationGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create configuration with generateName %s: %v", generateName, err) + } + names.Config = config.Name + + // Ensure the associated revision is created. This also checks that the configuration becomes ready. 
+ t.Log("The configuration will be updated with the name of the associated Revision once it is created.") + names.Revision, err = v1a1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + // Ensure that the name given to the configuration is generated from the generate name field. + t.Log("When the configuration is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Config); err != nil { + t.Errorf("Illegal name generated for configuration %s: %v", names.Config, err) + } + + // Create a route that maps to the revision created by the configuration above + t.Logf("Create new Route with generateName %s", generateName) + route, err := v1a1test.CreateRoute(t, clients, names, setRouteGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create route with generateName %s: %v", generateName, err) + } + names.Route = route.Name + + t.Log("When the route is created, it will become ready") + if err := v1a1test.WaitForRouteState(clients.ServingAlphaClient, names.Route, v1a1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("Error waiting for the route %s to become ready: %v", names.Route, err) + } + + // Ensure that the name given to the route is generated from the generate name field + t.Log("When the route is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Route); err != nil { + t.Errorf("Illegal name generated for route %s: %v", names.Route, err) + } + + // Ensure that the generated route endpoint can serve requests + if err := canServeRequests(t, clients, route); err != nil { + t.Errorf("Configuration %s with Route %s could not serve requests: %v", names.Config, names.Route, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/main_test.go 
b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/main_test.go new file mode 100644 index 0000000000..72f7c212bd --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/main_test.go @@ -0,0 +1,33 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "flag" + "os" + "testing" + + pkgTest "knative.dev/pkg/test" +) + +func TestMain(m *testing.M) { + flag.Parse() + pkgTest.SetupLoggingFlags() + os.Exit(m.Run()) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/resources_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/resources_test.go new file mode 100644 index 0000000000..bd8ffdd68d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/resources_test.go @@ -0,0 +1,118 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestCustomResourcesLimits(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + t.Log("Creating a new Route and Configuration") + resources := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + } + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Autoscale, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithResourceRequirements(resources)) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + endpoint := objects.Route.Status.URL.URL() + + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + endpoint, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK)), + "ResourceTestServesText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error probing %s: %v", endpoint, err) + } + + sendPostRequest := func(resolvableDomain bool, url *url.URL) (*spoof.Response, error) { + t.Logf("Request %s", url) + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), resolvableDomain) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url.String(), nil) + if err != nil { + return nil, err + } + return client.Do(req) + } + 
+ pokeCowForMB := func(mb int) error { + u, _ := url.Parse(endpoint.String()) + q := u.Query() + q.Set("bloat", fmt.Sprintf("%d", mb)) + u.RawQuery = q.Encode() + response, err := sendPostRequest(test.ServingFlags.ResolvableDomain, u) + if err != nil { + return err + } + if response.StatusCode != http.StatusOK { + return fmt.Errorf("StatusCode = %d, want %d", response.StatusCode, http.StatusOK) + } + return nil + } + + t.Log("Querying the application to see if the memory limits are enforced.") + if err := pokeCowForMB(100); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 100, err) + } + + if err := pokeCowForMB(200); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 200, err) + } + + if err := pokeCowForMB(500); err == nil { + t.Fatalf("We shouldn't have got a response from bloating cow with %d MBs of Memory: %v", 500, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/revision_timeout_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/revision_timeout_test.go new file mode 100644 index 0000000000..57c4659923 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/revision_timeout_test.go @@ -0,0 +1,246 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/mattbaird/jsonpatch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// createLatestService creates a service in namespace with the name names.Service +// that uses the image specified by names.Image +func createLatestService(t *testing.T, clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int64) (*v1alpha1.Service, error) { + service := v1a1test.LatestService(names, WithRevisionTimeoutSeconds(revisionTimeoutSeconds)) + v1a1test.LogResourceObject(t, v1a1test.ResourceObjects{Service: service}) + svc, err := clients.ServingAlphaClient.Services.Create(service) + return svc, err +} + +func updateServiceWithTimeout(clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int) error { + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/spec/template/spec/timeoutSeconds", + Value: revisionTimeoutSeconds, + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + return err + } + _, err = clients.ServingAlphaClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "") + if err != nil { + return err + } + return nil +} + +// sendRequests send a request to "endpoint", returns error if unexpected response code, nil otherwise. 
+func sendRequest(t *testing.T, clients *test.Clients, endpoint *url.URL, initialSleepSeconds int, sleepSeconds int, expectedResponseCode int) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, endpoint.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Logf("Spoofing client failed: %v", err) + return err + } + + initialSleepMs := initialSleepSeconds * 1000 + sleepMs := sleepSeconds * 1000 + + start := time.Now().UnixNano() + defer func() { + end := time.Now().UnixNano() + t.Logf("URL: %v, initialSleep: %v, sleep: %v, request elapsed %.2f ms", endpoint, initialSleepMs, sleepMs, float64(end-start)/1e6) + }() + u, _ := url.Parse(endpoint.String()) + q := u.Query() + q.Set("initialTimeout", fmt.Sprintf("%d", initialSleepMs)) + q.Set("timeout", fmt.Sprintf("%d", sleepMs)) + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + t.Logf("Failed new request: %v", err) + return err + } + + resp, err := client.Do(req) + if err != nil { + t.Logf("Failed request err: %v", err) + return err + } + + t.Logf("Response status code: %v, expected: %v", resp.StatusCode, expectedResponseCode) + if expectedResponseCode != resp.StatusCode { + return fmt.Errorf("got response status code %v, wanted %v", resp.StatusCode, expectedResponseCode) + } + return nil +} + +func TestRevisionTimeout(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + var rev2s, rev5s test.ResourceNames + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Timeout, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service in runLatest") + svc, err := createLatestService(t, clients, names, 2) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + t.Log("The Service will be 
updated with the name of the Revision once it is created") + revisionName, err := v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the new revision: %v", names.Service, err) + } + rev2s.Revision = revisionName + + t.Log("When the Service reports as Ready, everything should be ready") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic to Revision %s: %v", names.Service, names.Revision, err) + } + + t.Log("Updating the Service to use a different revision timeout") + err = updateServiceWithTimeout(clients, names, 5) + if err != nil { + t.Fatalf("Patch update for Service %s with new timeout 5s failed: %v", names.Service, err) + } + + // getNextRevisionName waits for names.Revision to change, so we set it to the rev2s revision and wait for the (new) rev5s revision. + names.Revision = rev2s.Revision + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + rev5s.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision with timeout 5s: %v", names.Service, err) + } + + t.Logf("Waiting for revision %q to be ready", rev2s.Revision) + if err := v1a1test.WaitForRevisionState(clients.ServingAlphaClient, rev2s.Revision, v1a1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev2s.Revision, err) + } + t.Logf("Waiting for revision %q to be ready", rev5s.Revision) + if err := v1a1test.WaitForRevisionState(clients.ServingAlphaClient, rev5s.Revision, v1a1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev5s.Revision, err) + } + + // Set names for traffic targets to make them directly routable. 
+ rev2s.TrafficTarget = "rev2s" + rev5s.TrafficTarget = "rev5s" + + t.Log("Updating RouteSpec") + if _, err := v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: rev2s.TrafficTarget, + RevisionName: rev2s.Revision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: rev5s.TrafficTarget, + RevisionName: rev5s.Revision, + Percent: ptr.Int64(50), + }, + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service to be ready") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err := clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var rev2sURL, rev5sURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == rev2s.TrafficTarget { + rev2sURL = tt.URL.URL() + } + if tt.Tag == rev5s.TrafficTarget { + rev5sURL = tt.URL.URL() + } + } + if rev2sURL == nil || rev5sURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + + t.Logf("Probing %s", rev5sURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + rev5sURL, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", rev5sURL, err) + } + + // Quick sanity check + if err := sendRequest(t, clients, rev2sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 0s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with 
sleep 0s with revision timeout 5s: %v", err) + } + + // Fail by surpassing the initial timeout. + if err := sendRequest(t, clients, rev2sURL, 5, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 5s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 7, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 7s with revision timeout 5s: %v", err) + } + + // Not fail by not surpassing in the initial timeout, but in the overall request duration. + if err := sendRequest(t, clients, rev2sURL, 1, 3, http.StatusOK); err != nil { + t.Errorf("Did not fail request with sleep 1s/3s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 3, 3, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 3s/3s with revision timeout 5s: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/route_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/route_test.go new file mode 100644 index 0000000000..89cdbad15a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/route_test.go @@ -0,0 +1,154 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "net/url" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func assertResourcesUpdatedWhenRevisionIsReady(t *testing.T, clients *test.Clients, names test.ResourceNames, url *url.URL, expectedGeneration, expectedText string) { + t.Log("When the Route reports as Ready, everything should be ready.") + if err := v1a1test.WaitForRouteState(clients.ServingAlphaClient, names.Route, v1a1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("The Route %s was not marked as Ready to serve traffic to Revision %s: %v", names.Route, names.Revision, err) + } + + // TODO(#1178): Remove "Wait" from all checks below this point. + t.Log("Serves the expected data at the endpoint") + + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, url, expectedText, err) + } + + // We want to verify that the endpoint works as soon as Ready: True, but there are a bunch of other pieces of state that we validate for conformance. 
+ t.Log("The Revision will be marked as Ready when it can serve traffic") + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, v1a1test.IsRevisionReady) + if err != nil { + t.Fatalf("Revision %s did not become ready to serve traffic: %v", names.Revision, err) + } + t.Log("The Revision will be annotated with the generation") + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, v1a1test.IsRevisionAtExpectedGeneration(expectedGeneration)) + if err != nil { + t.Fatalf("Revision %s did not have an expected annotation with generation %s: %v", names.Revision, expectedGeneration, err) + } + t.Log("Updates the Configuration that the Revision is ready") + err = v1a1test.CheckConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + return c.Status.LatestReadyRevisionName == names.Revision, nil + }) + if err != nil { + t.Fatalf("The Configuration %s was not updated indicating that the Revision %s was ready: %v", names.Config, names.Revision, err) + } + t.Log("Updates the Route to route traffic to the Revision") + err = v1a1test.CheckRouteState(clients.ServingAlphaClient, names.Route, v1a1test.AllRouteTrafficAtRevision(names)) + if err != nil { + t.Fatalf("The Route %s was not updated to route traffic to the Revision %s: %v", names.Route, names.Revision, err) + } +} + +func getRouteURL(clients *test.Clients, names test.ResourceNames) (*url.URL, error) { + var url *url.URL + + err := v1a1test.WaitForRouteState( + clients.ServingAlphaClient, + names.Route, + func(r *v1alpha1.Route) (bool, error) { + if r.Status.URL == nil { + return false, nil + } + url = r.Status.URL.URL() + return url != nil, nil + }, + "RouteURL", + ) + return url, err +} + +func TestRouteCreation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + var objects v1a1test.ResourceObjects + svcName := test.ObjectNameForTest(t) + names := test.ResourceNames{ + Config: svcName, + Route: svcName, + 
TrafficTarget: svcName, + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Route and Configuration") + config, err := v1a1test.CreateConfiguration(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Configuration: %v", err) + } + objects.Config = config + + route, err := v1a1test.CreateRoute(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Route: %v", err) + } + objects.Route = route + + t.Log("The Configuration will be updated with the name of the Revision") + names.Revision, err = v1a1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + url, err := getRouteURL(clients, names) + if err != nil { + t.Fatalf("Failed to get URL from route %s: %v", names.Route, err) + } + + t.Logf("The Route URL is: %s", url) + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "1", test.PizzaPlanetText1) + + // We start a prober at background thread to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, url) + defer test.AssertProberDefault(t, prober) + + t.Log("Updating the Configuration to use a different image") + objects.Config, err = v1a1test.PatchConfigImage(clients, objects.Config, pkgTest.ImagePath(test.PizzaPlanet2)) + if err != nil { + t.Fatalf("Patch update for Configuration %s with new image %s failed: %v", names.Config, test.PizzaPlanet2, err) + } + + t.Log("Since the Configuration was updated a new Revision will be created and the Configuration will be updated") + names.Revision, err = v1a1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the Revision for image %s: %v", names.Config, test.PizzaPlanet2, err) + } + + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "2", test.PizzaPlanetText2) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/service_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/service_test.go new file mode 100644 index 0000000000..cfffd01da8 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/service_test.go @@ -0,0 +1,626 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// TestRunLatestService tests both Creation and Update paths of a runLatest service. The test performs a series of Update/Validate steps to ensure that +// the service transitions as expected during each step. +// Currently the test performs the following updates: +// 1. Update Container Image +// 2. Update Metadata +// a. Update Labels +// b. Update Annotations +func TestRunLatestService(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1a1test.PatchServiceImage(t, clients, objects.Service, image2); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate State after Image Update + if err = validateRunLatestControlPlane(t, clients, names, "2"); err != nil { + t.Error(err) + } + if err = validateRunLatestDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } + + // Update Metadata (Labels) + t.Logf("Updating labels of the RevisionTemplateSpec for service %s.", names.Service) + metadata := metav1.ObjectMeta{ + Labels: map[string]string{ + "labelX": "abc", + "labelY": "def", + }, + } + if objects.Service, err = v1a1test.PatchServiceTemplateMetadata(t, clients, objects.Service, metadata); err != nil { + t.Fatalf("Service %s was not updated with labels in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + if names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s after updating labels in its RevisionTemplateSpec: %v", 
names.Service, names.Revision, err) + } + + // Update Metadata (Annotations) + t.Logf("Updating annotations of RevisionTemplateSpec for service %s", names.Service) + metadata = metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotationA": "123", + "annotationB": "456", + }, + } + if objects.Service, err = v1a1test.PatchServiceTemplateMetadata(t, clients, objects.Service, metadata); err != nil { + t.Fatalf("Service %s was not updated with annotation in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("The new revision has not become ready in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate the Service shape. + if err = validateRunLatestControlPlane(t, clients, names, "4"); err != nil { + t.Error(err) + } + if err = validateRunLatestDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } +} + +func waitForDesiredTrafficShape(t *testing.T, sName string, want map[string]v1alpha1.TrafficTarget, clients *test.Clients) error { + return v1a1test.WaitForServiceState( + clients.ServingAlphaClient, sName, func(s *v1alpha1.Service) (bool, error) { + // IsServiceReady never returns an error. + if ok, _ := v1a1test.IsServiceReady(s); !ok { + return false, nil + } + // Match the traffic shape. 
+ got := map[string]v1alpha1.TrafficTarget{} + for _, tt := range s.Status.Traffic { + got[tt.Tag] = tt + } + ignoreURLs := cmpopts.IgnoreFields(v1alpha1.TrafficTarget{}, + "TrafficTarget.URL", "DeprecatedName") + if !cmp.Equal(got, want, ignoreURLs) { + t.Logf("For service %s traffic shape mismatch: (-got, +want) %s", + sName, cmp.Diff(got, want, ignoreURLs)) + return false, nil + } + return true, nil + }, "Verify Service Traffic Shape", + ) +} + +func TestRunLatestServiceBYOName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + revName := names.Service + "-byoname" + + // Setup initial Service + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + func(svc *v1alpha1.Service) { + svc.Spec.ConfigurationSpec.GetTemplate().Name = revName + }) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + if got, want := names.Revision, revName; got != want { + t.Errorf("CreateRunLatestServiceReady() = %s, wanted %s", got, want) + } + + // Validate State after Creation + + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1a1test.PatchServiceImage(t, clients, objects.Service, image2); err == nil { + t.Fatalf("Patch update for Service %s didn't fail.", names.Service) + } +} + +// TestReleaseService creates a Service with a variety of "release"-like traffic shapes. +// Currently tests for the following combinations: +// 1. One Revision Specified, current == latest +// 2. One Revision Specified, current != latest +// 3. Two Revisions Specified, 50% rollout, candidate == latest +// 4. Two Revisions Specified, 50% rollout, candidate != latest +// 5. Two Revisions Specified, 50% rollout, candidate != latest, candidate is configurationName. +func TestReleaseService(t *testing.T) { + t.Parallel() + // Create Initial Service + clients := test.Setup(t) + releaseImagePath2 := pkgTest.ImagePath(test.PizzaPlanet2) + releaseImagePath3 := pkgTest.ImagePath(test.HelloWorld) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Expected Text for different revisions. 
+ const ( + expectedFirstRev = test.PizzaPlanetText1 + expectedSecondRev = test.PizzaPlanetText2 + expectedThirdRev = test.HelloWorldText + ) + + // Setup initial Service + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + t.Log("Validating service shape.") + if err := validateReleaseServiceShape(objects); err != nil { + t.Fatalf("Release shape is incorrect: %v", err) + } + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + firstRevision := names.Revision + + // 1. One Revision Specified, current == latest. + t.Log("1. Updating Service to ReleaseType using lastCreatedRevision") + objects.Service, err = v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(100), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + }, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape := map[string]v1alpha1.TrafficTarget{ + "current": { + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + }, + "latest": { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + LatestRevision: ptr.Bool(true), + }, + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Service traffic should go to the first revision 
and be available on two names traffic targets: 'current' and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedFirstRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 2. One Revision Specified, current != latest. + t.Log("2. Updating the Service Spec with a new image") + if objects.Service, err = v1a1test.PatchServiceImage(t, clients, objects.Service, releaseImagePath2); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath2, err) + } + + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + secondRevision := names.Revision + + // Also verify traffic is in the correct shape. + desiredTrafficShape["latest"] = v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Since the Service is using release the Route will not be updated, but new revision will be available at 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 3. Two Revisions Specified, 50% rollout, candidate == latest. + t.Log("3. 
Updating Service to split traffic between two revisions using Release mode") + objects.Service, err = v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + }, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape = map[string]v1alpha1.TrafficTarget{ + "current": { + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }, + "candidate": { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + }, + "latest": { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + }, + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should be split between the two revisions and available on three named traffic targets, 'current', 'candidate', and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"candidate", "latest", "current"}, + []string{expectedSecondRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 4. Two Revisions Specified, 50% rollout, candidate != latest. + t.Log("4. 
Updating the Service Spec with a new image") + if objects.Service, err = v1a1test.PatchServiceImage(t, clients, objects.Service, releaseImagePath3); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath3, err) + } + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + thirdRevision := names.Revision + + desiredTrafficShape["latest"] = v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + RevisionName: thirdRevision, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should remain between the two images, and the new revision should be available on the named traffic target 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // Now update the service to use `@latest` as candidate. + t.Log("5. 
Updating Service to split traffic between two `current` and `@latest`") + + objects.Service, err = v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: "latest", + Percent: nil, + }, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + // Verify in the end it's still the case. + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // `candidate` now points to the latest. + desiredTrafficShape["candidate"] = v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: "candidate", + RevisionName: thirdRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedThirdRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedThirdRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } +} + +func TestAnnotationPropagation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running 
with https */) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + desiredSvc := objects.Service.DeepCopy() + desiredSvc.Annotations["juicy"] = "jamba" + if objects.Service, err = v1a1test.PatchService(t, clients, objects.Service, desiredSvc); err != nil { + t.Fatalf("Service %s was not updated with new annotation: %v", names.Service, err) + } + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1a1test.PatchServiceImage(t, clients, objects.Service, image2); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1a1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. + if err := validateAnnotations(objects, "juicy"); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + // Test annotation deletion. 
+ desiredSvc = objects.Service.DeepCopy() + delete(desiredSvc.Annotations, "juicy") + if objects.Service, err = v1a1test.PatchService(t, clients, objects.Service, desiredSvc); err != nil { + t.Fatalf("Service %s was not updated with deleted annotation: %v", names.Service, err) + } + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. + t.Log("Updating the Service to use a different image.") + names.Image = test.HelloWorld + image3 := pkgTest.ImagePath(test.HelloWorld) + if _, err := v1a1test.PatchServiceImage(t, clients, objects.Service, image3); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image3, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1a1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. 
+ if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + if _, ok := objects.Config.Annotations["juicy"]; ok { + t.Error("Config still has `juicy` annotation") + } + if _, ok := objects.Route.Annotations["juicy"]; ok { + t.Error("Route still has `juicy` annotation") + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/single_threaded_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/single_threaded_test.go new file mode 100644 index 0000000000..d81d5bb21f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/single_threaded_test.go @@ -0,0 +1,108 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + "time" + + "golang.org/x/sync/errgroup" + + pkgTest "knative.dev/pkg/test" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestSingleConcurrency(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.SingleThreadedImage, + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithContainerConcurrency(1)) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + url := objects.Service.Status.URL.URL() + + // Ready does not actually mean Ready for a Route just yet. + // See https://github.com/knative/serving/issues/1582 + t.Logf("Probing %s", url) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", url, err) + } + + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + concurrency := 5 + duration := 20 * time.Second + t.Logf("Maintaining %d concurrent requests for %v.", concurrency, duration) + group, _ := errgroup.WithContext(context.Background()) + for i := 0; i < concurrency; i++ { + group.Go(func() error { + done := time.After(duration) + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return fmt.Errorf("error creating http request: %w", err) + } + + for { + select { 
+ case <-done: + return nil + default: + res, err := client.Do(req) + if err != nil { + return fmt.Errorf("error making request %w", err) + } + if res.StatusCode == http.StatusInternalServerError { + return errors.New("detected concurrent requests") + } else if res.StatusCode != http.StatusOK { + return fmt.Errorf("non 200 response %v", res.StatusCode) + } + } + } + }) + } + t.Log("Waiting for all requests to complete.") + if err := group.Wait(); err != nil { + t.Fatalf("Error making requests for single threaded test: %v.", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/util.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/util.go new file mode 100644 index 0000000000..fb517e5d14 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/util.go @@ -0,0 +1,339 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "math" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + "golang.org/x/sync/errgroup" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func waitForExpectedResponse(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, expectedResponse string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + _, err = client.Poll(req, v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedResponse)))) + return err +} + +func validateDomains(t pkgTest.TLegacy, clients *test.Clients, baseDomain *url.URL, + baseExpected, trafficTargets, targetsExpected []string) error { + var subdomains []*url.URL + for _, target := range trafficTargets { + subdomain, _ := url.Parse(baseDomain.String()) + subdomain.Host = target + "-" + baseDomain.Host + subdomains = append(subdomains, subdomain) + } + + g, _ := errgroup.WithContext(context.Background()) + // We don't have a good way to check if the route is updated so we will wait until a subdomain has + // started returning at least one expected result to key that we should validate percentage splits. + // In order for tests to succeed reliably, we need to make sure that all domains succeed. + for _, resp := range baseExpected { + // Check for each of the responses we expect from the base domain. 
+ resp := resp + g.Go(func() error { + t.Logf("Waiting for route to update %s", baseDomain) + return waitForExpectedResponse(t, clients, baseDomain, resp) + }) + } + for i, s := range subdomains { + i, s := i, s + g.Go(func() error { + t.Logf("Waiting for route to update %s", s) + return waitForExpectedResponse(t, clients, s, targetsExpected[i]) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error with initial domain probing: %w", err) + } + + g.Go(func() error { + minBasePercentage := test.MinSplitPercentage + if len(baseExpected) == 1 { + minBasePercentage = test.MinDirectPercentage + } + min := int(math.Floor(test.ConcurrentRequests * minBasePercentage)) + return checkDistribution(t, clients, baseDomain, test.ConcurrentRequests, min, baseExpected) + }) + for i, subdomain := range subdomains { + i, subdomain := i, subdomain + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, subdomain, test.ConcurrentRequests, min, []string{targetsExpected[i]}) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error checking routing distribution: %w", err) + } + return nil +} + +// checkDistribution sends "num" requests to "domain", then validates that +// we see each body in "expectedResponses" at least "min" times. +func checkDistribution(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, num, min int, expectedResponses []string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + + t.Logf("Performing %d concurrent requests to %s", num, url) + actualResponses, err := sendRequests(client, url, num) + if err != nil { + return err + } + + return checkResponses(t, num, min, url.Hostname(), expectedResponses, actualResponses) +} + +// checkResponses verifies that each "expectedResponse" is present in "actualResponses" at least "min" times. 
+func checkResponses(t pkgTest.TLegacy, num int, min int, domain string, expectedResponses []string, actualResponses []string) error { + // counts maps the expected response body to the number of matching requests we saw. + counts := make(map[string]int) + // badCounts maps the unexpected response body to the number of matching requests we saw. + badCounts := make(map[string]int) + + // counts := eval( + // SELECT body, count(*) AS total + // FROM $actualResponses + // WHERE body IN $expectedResponses + // GROUP BY body + // ) + for _, ar := range actualResponses { + expected := false + for _, er := range expectedResponses { + if strings.Contains(ar, er) { + counts[er]++ + expected = true + } + } + if !expected { + badCounts[ar]++ + } + } + + // Verify that we saw each entry in "expectedResponses" at least "min" times. + // check(SELECT body FROM $counts WHERE total < $min) + totalMatches := 0 + for _, er := range expectedResponses { + count := counts[er] + if count < min { + return fmt.Errorf("domain %s failed: want at least %d, got %d for response %q", domain, min, count, er) + } + + t.Logf("For domain %s: wanted at least %d, got %d requests.", domain, min, count) + totalMatches += count + } + // Verify that the total expected responses match the number of requests made. + for badResponse, count := range badCounts { + t.Logf("Saw unexpected response %q %d times.", badResponse, count) + } + if totalMatches < num { + return fmt.Errorf("domain %s: saw expected responses %d times, wanted %d", domain, totalMatches, num) + } + // If we made it here, the implementation conforms. Congratulations! + return nil +} + +// sendRequests sends "num" requests to "url", returning a string for each spoof.Response.Body. +func sendRequests(client spoof.Interface, url *url.URL, num int) ([]string, error) { + responses := make([]string, num) + + // Launch "num" requests, recording the responses we get in "responses". 
+ g, _ := errgroup.WithContext(context.Background()) + for i := 0; i < num; i++ { + // We don't index into "responses" inside the goroutine to avoid a race, see #1545. + result := &responses[i] + g.Go(func() error { + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + + resp, err := client.Do(req) + if err != nil { + return err + } + + *result = string(resp.Body) + return nil + }) + } + return responses, g.Wait() +} + +// Validates service health and vended content match for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. +func validateRunLatestDataPlane(t pkgTest.TLegacy, clients *test.Clients, names test.ResourceNames, expectedText string) error { + t.Logf("Checking that the endpoint vends the expected text: %s", expectedText) + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + names.URL, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", names.Route, names.URL.String(), expectedText, err) + } + + return nil +} + +// Validates the state of Configuration, Revision, and Route objects for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. 
+func validateRunLatestControlPlane(t pkgTest.T, clients *test.Clients, names test.ResourceNames, expectedGeneration string) error { + t.Log("Checking to ensure Revision is in desired state with", "generation", expectedGeneration) + err := v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, func(r *v1alpha1.Revision) (bool, error) { + if ready, err := v1a1test.IsRevisionReady(r); !ready { + return false, fmt.Errorf("revision %s did not become ready to serve traffic: %w", names.Revision, err) + } + if r.Status.ImageDigest == "" { + return false, fmt.Errorf("imageDigest not present for revision %s", names.Revision) + } + if validDigest, err := validateImageDigest(names.Image, r.Status.ImageDigest); !validDigest { + return false, fmt.Errorf("imageDigest %s is not valid for imageName %s: %w", r.Status.ImageDigest, names.Image, err) + } + return true, nil + }) + if err != nil { + return err + } + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, names.Revision, v1a1test.IsRevisionAtExpectedGeneration(expectedGeneration)) + if err != nil { + return fmt.Errorf("revision %s did not have an expected annotation with generation %s: %w", names.Revision, expectedGeneration, err) + } + + t.Log("Checking to ensure Configuration is in desired state.") + err = v1a1test.CheckConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was created: %w", names.Config, names.Revision, err) + } + if c.Status.LatestReadyRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was ready: %w", names.Config, names.Revision, err) + } + return true, nil + }) + if err != nil { + return err + } + + t.Log("Checking to ensure Route is in desired state with", "generation", expectedGeneration) 
+ err = v1a1test.CheckRouteState(clients.ServingAlphaClient, names.Route, v1a1test.AllRouteTrafficAtRevision(names)) + if err != nil { + return fmt.Errorf("the Route %s was not updated to route traffic to the Revision %s: %w", names.Route, names.Revision, err) + } + + return nil +} + +// Validates labels on Revision, Configuration, and Route objects when created by a Service +// see spec here: https://github.com/knative/serving/blob/master/docs/spec/spec.md#revision +func validateLabelsPropagation(t pkgTest.T, objects v1a1test.ResourceObjects, names test.ResourceNames) error { + t.Log("Validate Labels on Revision Object") + revision := objects.Revision + + if revision.Labels["serving.knative.dev/configuration"] != names.Config { + return fmt.Errorf("expect Confguration name in Revision label %q but got %q ", names.Config, revision.Labels["serving.knative.dev/configuration"]) + } + if revision.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Revision label %q but got %q ", names.Service, revision.Labels["serving.knative.dev/service"]) + } + + t.Log("Validate Labels on Configuration Object") + config := objects.Config + if config.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Configuration label %q but got %q ", names.Service, config.Labels["serving.knative.dev/service"]) + } + if config.Labels["serving.knative.dev/route"] != names.Route { + return fmt.Errorf("expect Route name in Configuration label %q but got %q ", names.Route, config.Labels["serving.knative.dev/route"]) + } + + t.Log("Validate Labels on Route Object") + route := objects.Route + if route.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Route label %q but got %q ", names.Service, route.Labels["serving.knative.dev/service"]) + } + return nil +} + +func validateAnnotations(objs *v1a1test.ResourceObjects, extraKeys ...string) error { + // This 
checks whether the annotations are set on the resources that + // expect them to have. + // List of issues listing annotations that we check: #1642. + + anns := objs.Service.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("service expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Route.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("route expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Config.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("config expected %s annotation to be set, but was empty", a) + } + } + return nil +} + +func validateReleaseServiceShape(objs *v1a1test.ResourceObjects) error { + // Traffic should be routed to the lastest created revision. 
+ if got, want := objs.Service.Status.Traffic[0].RevisionName, objs.Config.Status.LatestReadyRevisionName; got != want { + return fmt.Errorf("Status.Traffic[0].RevisionsName = %s, want: %s", got, want) + } + return nil +} + +func validateImageDigest(imageName string, imageDigest string) (bool, error) { + ref, err := name.ParseReference(pkgTest.ImagePath(imageName)) + if err != nil { + return false, err + } + + digest, err := name.NewDigest(imageDigest) + if err != nil { + return false, err + } + + return ref.Context().String() == digest.Context().String(), nil +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/volumes_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/volumes_test.go new file mode 100644 index 0000000000..5dd5ab6506 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1alpha1/volumes_test.go @@ -0,0 +1,429 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "path" + "path/filepath" + "testing" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// TestConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. 
+func TestConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-exist", + }, + Optional: ptr.Bool(true), + }, + }) + + // Setup initial Service + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + 
t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. +func TestProjectedConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-matter", + }, + Optional: ptr.Bool(true), + }, + }}, + }, + }) + + // Setup initial Service + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withVolume); 
err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "does-not-exist", + Optional: ptr.Bool(true), + }, + }) + + // Setup initial Service + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with 
https */ + withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestProjectedSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + withSubpath := func(svc *v1alpha1.Service) { + vm := &svc.Spec.Template.Spec.Containers[0].VolumeMounts[0] + vm.MountPath = test.HelloVolumePath + vm.SubPath = 
filepath.Base(test.HelloVolumePath) + } + + // Setup initial Service + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withVolume, withSubpath); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateRunLatestDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedComplex tests that we echo back the appropriate text from the complex Projected volume. +func TestProjectedComplex(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text1 := test.ObjectNameForTest(t) + text2 := test.ObjectNameForTest(t) + text3 := test.ObjectNameForTest(t) + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text1, + "other": text2, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. 
+ }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text3, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Items: []corev1.KeyToPath{{ + Key: "other", + Path: "another", + }}, + Optional: ptr.Bool(false), + }, + }, { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Items: []corev1.KeyToPath{{ + Key: filepath.Base(test.HelloVolumePath), + Path: filepath.Base(test.HelloVolumePath), + }}, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + + // Setup initial Service + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateRunLatestControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + // Observation shows that when keys collide, the last source listed wins, + // so for the main key, we should get back text3 (vs. 
text1) + if err = validateRunLatestDataPlane(t, clients, names, text3); err != nil { + t.Error(err) + } + + // Verify that we get multiple files mounted in, in this case from the + // second source, which was partially shadowed in our check above. + names.URL.Path = path.Join(names.URL.Path, "another") + if err = validateRunLatestDataPlane(t, clients, names, text2); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/blue_green_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/blue_green_test.go new file mode 100644 index 0000000000..6d60795a36 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/blue_green_test.go @@ -0,0 +1,164 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "math" + "net/url" + "testing" + + "golang.org/x/sync/errgroup" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + rtesting "knative.dev/serving/pkg/testing/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" +) + +const ( + // This test uses the two pizza planet test images for the blue and green deployment. 
+ expectedBlue = test.PizzaPlanetText1 + expectedGreen = test.PizzaPlanetText2 +) + +// TestBlueGreenRoute verifies that a route configured with a 50/50 traffic split +// between two revisions will (approximately) route traffic evenly between them. +// Also, traffic that targets revisions *directly* will be routed to the correct +// revision 100% of the time. +func TestBlueGreenRoute(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + imagePaths := []string{ + pkgTest.ImagePath(test.PizzaPlanet1), + pkgTest.ImagePath(test.PizzaPlanet2), + } + + // Set Service and Image for names to create the initial service + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Setup Initial Service + t.Log("Creating a new Service in runLatest") + objects, err := v1b1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // The first revision created is "blue" + blue := names + blue.TrafficTarget = "blue" + green := names + green.TrafficTarget = "green" + + t.Log("Updating the Service to use a different image") + service, err := v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(imagePaths[1])) + if err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, imagePaths[1], err) + } + objects.Service = service + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + green.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for image %s: %v", names.Service, imagePaths[1], err) + } + + t.Log("Updating RouteSpec") + if _, err := v1b1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ 
+ Tag: blue.TrafficTarget, + RevisionName: blue.Revision, + Percent: ptr.Int64(50), + }, { + Tag: green.TrafficTarget, + RevisionName: green.Revision, + Percent: ptr.Int64(50), + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err = clients.ServingBetaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var blueURL, greenURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == blue.TrafficTarget { + blueURL = tt.URL.URL() + } + if tt.Tag == green.TrafficTarget { + greenURL = tt.URL.URL() + } + } + if blueURL == nil || greenURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + tealURL := service.Status.URL.URL() + + // Istio network programming takes some time to be effective. Currently Istio + // does not expose a Status, so we rely on probes to know when they are effective. + // Since we are updating the service the teal domain probe will succeed before our changes + // take effect so we probe the green domain. + t.Logf("Probing %s", greenURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + greenURL, + v1b1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", greenURL, err) + } + + // Send concurrentRequests to blueDomain, greenDomain, and tealDomain. 
+ g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinSplitPercentage)) + return checkDistribution(t, clients, tealURL, test.ConcurrentRequests, min, []string{expectedBlue, expectedGreen}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, blueURL, test.ConcurrentRequests, min, []string{expectedBlue}) + }) + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, greenURL, test.ConcurrentRequests, min, []string{expectedGreen}) + }) + if err := g.Wait(); err != nil { + t.Fatalf("Error sending requests: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/configuration_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/configuration_test.go new file mode 100644 index 0000000000..6d450b75bc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/configuration_test.go @@ -0,0 +1,168 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "reflect" + "testing" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" +) + +func TestUpdateConfigurationMetadata(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating new configuration %s", names.Config) + if _, err := v1b1test.CreateConfiguration(t, clients, names); err != nil { + t.Fatalf("Failed to create configuration %s", names.Config) + } + + // Wait for the configuration to actually be ready to not race in the updates below. + if err := v1b1test.WaitForConfigurationState(clients.ServingBetaClient, names.Config, v1b1test.IsConfigurationReady, "ConfigurationIsReady"); err != nil { + t.Fatalf("Configuration %s did not become ready: %v", names.Config, err) + } + + cfg := fetchConfiguration(names.Config, clients, t) + names.Revision = cfg.Status.LatestReadyRevisionName + + t.Logf("Updating labels of Configuration %s", names.Config) + newLabels := map[string]string{ + "labelX": "abc", + "labelY": "def", + } + // Copy over new labels. 
+ if cfg.Labels == nil { + cfg.Labels = newLabels + } else { + for k, v := range newLabels { + cfg.Labels[k] = v + } + } + cfg, err := clients.ServingBetaClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update labels for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationLabelsUpdate(clients, names, cfg.Labels); err != nil { + t.Fatalf("The labels for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + expected := names.Revision + actual := cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating labels for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating labels were not propagated to Revision %s", names.Revision) + err = v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, func(r *v1beta1.Revision) (bool, error) { + // Labels we placed on Configuration should _not_ appear on Revision. + return checkNoKeysPresent(newLabels, r.Labels, t), nil + }) + if err != nil { + t.Errorf("The labels for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } + + t.Logf("Updating annotations of Configuration %s", names.Config) + newAnnotations := map[string]string{ + "annotationA": "123", + "annotationB": "456", + } + if cfg.Annotations == nil { + cfg.Annotations = newAnnotations + } else { + // Copy over new annotations. 
+ for k, v := range newAnnotations { + cfg.Annotations[k] = v + } + } + cfg, err = clients.ServingBetaClient.Configs.Update(cfg) + if err != nil { + t.Fatalf("Failed to update annotations for Configuration %s: %v", names.Config, err) + } + + if err = waitForConfigurationAnnotationsUpdate(clients, names, cfg.Annotations); err != nil { + t.Fatalf("The annotations for Configuration %s were not updated: %v", names.Config, err) + } + + cfg = fetchConfiguration(names.Config, clients, t) + actual = cfg.Status.LatestCreatedRevisionName + if expected != actual { + t.Errorf("Did not expect a new Revision after updating annotations for Configuration %s - expected Revision: %s, actual Revision: %s", + names.Config, expected, actual) + } + + t.Logf("Validating annotations were not propagated to Revision %s", names.Revision) + err = v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, func(r *v1beta1.Revision) (bool, error) { + // Annotations we placed on Configuration should _not_ appear on Revision. 
+ return checkNoKeysPresent(newAnnotations, r.Annotations, t), nil + }) + if err != nil { + t.Errorf("The annotations for Revision %s of Configuration %s should not have been updated: %v", names.Revision, names.Config, err) + } +} + +func fetchConfiguration(name string, clients *test.Clients, t *testing.T) *v1beta1.Configuration { + cfg, err := clients.ServingBetaClient.Configs.Get(name, v1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get configuration %s: %v", name, err) + } + return cfg +} + +func waitForConfigurationLabelsUpdate(clients *test.Clients, names test.ResourceNames, labels map[string]string) error { + return v1b1test.WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Labels, labels) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithLabels") +} + +func waitForConfigurationAnnotationsUpdate(clients *test.Clients, names test.ResourceNames, annotations map[string]string) error { + return v1b1test.WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) { + return reflect.DeepEqual(c.Annotations, annotations) && c.Generation == c.Status.ObservedGeneration, nil + }, "ConfigurationMetadataUpdatedWithAnnotations") +} + +// checkNoKeysPresent returns true if _no_ keys from `expected`, are present in `actual`. +// checkNoKeysPresent will log the offending keys to t.Log. 
+func checkNoKeysPresent(expected map[string]string, actual map[string]string, t *testing.T) bool { + t.Helper() + present := []string{} + for k := range expected { + if _, ok := actual[k]; ok { + present = append(present, k) + } + } + if len(present) != 0 { + t.Logf("Unexpected keys: %v", present) + } + return len(present) == 0 +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/errorcondition_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/errorcondition_test.go new file mode 100644 index 0000000000..a51f3a3388 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/errorcondition_test.go @@ -0,0 +1,231 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ptest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1beta1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + rtesting "knative.dev/serving/pkg/testing/v1beta1" +) + +const ( + containerMissing = "ContainerMissing" +) + +// TestContainerErrorMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container image missing scenario. +func TestContainerErrorMsg(t *testing.T) { + t.Parallel() + if strings.HasSuffix(strings.Split(ptest.Flags.DockerRepo, "/")[0], ".local") { + t.Skip("Skipping for local docker repo") + } + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.InvalidHelloWorld, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Specify an invalid image path + // A valid DockerRepo is still needed, otherwise will get UNAUTHORIZED instead of container missing error + t.Logf("Creating a new Service %s", names.Service) + svc, err := createService(t, clients, names, 2) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + + names.Config = serviceresourcenames.Configuration(svc) + names.Route = serviceresourcenames.Route(svc) + + manifestUnknown := string(transport.ManifestUnknownErrorCode) + t.Log("When the imagepath is invalid, the Configuration should have error status.") + + // Wait for ServiceState becomes NotReady. It also waits for the creation of Configuration. 
+ if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceNotReady, "ServiceIsNotReady"); err != nil { + t.Fatalf("The Service %s was unexpected state: %v", names.Service, err) + } + + // Checking for "Container image not present in repository" scenario defined in error condition spec + err = v1b1test.WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(r *v1beta1.Configuration) (bool, error) { + cond := r.Status.GetCondition(v1beta1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, manifestUnknown) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, manifestUnknown, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ContainerImageNotPresent") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the imagepath is invalid, the revision should have error status.") + err = v1b1test.WaitForRevisionState(clients.ServingBetaClient, revisionName, func(r *v1beta1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1beta1.RevisionConditionReady) + if cond != nil { + if cond.Reason == containerMissing && strings.Contains(cond.Message, manifestUnknown) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, containerMissing, manifestUnknown, cond.Reason, cond.Message) + } + return false, 
nil + }, "ImagePathInvalid") + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + + t.Log("Checking to ensure Route is in desired state") + err = v1b1test.CheckRouteState(clients.ServingBetaClient, names.Route, v1b1test.IsRouteNotReady) + if err != nil { + t.Fatalf("the Route %s was not desired state: %v", names.Route, err) + } +} + +// TestContainerExitingMsg is to validate the error condition defined at +// https://github.com/knative/serving/blob/master/docs/spec/errors.md +// for the container crashing scenario. +func TestContainerExitingMsg(t *testing.T) { + t.Parallel() + const ( + // The given image will always exit with an exit code of 5 + exitCodeReason = "ExitCode5" + // ... and will print "Crashed..." before it exits + errorLog = "Crashed..." + ) + + tests := []struct { + Name string + ReadinessProbe *corev1.Probe + }{{ + Name: "http", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + }, { + Name: "tcp", + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.Name, func(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Config: test.ObjectNameForTest(t), + Image: test.Failing, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Configuration %s", names.Config) + + if _, err := v1b1test.CreateConfiguration(t, clients, names, rtesting.WithConfigReadinessProbe(tt.ReadinessProbe)); err != nil { + t.Fatalf("Failed to create configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the Configuration should have error status.") + + err := v1b1test.WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(r *v1beta1.Configuration) (bool, error) { + cond := 
r.Status.GetCondition(v1beta1.ConfigurationConditionReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, errorLog) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("The configuration %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, containerMissing, errorLog, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ConfigContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate configuration state: %s", err) + } + + revisionName, err := getRevisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the containers keep crashing, the revision should have error status.") + err = v1b1test.WaitForRevisionState(clients.ServingBetaClient, revisionName, func(r *v1beta1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1beta1.RevisionConditionReady) + if cond != nil { + if cond.Reason == exitCodeReason && strings.Contains(cond.Message, errorLog) { + return true, nil + } + return true, fmt.Errorf("The revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, exitCodeReason, errorLog, cond.Reason, cond.Message) + } + return false, nil + }, "RevisionContainersCrashing") + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } + }) + } +} + +// Get revision name from configuration. 
+func getRevisionFromConfiguration(clients *test.Clients, configName string) (string, error) { + config, err := clients.ServingBetaClient.Configs.Get(configName, metav1.GetOptions{}) + if err != nil { + return "", err + } + if config.Status.LatestCreatedRevisionName != "" { + return config.Status.LatestCreatedRevisionName, nil + } + return "", fmt.Errorf("No valid revision name found in configuration %s", configName) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/generatename_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/generatename_test.go new file mode 100644 index 0000000000..680110b707 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/generatename_test.go @@ -0,0 +1,204 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + "net/url" + "regexp" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1beta1" + rtesting "knative.dev/serving/pkg/testing/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" +) + +func setServiceGenerateName(generateName string) rtesting.ServiceOption { + return func(service *v1beta1.Service) { + service.ObjectMeta.GenerateName = generateName + } +} + +func setConfigurationGenerateName(generateName string) rtesting.ConfigOption { + return func(config *v1beta1.Configuration) { + config.ObjectMeta.GenerateName = generateName + } +} + +func setRouteGenerateName(generateName string) rtesting.RouteOption { + return func(route *v1beta1.Route) { + route.ObjectMeta.GenerateName = generateName + } +} + +// generateNamePrefix returns the object name to be used for testing, shorted to +// 44 characters to avoid #3236, as generateNames longer than 44 characters may cause +// some knative resources to never become ready. +func generateNamePrefix(t *testing.T) string { + generateName := test.ObjectNameForTest(t) + "-" + + generateNameLength := len(generateName) + if generateNameLength > 44 { + generateNameLength = 44 + } + return generateName[0:generateNameLength] +} + +// validateName checks that a name generated using a generateName is valid. It checks +// 1. The generateName is a prefix of the name, but they are not equal +// 2. Any number of valid name characters (alphanumeric, -, and .) are added togenerateName to +// create the value of name. 
+func validateName(generateName, name string) error { + r := regexp.MustCompile("^" + regexp.QuoteMeta(generateName) + "[a-zA-Z0-9\\-.]+$") + + if !r.MatchString(name) { + return fmt.Errorf("generated name = %q, want to match %q", name, r.String()) + } + return nil +} + +func canServeRequests(t *testing.T, clients *test.Clients, route *v1beta1.Route) error { + t.Logf("Route %s has a domain set in its status", route.Name) + var url *url.URL + err := v1b1test.WaitForRouteState( + clients.ServingBetaClient, + route.Name, + func(r *v1beta1.Route) (bool, error) { + url = r.Status.URL.URL() + return url != nil, nil + }, + "RouteDomain", + ) + if err != nil { + return fmt.Errorf("route did not get assigned a domain: %w", err) + } + + t.Logf("Route %s can serve the expected data at %s", route.Name, url) + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1b1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", route.Name, url, test.HelloWorldText, err) + } + + return nil +} + +// TestServiceGenerateName checks that knative Services MAY request names generated by +// the system using metadata.generateName. It ensures that knative Services created this way can become ready +// and serve requests. +func TestServiceGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + // Cleanup on test failure. + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + // Create the service using the generate name field. If the serivce does not become ready this will fail. 
+ t.Logf("Creating new service with generateName %s", generateName) + resources, err := v1b1test.CreateServiceReady(t, clients, &names, setServiceGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create service with generateName %s: %v", generateName, err) + } + + // Ensure that the name given to the service is generated from the generateName field. + t.Log("When the service is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Service); err != nil { + t.Errorf("Illegal name generated for service %s: %v", names.Service, err) + } + + // Ensure that the service can serve requests + err = canServeRequests(t, clients, resources.Route) + if err != nil { + t.Errorf("Service %s could not serve requests: %v", names.Service, err) + } +} + +// TestRouteAndConfiguration checks that both routes and configurations MAY request names generated by +// the system using metadata.generateName. It ensures that routes and configurations created this way both: +// 1. Become ready +// 2. Can serve requests. +func TestRouteAndConfigGenerateName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + generateName := generateNamePrefix(t) + names := test.ResourceNames{ + Image: test.HelloWorld, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer func() { test.TearDown(clients, names) }() + + t.Logf("Creating new configuration with generateName %s", generateName) + config, err := v1b1test.CreateConfiguration(t, clients, names, setConfigurationGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create configuration with generateName %s: %v", generateName, err) + } + names.Config = config.Name + + // Ensure the associated revision is created. This also checks that the configuration becomes ready. 
+ t.Log("The configuration will be updated with the name of the associated Revision once it is created.") + names.Revision, err = v1b1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + // Ensure that the name given to the configuration is generated from the generate name field. + t.Log("When the configuration is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Config); err != nil { + t.Errorf("Illegal name generated for configuration %s: %v", names.Config, err) + } + + // Create a route that maps to the revision created by the configuration above + t.Logf("Create new Route with generateName %s", generateName) + route, err := v1b1test.CreateRoute(t, clients, names, setRouteGenerateName(generateName)) + if err != nil { + t.Fatalf("Failed to create route with generateName %s: %v", generateName, err) + } + names.Route = route.Name + + t.Log("When the route is created, it will become ready") + if err := v1b1test.WaitForRouteState(clients.ServingBetaClient, names.Route, v1b1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("Error waiting for the route %s to become ready: %v", names.Route, err) + } + + // Ensure that the name given to the route is generated from the generate name field + t.Log("When the route is created, the name is generated using the provided generateName") + if err := validateName(generateName, names.Route); err != nil { + t.Errorf("Illegal name generated for route %s: %v", names.Route, err) + } + + // Ensure that the generated route endpoint can serve requests + if err := canServeRequests(t, clients, route); err != nil { + t.Errorf("Configuration %s with Route %s could not serve requests: %v", names.Config, names.Route, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/main_test.go 
// TestMain is the entry point for the e2e tests in this package: it parses
// the command-line flags, registers the knative/pkg test logging flags
// (cluster, kubeconfig, resolvabledomain, ...), and exits with the result
// of the test run.
func TestMain(m *testing.M) {
	flag.Parse()
	pkgTest.SetupLoggingFlags()
	os.Exit(m.Run())
}
+*/ + +package v1beta1 + +import ( + "encoding/json" + "testing" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "knative.dev/pkg/test/logstream" + v1a1test "knative.dev/serving/test/v1alpha1" + + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" +) + +func TestTranslation(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + // Create a legacy RunLatest service. This should perform conversion during the webhook + // and return back a converted service resource. + service, err := v1a1test.CreateLatestServiceLegacy(t, clients, names) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // Access the service over the v1beta1 endpoint. + v1b1, err := clients.ServingBetaClient.Services.Get(service.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get v1beta1.Service: %v: %v", names.Service, err) + } + + // Access the service over the v1 endpoint. 
+ v1, err := clients.ServingClient.Services.Get(service.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get v1.Service: %v: %v", names.Service, err) + } + + // Check that all PodSpecs match + if !equality.Semantic.DeepEqual(v1b1.Spec.Template.Spec.PodSpec, service.Spec.Template.Spec.PodSpec) { + t.Fatalf("Failed to parse unstructured as v1beta1.Service: %v: %v", names.Service, err) + } + if !equality.Semantic.DeepEqual(v1.Spec.Template.Spec.PodSpec, service.Spec.Template.Spec.PodSpec) { + t.Fatalf("Failed to parse unstructured as v1.Service: %v: %v", names.Service, err) + } +} + +func TestV1beta1Rejection(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + // Create a legacy RunLatest service, but give it the TypeMeta of v1beta1. + service := v1a1test.LatestServiceLegacy(names) + service.APIVersion = v1beta1.SchemeGroupVersion.String() + service.Kind = "Service" + + // Turn it into an unstructured resource for sending through the dynamic client. + b, err := json.Marshal(service) + if err != nil { + t.Fatalf("Failed to marshal v1alpha1.Service: %v: %v", names.Service, err) + } + u := &unstructured.Unstructured{} + if err := json.Unmarshal(b, u); err != nil { + t.Fatalf("Failed to unmarshal as unstructured: %v: %v", names.Service, err) + } + + // Try to create the "run latest" service through v1beta1. + gvr := v1beta1.SchemeGroupVersion.WithResource("services") + svc, err := clients.Dynamic.Resource(gvr).Namespace(service.Namespace). 
+ Create(u, metav1.CreateOptions{}) + if err == nil { + t.Fatalf("Unexpected success creating %#v", svc) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/resources_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/resources_test.go new file mode 100644 index 0000000000..7e1fb67108 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/resources_test.go @@ -0,0 +1,117 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + rtesting "knative.dev/serving/pkg/testing/v1beta1" +) + +func TestCustomResourcesLimits(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + t.Log("Creating a new Route and Configuration") + withResources := rtesting.WithResourceRequirements(corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("350Mi"), + }, + }) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Autoscale, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, err := v1b1test.CreateServiceReady(t, clients, &names, withResources) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + endpoint := objects.Route.Status.URL.URL() + + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + endpoint, + v1b1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK)), + "ResourceTestServesText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error probing %s: %v", endpoint, err) + } + + sendPostRequest := func(resolvableDomain bool, url *url.URL) (*spoof.Response, error) { + t.Logf("Request %s", url) + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), resolvableDomain) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url.String(), nil) + if err != nil { + return nil, err + } + return client.Do(req) + } + + pokeCowForMB := func(mb int) error { + u, _ := url.Parse(endpoint.String()) + q := u.Query() 
+ q.Set("bloat", fmt.Sprintf("%d", mb)) + u.RawQuery = q.Encode() + response, err := sendPostRequest(test.ServingFlags.ResolvableDomain, u) + if err != nil { + return err + } + if response.StatusCode != http.StatusOK { + return fmt.Errorf("StatusCode = %d, want %d", response.StatusCode, http.StatusOK) + } + return nil + } + + t.Log("Querying the application to see if the memory limits are enforced.") + if err := pokeCowForMB(100); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 100, err) + } + + if err := pokeCowForMB(200); err != nil { + t.Fatalf("Didn't get a response from bloating cow with %d MBs of Memory: %v", 200, err) + } + + if err := pokeCowForMB(500); err == nil { + t.Fatalf("We shouldn't have got a response from bloating cow with %d MBs of Memory: %v", 500, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/revision_timeout_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/revision_timeout_test.go new file mode 100644 index 0000000000..89abae5d17 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/revision_timeout_test.go @@ -0,0 +1,242 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/mattbaird/jsonpatch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + . "knative.dev/serving/pkg/testing/v1beta1" +) + +// createService creates a service in namespace with the name names.Service +// that uses the image specified by names.Image +func createService(t *testing.T, clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int64) (*v1beta1.Service, error) { + service := v1b1test.Service(names, WithRevisionTimeoutSeconds(revisionTimeoutSeconds)) + v1b1test.LogResourceObject(t, v1b1test.ResourceObjects{Service: service}) + svc, err := clients.ServingBetaClient.Services.Create(service) + return svc, err +} + +func updateServiceWithTimeout(clients *test.Clients, names test.ResourceNames, revisionTimeoutSeconds int) error { + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/spec/template/spec/timeoutSeconds", + Value: revisionTimeoutSeconds, + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + return err + } + _, err = clients.ServingBetaClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "") + if err != nil { + return err + } + return nil +} + +// sendRequests send a request to "endpoint", returns error if unexpected response code, nil otherwise. 
+func sendRequest(t *testing.T, clients *test.Clients, endpoint *url.URL, initialSleepSeconds int, sleepSeconds int, expectedResponseCode int) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, endpoint.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Logf("Spoofing client failed: %v", err) + return err + } + + initialSleepMs := initialSleepSeconds * 1000 + sleepMs := sleepSeconds * 1000 + + start := time.Now().UnixNano() + defer func() { + end := time.Now().UnixNano() + t.Logf("URL: %v, initialSleep: %v, sleep: %v, request elapsed %.2f ms", endpoint, initialSleepMs, sleepMs, float64(end-start)/1e6) + }() + u, _ := url.Parse(endpoint.String()) + q := u.Query() + q.Set("initialTimeout", fmt.Sprintf("%d", initialSleepMs)) + q.Set("timeout", fmt.Sprintf("%d", sleepMs)) + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + t.Logf("Failed new request: %v", err) + return err + } + + resp, err := client.Do(req) + if err != nil { + t.Logf("Failed request err: %v", err) + return err + } + + t.Logf("Response status code: %v, expected: %v", resp.StatusCode, expectedResponseCode) + if expectedResponseCode != resp.StatusCode { + return fmt.Errorf("got response status code %v, wanted %v", resp.StatusCode, expectedResponseCode) + } + return nil +} + +func TestRevisionTimeout(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + var rev2s, rev5s test.ResourceNames + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Timeout, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service ") + svc, err := createService(t, clients, names, 2) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + t.Log("The Service will be updated with the 
name of the Revision once it is created") + revisionName, err := v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the new revision: %v", names.Service, err) + } + rev2s.Revision = revisionName + + t.Log("When the Service reports as Ready, everything should be ready") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic to Revision %s: %v", names.Service, names.Revision, err) + } + + t.Log("Updating the Service to use a different revision timeout") + err = updateServiceWithTimeout(clients, names, 5) + if err != nil { + t.Fatalf("Patch update for Service %s with new timeout 5s failed: %v", names.Service, err) + } + + // getNextRevisionName waits for names.Revision to change, so we set it to the rev2s revision and wait for the (new) rev5s revision. + names.Revision = rev2s.Revision + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + rev5s.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision with timeout 5s: %v", names.Service, err) + } + + t.Logf("Waiting for revision %q to be ready", rev2s.Revision) + if err := v1b1test.WaitForRevisionState(clients.ServingBetaClient, rev2s.Revision, v1b1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev2s.Revision, err) + } + t.Logf("Waiting for revision %q to be ready", rev5s.Revision) + if err := v1b1test.WaitForRevisionState(clients.ServingBetaClient, rev5s.Revision, v1b1test.IsRevisionReady, "RevisionIsReady"); err != nil { + t.Fatalf("The Revision %q still can't serve traffic: %v", rev5s.Revision, err) + } + + // Set names for traffic targets to make them directly routable. 
+ rev2s.TrafficTarget = "rev2s" + rev5s.TrafficTarget = "rev5s" + + t.Log("Updating RouteSpec") + if _, err := v1b1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: rev2s.TrafficTarget, + RevisionName: rev2s.Revision, + Percent: ptr.Int64(50), + }, { + Tag: rev5s.TrafficTarget, + RevisionName: rev5s.Revision, + Percent: ptr.Int64(50), + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err := clients.ServingBetaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + var rev2sURL, rev5sURL *url.URL + for _, tt := range service.Status.Traffic { + if tt.Tag == rev2s.TrafficTarget { + rev2sURL = tt.URL.URL() + } + if tt.Tag == rev5s.TrafficTarget { + rev5sURL = tt.URL.URL() + } + } + if rev2sURL == nil || rev5sURL == nil { + t.Fatalf("Unable to fetch URLs from traffic targets: %#v", service.Status.Traffic) + } + + t.Logf("Probing %s", rev5sURL) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + rev5sURL, + v1b1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", rev5sURL, err) + } + + // Quick sanity check + if err := sendRequest(t, clients, rev2sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 0s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 0, 0, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 0s with revision timeout 5s: %v", err) + } + + // Fail by surpassing the initial 
timeout. + if err := sendRequest(t, clients, rev2sURL, 5, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 5s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 7, 0, http.StatusServiceUnavailable); err != nil { + t.Errorf("Did not fail request with sleep 7s with revision timeout 5s: %v", err) + } + + // Not fail by not surpassing in the initial timeout, but in the overall request duration. + if err := sendRequest(t, clients, rev2sURL, 1, 3, http.StatusOK); err != nil { + t.Errorf("Did not fail request with sleep 1s/3s with revision timeout 2s: %v", err) + } + if err := sendRequest(t, clients, rev5sURL, 3, 3, http.StatusOK); err != nil { + t.Errorf("Failed request with sleep 3s/3s with revision timeout 5s: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/route_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/route_test.go new file mode 100644 index 0000000000..c3ee9fb213 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/route_test.go @@ -0,0 +1,157 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
// assertResourcesUpdatedWhenRevisionIsReady waits for the named Route to
// report Ready and then verifies the full set of conformance state: the
// endpoint serves expectedText, the Revision is Ready and annotated with
// expectedGeneration, the Configuration records the Revision as its latest
// ready one, and the Route sends 100% of traffic to the Revision.
func assertResourcesUpdatedWhenRevisionIsReady(t *testing.T, clients *test.Clients, names test.ResourceNames, url *url.URL, expectedGeneration, expectedText string) {
	t.Log("When the Route reports as Ready, everything should be ready.")
	if err := v1b1test.WaitForRouteState(clients.ServingBetaClient, names.Route, v1b1test.IsRouteReady, "RouteIsReady"); err != nil {
		t.Fatalf("The Route %s was not marked as Ready to serve traffic to Revision %s: %v", names.Route, names.Revision, err)
	}

	// TODO(#1178): Remove "Wait" from all checks below this point.
	t.Log("Serves the expected data at the endpoint")

	_, err := pkgTest.WaitForEndpointState(
		clients.KubeClient,
		t.Logf,
		url,
		v1b1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))),
		"WaitForEndpointToServeText",
		test.ServingFlags.ResolvableDomain)
	if err != nil {
		t.Fatalf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, url, expectedText, err)
	}

	// We want to verify that the endpoint works as soon as Ready: True, but there are a bunch of other pieces of state that we validate for conformance.
	t.Log("The Revision will be marked as Ready when it can serve traffic")
	err = v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, v1b1test.IsRevisionReady)
	if err != nil {
		t.Fatalf("Revision %s did not become ready to serve traffic: %v", names.Revision, err)
	}
	t.Log("The Revision will be annotated with the generation")
	err = v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, v1b1test.IsRevisionAtExpectedGeneration(expectedGeneration))
	if err != nil {
		t.Fatalf("Revision %s did not have an expected annotation with generation %s: %v", names.Revision, expectedGeneration, err)
	}
	t.Log("Updates the Configuration that the Revision is ready")
	err = v1b1test.CheckConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) {
		return c.Status.LatestReadyRevisionName == names.Revision, nil
	})
	if err != nil {
		t.Fatalf("The Configuration %s was not updated indicating that the Revision %s was ready: %v", names.Config, names.Revision, err)
	}
	t.Log("Updates the Route to route traffic to the Revision")
	err = v1b1test.CheckRouteState(clients.ServingBetaClient, names.Route, v1b1test.AllRouteTrafficAtRevision(names))
	if err != nil {
		t.Fatalf("The Route %s was not updated to route traffic to the Revision %s: %v", names.Route, names.Revision, err)
	}
}

// getRouteURL polls the named Route until its status carries a URL and
// returns that URL (or the wait error if it never appears).
// NOTE: the local variable below deliberately shadows the net/url package,
// which is legal because the package name is only needed in the var's type.
func getRouteURL(clients *test.Clients, names test.ResourceNames) (*url.URL, error) {
	var url *url.URL

	err := v1b1test.WaitForRouteState(
		clients.ServingBetaClient,
		names.Route,
		func(r *v1beta1.Route) (bool, error) {
			if r.Status.URL == nil {
				return false, nil
			}
			url = r.Status.URL.URL()
			return url != nil, nil
		},
		"RouteURL",
	)

	return url, err
}
TrafficTarget: svcName, + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Route and Configuration") + config, err := v1b1test.CreateConfiguration(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Configuration: %v", err) + } + objects.Config = config + + route, err := v1b1test.CreateRoute(t, clients, names) + if err != nil { + t.Fatalf("Failed to create Route: %v", err) + } + objects.Route = route + + t.Log("The Configuration will be updated with the name of the Revision") + names.Revision, err = v1b1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the new revision: %v", names.Config, err) + } + + url, err := getRouteURL(clients, names) + if err != nil { + t.Fatalf("Failed to get URL from route %s: %v", names.Route, err) + } + + t.Logf("The Route URL is: %s", url) + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "1", test.PizzaPlanetText1) + + // We start a prober at background thread to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, url) + defer test.AssertProberDefault(t, prober) + + t.Log("Updating the Configuration to use a different image") + objects.Config, err = v1b1test.PatchConfig(t, clients, objects.Config, rtesting.WithConfigImage(pkgTest.ImagePath(test.PizzaPlanet2))) + if err != nil { + t.Fatalf("Patch update for Configuration %s with new image %s failed: %v", names.Config, test.PizzaPlanet2, err) + } + + t.Log("Since the Configuration was updated a new Revision will be created and the Configuration will be updated") + names.Revision, err = v1b1test.WaitForConfigLatestRevision(clients, names) + if err != nil { + t.Fatalf("Configuration %s was not updated with the Revision for image %s: %v", names.Config, test.PizzaPlanet2, err) + } + + assertResourcesUpdatedWhenRevisionIsReady(t, clients, names, url, "2", test.PizzaPlanetText2) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_account_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_account_test.go new file mode 100644 index 0000000000..99aef75f61 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_account_test.go @@ -0,0 +1,57 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "strings" + "testing" + + . 
"knative.dev/serving/pkg/testing/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" +) + +const ( + invalidServiceAccountName = "foo@bar.baz" +) + +func TestServiceAccountValidation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Service %s", names.Service) + service := v1b1test.Service(names, WithServiceAccountName(invalidServiceAccountName)) + v1b1test.LogResourceObject(t, v1b1test.ResourceObjects{Service: service}) + + _, err := clients.ServingBetaClient.Services.Create(service) + if err == nil { + t.Fatal("Expected Service creation to fail") + } + if got, want := err.Error(), "serviceAccountName: spec.template.spec."+invalidServiceAccountName; !strings.Contains(got, want) { + t.Errorf("Error = %q, want to contain = %q", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_test.go new file mode 100644 index 0000000000..2d229901ca --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/service_test.go @@ -0,0 +1,587 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + rtesting "knative.dev/serving/pkg/testing/v1beta1" +) + +// TestService tests both Creation and Update paths for a service. The test performs a series of Update/Validate steps to ensure that +// the service transitions as expected during each step. +// Currently the test performs the following updates: +// 1. Update Container Image +// 2. Update Metadata +// a. Update Labels +// b. Update Annotations +func TestService(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, err := v1b1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate State after Image Update + if err = validateControlPlane(t, clients, names, "2"); err != nil { + t.Error(err) + } + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } + + // Update Metadata (Labels) + t.Logf("Updating labels of the RevisionTemplateSpec for service %s.", names.Service) + metadata := metav1.ObjectMeta{ + Labels: map[string]string{ + "labelX": "abc", + "labelY": "def", + }, + } + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceTemplateMeta(metadata)); err != nil { + t.Fatalf("Service %s was not updated with labels in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + if names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s after updating labels in its 
RevisionTemplateSpec: %v", names.Service, names.Revision, err) + } + + // Update Metadata (Annotations) + t.Logf("Updating annotations of RevisionTemplateSpec for service %s", names.Service) + metadata = metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotationA": "123", + "annotationB": "456", + }, + } + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceTemplateMeta(metadata)); err != nil { + t.Fatalf("Service %s was not updated with annotation in its RevisionTemplateSpec: %v", names.Service, err) + } + + t.Log("Waiting for the new revision to appear as LatestRevision.") + names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("The new revision has not become ready in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + + // Validate the Service shape. + if err = validateControlPlane(t, clients, names, "4"); err != nil { + t.Error(err) + } + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText2); err != nil { + t.Error(err) + } +} + +func waitForDesiredTrafficShape(t *testing.T, sName string, want map[string]v1.TrafficTarget, clients *test.Clients) error { + return v1b1test.WaitForServiceState( + clients.ServingBetaClient, sName, func(s *v1beta1.Service) (bool, error) { + // IsServiceReady never returns an error. + if ok, _ := v1b1test.IsServiceReady(s); !ok { + return false, nil + } + // Match the traffic shape. 
+ got := map[string]v1.TrafficTarget{} + for _, tt := range s.Status.Traffic { + got[tt.Tag] = tt + } + ignoreURLs := cmpopts.IgnoreFields(v1.TrafficTarget{}, "URL") + if !cmp.Equal(got, want, ignoreURLs) { + t.Logf("For service %s traffic shape mismatch: (-got, +want) %s", + sName, cmp.Diff(got, want, ignoreURLs)) + return false, nil + } + return true, nil + }, "Verify Service Traffic Shape", + ) +} + +func TestServiceBYOName(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + revName := names.Service + "-byoname" + + // Setup initial Service + objects, err := v1b1test.CreateServiceReady(t, clients, &names, func(svc *v1beta1.Service) { + svc.Spec.Template.Name = revName + }) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + if got, want := names.Revision, revName; got != want { + t.Errorf("CreateServiceReady() = %s, wanted %s", got, want) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err = validateLabelsPropagation(t, *objects, names); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // We start a background prober to test if Route is always healthy even during Route update. 
+ prober := test.RunRouteProber(t.Logf, clients, names.URL) + defer test.AssertProberDefault(t, prober) + + // Update Container Image + t.Log("Updating the Service to use a different image.") + names.Image = test.PizzaPlanet2 + image2 := pkgTest.ImagePath(names.Image) + if _, err := v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err == nil { + t.Fatalf("Patch update for Service %s didn't fail.", names.Service) + } +} + +// TestServiceWithTrafficSplit creates a Service with a variety of "release"-like traffic shapes. +// Currently tests for the following combinations: +// 1. One Revision Specified, current == latest +// 2. One Revision Specified, current != latest +// 3. Two Revisions Specified, 50% rollout, candidate == latest +// 4. Two Revisions Specified, 50% rollout, candidate != latest +// 5. Two Revisions Specified, 50% rollout, candidate != latest, candidate is configurationName. +func TestServiceWithTrafficSplit(t *testing.T) { + t.Parallel() + // Create Initial Service + clients := test.Setup(t) + releaseImagePath2 := pkgTest.ImagePath(test.PizzaPlanet2) + releaseImagePath3 := pkgTest.ImagePath(test.HelloWorld) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Expected Text for different revisions. 
+ const ( + expectedFirstRev = test.PizzaPlanetText1 + expectedSecondRev = test.PizzaPlanetText2 + expectedThirdRev = test.HelloWorldText + ) + + // Setup initial Service + objects, err := v1b1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + t.Log("Validating service shape.") + if err := validateReleaseServiceShape(objects); err != nil { + t.Fatalf("Release shape is incorrect: %v", err) + } + if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + firstRevision := names.Revision + + // 1. One Revision Specified, current == latest. + t.Log("1. Updating Service to ReleaseType using lastCreatedRevision") + objects.Service, err = v1b1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(100), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape := map[string]v1.TrafficTarget{ + "current": { + Tag: "current", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + Percent: ptr.Int64(100), + LatestRevision: ptr.Bool(false), + }, + "latest": { + Tag: "latest", + RevisionName: objects.Config.Status.LatestReadyRevisionName, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Service traffic should go to the first revision and be available on two names traffic targets: 'current' and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedFirstRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 2. 
One Revision Specified, current != latest. + t.Log("2. Updating the Service Spec with a new image") + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(releaseImagePath2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath2, err) + } + + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + secondRevision := names.Revision + + // Also verify traffic is in the correct shape. + desiredTrafficShape["latest"] = v1.TrafficTarget{ + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Since the Service is using release the Route will not be updated, but new revision will be available at 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev}, + []string{"latest", "current"}, + []string{expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 3. Two Revisions Specified, 50% rollout, candidate == latest. + t.Log("3. 
Updating Service to split traffic between two revisions using Release mode") + objects.Service, err = v1b1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, { + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + desiredTrafficShape = map[string]v1.TrafficTarget{ + "current": { + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + "candidate": { + Tag: "candidate", + RevisionName: secondRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(false), + }, + "latest": { + Tag: "latest", + RevisionName: secondRevision, + LatestRevision: ptr.Bool(true), + }, + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should be split between the two revisions and available on three named traffic targets, 'current', 'candidate', and 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"candidate", "latest", "current"}, + []string{expectedSecondRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // 4. Two Revisions Specified, 50% rollout, candidate != latest. + t.Log("4. 
Updating the Service Spec with a new image") + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(releaseImagePath3)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, releaseImagePath3, err) + } + t.Log("Since the Service was updated a new Revision will be created") + if names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names); err != nil { + t.Fatalf("The Service %s was not updated with new revision %s: %v", names.Service, names.Revision, err) + } + thirdRevision := names.Revision + + desiredTrafficShape["latest"] = v1.TrafficTarget{ + Tag: "latest", + RevisionName: thirdRevision, + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + t.Log("Traffic should remain between the two images, and the new revision should be available on the named traffic target 'latest'") + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedSecondRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedSecondRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } + + // Now update the service to use `@latest` as candidate. + t.Log("5. Updating Service to split traffic between two `current` and `@latest`") + + objects.Service, err = v1b1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "current", + RevisionName: firstRevision, + Percent: ptr.Int64(50), + }, { + Tag: "candidate", + Percent: ptr.Int64(50), + }, { + Tag: "latest", + Percent: nil, + }}, + }) + if err != nil { + t.Fatalf("Failed to update Service: %v", err) + } + + // Verify in the end it's still the case. 
+ if err := validateAnnotations(objects); err != nil { + t.Errorf("Service annotations are incorrect: %v", err) + } + + // `candidate` now points to the latest. + desiredTrafficShape["candidate"] = v1.TrafficTarget{ + Tag: "candidate", + RevisionName: thirdRevision, + Percent: ptr.Int64(50), + LatestRevision: ptr.Bool(true), + } + t.Log("Waiting for Service to become ready with the new shape.") + if err := waitForDesiredTrafficShape(t, names.Service, desiredTrafficShape, clients); err != nil { + t.Fatal("Service never obtained expected shape") + } + + if err := validateDomains(t, clients, + names.URL, + []string{expectedFirstRev, expectedThirdRev}, + []string{"latest", "candidate", "current"}, + []string{expectedThirdRev, expectedThirdRev, expectedFirstRev}); err != nil { + t.Fatal(err) + } +} + +func TestAnnotationPropagation(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + // Clean up on test failure or interrupt + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + // Setup initial Service + objects, err := v1b1test.CreateServiceReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, + rtesting.WithServiceAnnotation("juicy", "jamba")); err != nil { + t.Fatalf("Service %s was not updated with new annotation: %v", names.Service, err) + } + + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. 
+ t.Log("Updating the Service to use a different image.") + image2 := pkgTest.ImagePath(test.PizzaPlanet2) + if _, err := v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image2)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image2, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1b1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. + if err := validateAnnotations(objects, "juicy"); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + + if objects.Service, err = v1b1test.PatchService(t, clients, objects.Service, + rtesting.WithServiceAnnotationRemoved("juicy")); err != nil { + t.Fatalf("Service %s was not updated with annotation deleted: %v", names.Service, err) + } + + // Updating metadata does not trigger revision or generation + // change, so let's generate a change that we can watch. 
+ t.Log("Updating the Service to use a different image.") + image3 := pkgTest.ImagePath(test.HelloWorld) + if _, err := v1b1test.PatchService(t, clients, objects.Service, rtesting.WithServiceImage(image3)); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, image3, err) + } + + t.Log("Service should reflect new revision created and ready in status.") + names.Revision, err = v1b1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("New image not reflected in Service: %v", err) + } + + t.Log("Waiting for Service to transition to Ready.") + if err := v1b1test.WaitForServiceState(clients.ServingBetaClient, names.Service, v1b1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("Error waiting for the service to become ready for the latest revision: %v", err) + } + objects, err = v1b1test.GetResourceObjects(clients, names) + if err != nil { + t.Errorf("Error getting objects: %v", err) + } + + // Now we can validate the annotations. + if err := validateAnnotations(objects); err != nil { + t.Errorf("Annotations are incorrect: %v", err) + } + if _, ok := objects.Config.Annotations["juicy"]; ok { + t.Error("Config still has `juicy` annotation") + } + if _, ok := objects.Route.Annotations["juicy"]; ok { + t.Error("Route still has `juicy` annotation") + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/single_threaded_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/single_threaded_test.go new file mode 100644 index 0000000000..e49fb7d9be --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/single_threaded_test.go @@ -0,0 +1,107 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + "time" + + "golang.org/x/sync/errgroup" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + rtesting "knative.dev/serving/pkg/testing/v1beta1" +) + +func TestSingleConcurrency(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.SingleThreadedImage, + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + objects, err := v1b1test.CreateServiceReady(t, clients, &names, rtesting.WithContainerConcurrency(1)) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + url := objects.Service.Status.URL.URL() + + // Ready does not actually mean Ready for a Route just yet. 
+ // See https://github.com/knative/serving/issues/1582 + t.Logf("Probing %s", url) + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1b1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", url, err) + } + + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + concurrency := 5 + duration := 20 * time.Second + t.Logf("Maintaining %d concurrent requests for %v.", concurrency, duration) + group, _ := errgroup.WithContext(context.Background()) + for i := 0; i < concurrency; i++ { + group.Go(func() error { + done := time.After(duration) + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return fmt.Errorf("error creating http request: %w", err) + } + + for { + select { + case <-done: + return nil + default: + res, err := client.Do(req) + if err != nil { + return fmt.Errorf("error making request %w", err) + } + if res.StatusCode == http.StatusInternalServerError { + return errors.New("detected concurrent requests") + } else if res.StatusCode != http.StatusOK { + return fmt.Errorf("non 200 response %v", res.StatusCode) + } + } + } + }) + } + t.Log("Waiting for all requests to complete.") + if err := group.Wait(); err != nil { + t.Fatalf("Error making requests for single threaded test: %v.", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/util.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/util.go new file mode 100644 index 0000000000..86efa095cb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/util.go @@ -0,0 +1,339 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + "math" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + "golang.org/x/sync/errgroup" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" +) + +func waitForExpectedResponse(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, expectedResponse string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + _, err = client.Poll(req, v1b1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedResponse)))) + return err +} + +func validateDomains(t pkgTest.TLegacy, clients *test.Clients, baseDomain *url.URL, + baseExpected, trafficTargets, targetsExpected []string) error { + var subdomains []*url.URL + for _, target := range trafficTargets { + subdomain, _ := url.Parse(baseDomain.String()) + subdomain.Host = target + "-" + baseDomain.Host + subdomains = append(subdomains, subdomain) + } + + g, _ := errgroup.WithContext(context.Background()) + // We don't have a good way to check if the route is updated so we will wait until a subdomain has + // started returning at least one expected result to key that 
we should validate percentage splits. + // In order for tests to succeed reliably, we need to make sure that all domains succeed. + for _, resp := range baseExpected { + // Check for each of the responses we expect from the base domain. + resp := resp + g.Go(func() error { + t.Logf("Waiting for route to update %s", baseDomain) + return waitForExpectedResponse(t, clients, baseDomain, resp) + }) + } + for i, s := range subdomains { + i, s := i, s + g.Go(func() error { + t.Logf("Waiting for route to update %s", s) + return waitForExpectedResponse(t, clients, s, targetsExpected[i]) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error with initial domain probing: %w", err) + } + + g.Go(func() error { + minBasePercentage := test.MinSplitPercentage + if len(baseExpected) == 1 { + minBasePercentage = test.MinDirectPercentage + } + min := int(math.Floor(test.ConcurrentRequests * minBasePercentage)) + return checkDistribution(t, clients, baseDomain, test.ConcurrentRequests, min, baseExpected) + }) + for i, subdomain := range subdomains { + i, subdomain := i, subdomain + g.Go(func() error { + min := int(math.Floor(test.ConcurrentRequests * test.MinDirectPercentage)) + return checkDistribution(t, clients, subdomain, test.ConcurrentRequests, min, []string{targetsExpected[i]}) + }) + } + if err := g.Wait(); err != nil { + return fmt.Errorf("error checking routing distribution: %w", err) + } + return nil +} + +// checkDistribution sends "num" requests to "domain", then validates that +// we see each body in "expectedResponses" at least "min" times. 
+func checkDistribution(t pkgTest.TLegacy, clients *test.Clients, url *url.URL, num, min int, expectedResponses []string) error { + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + return err + } + + t.Logf("Performing %d concurrent requests to %s", num, url) + actualResponses, err := sendRequests(client, url, num) + if err != nil { + return err + } + + return checkResponses(t, num, min, url.Hostname(), expectedResponses, actualResponses) +} + +// checkResponses verifies that each "expectedResponse" is present in "actualResponses" at least "min" times. +func checkResponses(t pkgTest.TLegacy, num int, min int, domain string, expectedResponses []string, actualResponses []string) error { + // counts maps the expected response body to the number of matching requests we saw. + counts := make(map[string]int) + // badCounts maps the unexpected response body to the number of matching requests we saw. + badCounts := make(map[string]int) + + // counts := eval( + // SELECT body, count(*) AS total + // FROM $actualResponses + // WHERE body IN $expectedResponses + // GROUP BY body + // ) + for _, ar := range actualResponses { + expected := false + for _, er := range expectedResponses { + if strings.Contains(ar, er) { + counts[er]++ + expected = true + } + } + if !expected { + badCounts[ar]++ + } + } + + // Verify that we saw each entry in "expectedResponses" at least "min" times. + // check(SELECT body FROM $counts WHERE total < $min) + totalMatches := 0 + for _, er := range expectedResponses { + count := counts[er] + if count < min { + return fmt.Errorf("domain %s failed: want at least %d, got %d for response %q", domain, min, count, er) + } + + t.Logf("For domain %s: wanted at least %d, got %d requests.", domain, min, count) + totalMatches += count + } + // Verify that the total expected responses match the number of requests made. 
+ for badResponse, count := range badCounts { + t.Logf("Saw unexpected response %q %d times.", badResponse, count) + } + if totalMatches < num { + return fmt.Errorf("domain %s: saw expected responses %d times, wanted %d", domain, totalMatches, num) + } + // If we made it here, the implementation conforms. Congratulations! + return nil +} + +// sendRequests sends "num" requests to "url", returning a string for each spoof.Response.Body. +func sendRequests(client spoof.Interface, url *url.URL, num int) ([]string, error) { + responses := make([]string, num) + + // Launch "num" requests, recording the responses we get in "responses". + g, _ := errgroup.WithContext(context.Background()) + for i := 0; i < num; i++ { + // We don't index into "responses" inside the goroutine to avoid a race, see #1545. + result := &responses[i] + g.Go(func() error { + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return err + } + + resp, err := client.Do(req) + if err != nil { + return err + } + + *result = string(resp.Body) + return nil + }) + } + return responses, g.Wait() +} + +// Validates service health and vended content match for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. 
+func validateDataPlane(t pkgTest.TLegacy, clients *test.Clients, names test.ResourceNames, expectedText string) error { + t.Logf("Checking that the endpoint vends the expected text: %s", expectedText) + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + names.URL, + v1b1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", names.Route, names.URL, expectedText, err) + } + + return nil +} + +// Validates the state of Configuration, Revision, and Route objects for a runLatest Service. +// The checks in this method should be able to be performed at any point in a +// runLatest Service's lifecycle so long as the service is in a "Ready" state. +func validateControlPlane(t pkgTest.T, clients *test.Clients, names test.ResourceNames, expectedGeneration string) error { + t.Log("Checking to ensure Revision is in desired state with", "generation", expectedGeneration) + err := v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, func(r *v1beta1.Revision) (bool, error) { + if ready, err := v1b1test.IsRevisionReady(r); !ready { + return false, fmt.Errorf("revision %s did not become ready to serve traffic: %w", names.Revision, err) + } + if r.Status.ImageDigest == "" { + return false, fmt.Errorf("imageDigest not present for revision %s", names.Revision) + } + if validDigest, err := validateImageDigest(names.Image, r.Status.ImageDigest); !validDigest { + return false, fmt.Errorf("imageDigest %s is not valid for imageName %s: %w", r.Status.ImageDigest, names.Image, err) + } + return true, nil + }) + if err != nil { + return err + } + err = v1b1test.CheckRevisionState(clients.ServingBetaClient, names.Revision, v1b1test.IsRevisionAtExpectedGeneration(expectedGeneration)) + if err != nil { + return 
fmt.Errorf("revision %s did not have an expected annotation with generation %s: %w", names.Revision, expectedGeneration, err) + } + + t.Log("Checking to ensure Configuration is in desired state.") + err = v1b1test.CheckConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was created: %w", names.Config, names.Revision, err) + } + if c.Status.LatestReadyRevisionName != names.Revision { + return false, fmt.Errorf("the Configuration %s was not updated indicating that the Revision %s was ready: %w", names.Config, names.Revision, err) + } + return true, nil + }) + if err != nil { + return err + } + + t.Log("Checking to ensure Route is in desired state with", "generation", expectedGeneration) + err = v1b1test.CheckRouteState(clients.ServingBetaClient, names.Route, v1b1test.AllRouteTrafficAtRevision(names)) + if err != nil { + return fmt.Errorf("the Route %s was not updated to route traffic to the Revision %s: %w", names.Route, names.Revision, err) + } + + return nil +} + +// Validates labels on Revision, Configuration, and Route objects when created by a Service +// see spec here: https://github.com/knative/serving/blob/master/docs/spec/spec.md#revision +func validateLabelsPropagation(t pkgTest.T, objects v1b1test.ResourceObjects, names test.ResourceNames) error { + t.Log("Validate Labels on Revision Object") + revision := objects.Revision + + if revision.Labels["serving.knative.dev/configuration"] != names.Config { + return fmt.Errorf("expect Confguration name in Revision label %q but got %q ", names.Config, revision.Labels["serving.knative.dev/configuration"]) + } + if revision.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Revision label %q but got %q ", names.Service, 
revision.Labels["serving.knative.dev/service"]) + } + + t.Log("Validate Labels on Configuration Object") + config := objects.Config + if config.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Configuration label %q but got %q ", names.Service, config.Labels["serving.knative.dev/service"]) + } + if config.Labels["serving.knative.dev/route"] != names.Route { + return fmt.Errorf("expect Route name in Configuration label %q but got %q ", names.Route, config.Labels["serving.knative.dev/route"]) + } + + t.Log("Validate Labels on Route Object") + route := objects.Route + if route.Labels["serving.knative.dev/service"] != names.Service { + return fmt.Errorf("expect Service name in Route label %q but got %q ", names.Service, route.Labels["serving.knative.dev/service"]) + } + return nil +} + +func validateAnnotations(objs *v1b1test.ResourceObjects, extraKeys ...string) error { + // This checks whether the annotations are set on the resources that + // expect them to have. + // List of issues listing annotations that we check: #1642. + + anns := objs.Service.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("service expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Route.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) { + if got := anns[a]; got == "" { + return fmt.Errorf("route expected %s annotation to be set, but was empty", a) + } + } + anns = objs.Config.GetAnnotations() + for _, a := range append([]string{serving.CreatorAnnotation, serving.UpdaterAnnotation}, extraKeys...) 
{ + if got := anns[a]; got == "" { + return fmt.Errorf("config expected %s annotation to be set, but was empty", a) + } + } + return nil +} + +func validateReleaseServiceShape(objs *v1b1test.ResourceObjects) error { + // Traffic should be routed to the lastest created revision. + if got, want := objs.Service.Status.Traffic[0].RevisionName, objs.Config.Status.LatestReadyRevisionName; got != want { + return fmt.Errorf("Status.Traffic[0].RevisionsName = %s, want: %s", got, want) + } + return nil +} + +func validateImageDigest(imageName string, imageDigest string) (bool, error) { + ref, err := name.ParseReference(pkgTest.ImagePath(imageName)) + if err != nil { + return false, err + } + + digest, err := name.NewDigest(imageDigest) + if err != nil { + return false, err + } + + return ref.Context().String() == digest.Context().String(), nil +} diff --git a/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/volumes_test.go b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/volumes_test.go new file mode 100644 index 0000000000..24881e303b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/api/v1beta1/volumes_test.go @@ -0,0 +1,419 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "path" + "path/filepath" + "testing" + + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" + v1b1test "knative.dev/serving/test/v1beta1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "knative.dev/serving/pkg/testing/v1beta1" +) + +// TestConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. +func TestConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-exist", + }, + Optional: ptr.Bool(true), 
+ }, + }) + + // Setup initial Service + if _, err := v1b1test.CreateServiceReady(t, clients, &names, withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedConfigMapVolume tests that we echo back the appropriate text from the ConfigMap volume. +func TestProjectedConfigMapVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.AppendRandomString("hello-volumes-") + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. 
+ }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Delete(configMap.Name, nil); err != nil { + t.Errorf("ConfigMaps().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Optional: ptr.Bool(false), + }, + }, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "does-not-matter", + }, + Optional: ptr.Bool(true), + }, + }}, + }, + }) + + // Setup initial Service + if _, err := v1b1test.CreateServiceReady(t, clients, &names, withVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.HelloVolume, + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. 
+ secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + Optional: ptr.Bool(false), + }, + }) + + withOptionalBadVolume := WithVolume("blah", "/does/not/matter", corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "does-not-exist", + Optional: ptr.Bool(true), + }, + }) + + // Setup initial Service + if _, err := v1b1test.CreateServiceReady(t, clients, &names, withVolume, withOptionalBadVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedSecretVolume tests that we echo back the appropriate text from the Secret volume. +func TestProjectedSecretVolume(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text := test.ObjectNameForTest(t) + + // Create the Secret with random text. 
+ secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. + }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + withSubpath := func(svc *v1beta1.Service) { + vm := &svc.Spec.Template.Spec.Containers[0].VolumeMounts[0] + vm.MountPath = test.HelloVolumePath + vm.SubPath = filepath.Base(test.HelloVolumePath) + } + + // Setup initial Service + if _, err := v1b1test.CreateServiceReady(t, clients, &names, withVolume, withSubpath); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + if err = validateDataPlane(t, clients, names, text); err != nil { + t.Error(err) + } +} + +// TestProjectedComplex tests that we echo back the appropriate text from the complex Projected volume. 
+func TestProjectedComplex(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "hellovolume", + } + + text1 := test.ObjectNameForTest(t) + text2 := test.ObjectNameForTest(t) + text3 := test.ObjectNameForTest(t) + + // Create the ConfigMap with random text. + configMap, err := clients.KubeClient.Kube.CoreV1().ConfigMaps(test.ServingNamespace).Create(&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // Give it the same name as the service. + }, + Data: map[string]string{ + filepath.Base(test.HelloVolumePath): text1, + "other": text2, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: %v", err) + } + t.Logf("Successfully created configMap: %v", configMap) + + // Create the Secret with random text. + secret, err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Service, // name the Secret the same as the Service. 
+ }, + StringData: map[string]string{ + filepath.Base(test.HelloVolumePath): text3, + }, + }) + if err != nil { + t.Fatalf("Failed to create secret: %v", err) + } + t.Logf("Successfully created secret: %v", secret) + + cleanup := func() { + test.TearDown(clients, names) + if err := clients.KubeClient.Kube.CoreV1().Secrets(test.ServingNamespace).Delete(secret.Name, nil); err != nil { + t.Errorf("Secrets().Delete() = %v", err) + } + } + + // Clean up on test failure or interrupt + defer cleanup() + test.CleanupOnInterrupt(cleanup) + + withVolume := WithVolume("asdf", filepath.Dir(test.HelloVolumePath), corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + Items: []corev1.KeyToPath{{ + Key: "other", + Path: "another", + }}, + Optional: ptr.Bool(false), + }, + }, { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secret.Name, + }, + Items: []corev1.KeyToPath{{ + Key: filepath.Base(test.HelloVolumePath), + Path: filepath.Base(test.HelloVolumePath), + }}, + Optional: ptr.Bool(false), + }, + }}, + }, + }) + + // Setup initial Service + if _, err := v1b1test.CreateServiceReady(t, clients, &names, withVolume); err != nil { + t.Fatalf("Failed to create initial Service %v: %v", names.Service, err) + } + + // Validate State after Creation + if err = validateControlPlane(t, clients, names, "1"); err != nil { + t.Error(err) + } + + // Observation shows that when keys collide, the last source listed wins, + // so for the main key, we should get back text3 (vs. text1) + if err = validateDataPlane(t, clients, names, text3); err != nil { + t.Error(err) + } + + // Verify that we get multiple files mounted in, in this case from the + // second source, which was partially shadowed in our check above. 
+ names.URL.Path = path.Join(names.URL.Path, "another") + if err = validateDataPlane(t, clients, names, text2); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/basic_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/basic_test.go new file mode 100644 index 0000000000..278e0af141 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/basic_test.go @@ -0,0 +1,98 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestBasics verifies that a no frills Ingress exposes a simple Pod/Service via the public load balancer. +func TestBasics(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // Create a simple Ingress over the Service. 
+ _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + RuntimeRequest(t, client, "http://"+name+".example.com") +} + +// TestBasicsHTTP2 verifies that the same no-frills Ingress over a Service with http/2 configured +// will see a ProtoMajor of 2. +func TestBasicsHTTP2(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameH2C) + defer cancel() + + // Create a simple Ingress over the Service. + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + ri := RuntimeRequest(t, client, "http://"+name+".example.com") + if ri == nil { + return + } + + if want, got := 2, ri.Request.ProtoMajor; want != got { + t.Errorf("ProtoMajor = %d, wanted %d", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/grpc_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/grpc_test.go new file mode 100644 index 0000000000..7967c3c25c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/grpc_test.go @@ -0,0 +1,216 @@ 
+// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "context" + "fmt" + "math/rand" + "net" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" + ping "knative.dev/serving/test/test_images/grpc-ping/proto" +) + +// TestGRPC verifies that GRPC may be used via a simple Ingress. +func TestGRPC(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const suffix = "- pong" + name, port, cancel := CreateGRPCService(t, clients, suffix) + defer cancel() + + domain := name + ".example.com" + + // Create a simple Ingress over the Service. 
+ _, dialCtx, cancel := CreateIngressReadyDialContext(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + conn, err := grpc.Dial( + domain+":80", + grpc.WithInsecure(), + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return dialCtx(ctx, "unused", addr) + }), + ) + if err != nil { + t.Fatalf("Dial() = %v", err) + } + defer conn.Close() + pc := ping.NewPingServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + stream, err := pc.PingStream(ctx) + if err != nil { + t.Fatalf("PingStream() = %v", err) + } + + for i := 0; i < 100; i++ { + checkGRPCRoundTrip(t, stream, suffix) + } +} + +// TestGRPCSplit verifies that websockets may be used across a traffic split. +func TestGRPCSplit(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const suffixBlue = "- blue" + blueName, bluePort, cancel := CreateGRPCService(t, clients, suffixBlue) + defer cancel() + + const suffixGreen = "- green" + greenName, greenPort, cancel := CreateGRPCService(t, clients, suffixGreen) + defer cancel() + + // The suffixes we expect to see. + want := sets.NewString(suffixBlue, suffixGreen) + + // Create a simple Ingress over the Service. 
+ name := test.ObjectNameForTest(t) + domain := name + ".example.com" + _, dialCtx, cancel := CreateIngressReadyDialContext(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: blueName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(bluePort), + }, + Percent: 50, + }, { + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: greenName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(greenPort), + }, + Percent: 50, + }}, + }}, + }, + }}, + }) + defer cancel() + + conn, err := grpc.Dial( + domain+":80", + grpc.WithInsecure(), + grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return dialCtx(ctx, "unused", addr) + }), + ) + if err != nil { + t.Fatalf("Dial() = %v", err) + } + defer conn.Close() + pc := ping.NewPingServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + got := sets.NewString() + for i := 0; i < 10; i++ { + stream, err := pc.PingStream(ctx) + if err != nil { + t.Errorf("PingStream() = %v", err) + continue + } + + suffix := findGRPCSuffix(t, stream) + if suffix == "" { + continue + } + got.Insert(suffix) + + for j := 0; j < 10; j++ { + checkGRPCRoundTrip(t, stream, suffix) + } + } + + if !cmp.Equal(want, got) { + t.Errorf("(-want, +got) = %s", cmp.Diff(want, got)) + } +} + +func findGRPCSuffix(t *testing.T, stream ping.PingService_PingStreamClient) string { + // Establish the suffix that corresponds to this stream. 
+ message := fmt.Sprintf("ping - %d", rand.Intn(1000)) + if err := stream.Send(&ping.Request{Msg: message}); err != nil { + t.Errorf("Error sending request: %v", err) + return "" + } + + resp, err := stream.Recv() + if err != nil { + t.Errorf("Error receiving response: %v", err) + return "" + } + gotMsg := resp.Msg + if !strings.HasPrefix(gotMsg, message) { + t.Errorf("Recv() = %s, wanted %s prefix", gotMsg, message) + return "" + } + return strings.TrimSpace(strings.TrimPrefix(gotMsg, message)) +} + +func checkGRPCRoundTrip(t *testing.T, stream ping.PingService_PingStreamClient, suffix string) { + message := fmt.Sprintf("ping - %d", rand.Intn(1000)) + if err := stream.Send(&ping.Request{Msg: message}); err != nil { + t.Errorf("Error sending request: %v", err) + return + } + + // Read back the echoed message and compared with sent. + if resp, err := stream.Recv(); err != nil { + t.Errorf("Error receiving response: %v", err) + } else if got, want := resp.Msg, message+suffix; got != want { + t.Errorf("Recv() = %s, wanted %s", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/headers_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/headers_test.go new file mode 100644 index 0000000000..601b724255 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/headers_test.go @@ -0,0 +1,177 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "net/http" + "testing" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestPreSplitSetHeaders verifies that an Ingress that specified AppendHeaders pre-split has the appropriate header(s) set. +func TestPreSplitSetHeaders(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + const headerName = "Foo-Bar-Baz" + + // Create a simple Ingress over the 10 Services. + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + AppendHeaders: map[string]string{ + headerName: name, + }, + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + t.Run("Check without passing header", func(t *testing.T) { + ri := RuntimeRequest(t, client, "http://"+name+".example.com") + if ri == nil { + return + } + + if got, want := ri.Request.Headers.Get(headerName), name; got != want { + t.Errorf("Headers[%q] = %q, wanted %q", headerName, got, want) + } + }) + + t.Run("Check with passing header", func(t *testing.T) { + ri := RuntimeRequest(t, client, "http://"+name+".example.com", func(req *http.Request) { + // Specify a value for the header to verify that implementations + // use set vs. append semantics. 
+ req.Header.Set(headerName, "bogus") + }) + if ri == nil { + return + } + + if got, want := ri.Request.Headers.Get(headerName), name; got != want { + t.Errorf("Headers[%q] = %q, wanted %q", headerName, got, want) + } + }) +} + +// TestPostSplitSetHeaders verifies that an Ingress that specified AppendHeaders post-split has the appropriate header(s) set. +func TestPostSplitSetHeaders(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const headerName = "Foo-Bar-Baz" + + backends := make([]v1alpha1.IngressBackendSplit, 0, 10) + names := sets.NewString() + for i := 0; i < 10; i++ { + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + backends = append(backends, v1alpha1.IngressBackendSplit{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + headerName: name, + }, + Percent: 10, + }) + names.Insert(name) + } + + // Create a simple Ingress over the 10 Services. + name := test.ObjectNameForTest(t) + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: backends, + }}, + }, + }}, + }) + defer cancel() + + t.Run("Check without passing header", func(t *testing.T) { + // Make enough requests that the likelihood of us seeing each variation is high, + // but don't check the distribution of requests, as that isn't the point of this + // particular test. 
+ seen := sets.NewString() + for i := 0; i < 100; i++ { + ri := RuntimeRequest(t, client, "http://"+name+".example.com") + if ri == nil { + return + } + seen.Insert(ri.Request.Headers.Get(headerName)) + } + // Check what we saw. + if !cmp.Equal(names, seen) { + t.Errorf("(over 100 requests) Header[%q] (-want, +got) = %s", + headerName, cmp.Diff(names, seen)) + } + }) + + t.Run("Check with passing header", func(t *testing.T) { + // Make enough requests that the likelihood of us seeing each variation is high, + // but don't check the distribution of requests, as that isn't the point of this + // particular test. + seen := sets.NewString() + for i := 0; i < 100; i++ { + ri := RuntimeRequest(t, client, "http://"+name+".example.com", func(req *http.Request) { + // Specify a value for the header to verify that implementations + // use set vs. append semantics. + req.Header.Set(headerName, "bogus") + }) + if ri == nil { + return + } + seen.Insert(ri.Request.Headers.Get(headerName)) + } + // Check what we saw. + if !cmp.Equal(names, seen) { + t.Errorf("(over 100 requests) Header[%q] (-want, +got) = %s", + headerName, cmp.Diff(names, seen)) + } + }) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/hosts_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/hosts_test.go new file mode 100644 index 0000000000..5ccaebfc96 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/hosts_test.go @@ -0,0 +1,70 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestMultipleHosts verifies that an Ingress can respond to multiple hosts. +func TestMultipleHosts(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // TODO(mattmoor): Once .svc.cluster.local stops being a special case + // for Visibility, add it here. + hosts := []string{ + "foo.com", + "www.foo.com", + "a-b-1.something-really-really-long.knative.dev", + "add.your.interesting.domain.here.io", + } + + // Create a simple Ingress over the Service. + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: hosts, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + for _, host := range hosts { + RuntimeRequest(t, client, "http://"+host) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/path_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/path_test.go new file mode 100644 index 0000000000..3a41586e52 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/path_test.go @@ -0,0 +1,236 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "math" + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestPath verifies that an Ingress properly dispatches to backends based on the path of the URL. +func TestPath(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + // For /foo + fooName, fooPort, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // For /bar + barName, barPort, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // For /baz + bazName, bazPort, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // Use a post-split injected header to establish which split we are sending traffic to. 
+ const headerName = "Which-Backend" + + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "/foo", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: fooName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(fooPort), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + headerName: fooName, + }, + Percent: 100, + }}, + }, { + Path: "/bar", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: barName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(barPort), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + headerName: barName, + }, + Percent: 100, + }}, + }, { + Path: "/baz", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: bazName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(bazPort), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + headerName: bazName, + }, + Percent: 100, + }}, + }, { + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. 
+ AppendHeaders: map[string]string{ + headerName: name, + }, + Percent: 100, + }}, + }}, + }, + }}, + }) + defer cancel() + + tests := map[string]string{ + "/foo": fooName, + "/bar": barName, + "/baz": bazName, + "": name, + "/asdf": name, + } + + for path, want := range tests { + t.Run(path, func(t *testing.T) { + ri := RuntimeRequest(t, client, "http://"+name+".example.com"+path) + if ri == nil { + return + } + + got := ri.Request.Headers.Get(headerName) + if got != want { + t.Errorf("Header[%q] = %q, wanted %q", headerName, got, want) + } + }) + } +} + +func TestPathAndPercentageSplit(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + fooName, fooPort, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + barName, barPort, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + // Use a post-split injected header to establish which split we are sending traffic to. 
+ const headerName = "Which-Backend" + + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Path: "/foo", + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: fooName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(fooPort), + }, + AppendHeaders: map[string]string{ + headerName: fooName, + }, + Percent: 50, + }, { + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: barName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(barPort), + }, + AppendHeaders: map[string]string{ + headerName: barName, + }, + Percent: 50, + }}, + }, { + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. 
+ AppendHeaders: map[string]string{ + headerName: name, + }, + Percent: 100, + }}, + }}, + }, + }}, + }) + defer cancel() + + const ( + total = 100 + totalHalf = total / 2 + tolerance = total * 0.15 + ) + got := make(map[string]float64, 2) + wantKeys := sets.NewString(fooName, barName) + for i := 0; i < total; i++ { + ri := RuntimeRequest(t, client, "http://"+name+".example.com/foo") + if ri == nil { + return + } + + gotH := ri.Request.Headers.Get(headerName) + got[gotH]++ + } + for k, v := range got { + if !wantKeys.Has(k) { + t.Errorf("%s is not in the expected header say %v", k, wantKeys) + } + if math.Abs(v-totalHalf) > tolerance { + t.Errorf("Header %s got: %v times, want in [%v, %v] range", k, v, totalHalf-tolerance, totalHalf+tolerance) + } + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/percentage_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/percentage_test.go new file mode 100644 index 0000000000..d480f7d415 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/percentage_test.go @@ -0,0 +1,119 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "math" + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestPercentage verifies that an Ingress splitting over multiple backends respects +// the given percentage distribution. +func TestPercentage(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + // Use a post-split injected header to establish which split we are sending traffic to. + const headerName = "Foo-Bar-Baz" + + backends := make([]v1alpha1.IngressBackendSplit, 0, 10) + weights := make(map[string]float64, len(backends)) + + // Double the percentage of the split each iteration until it would overflow, and then + // give the last route the remainder. + percent, total := 1, 0 + for i := 0; i < 10; i++ { + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + backends = append(backends, v1alpha1.IngressBackendSplit{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + headerName: name, + }, + Percent: percent, + }) + weights[name] = float64(percent) + + total += percent + percent *= 2 + // Cap the final non-zero bucket so that we total 100% + // After that, this will zero out remaining buckets. + if total+percent > 100 { + percent = 100 - total + } + } + + // Create a simple Ingress over the 10 Services. 
+ name := test.ObjectNameForTest(t) + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: backends, + }}, + }, + }}, + }) + defer cancel() + + // Create a large enough population of requests that we can reasonably assess how + // well the Ingress respected the percentage split. + seen := make(map[string]float64, len(backends)) + + const ( + // The total number of requests to make (as a float to avoid conversions in later computations). + totalRequests = 1000.0 + // The increment to make for each request, so that the values of seen reflect the + // percentage of the total number of requests we are making. + increment = 100.0 / totalRequests + // Allow the Ingress to be within 5% of the configured value. + margin = 5.0 + ) + for i := 0.0; i < totalRequests; i++ { + ri := RuntimeRequest(t, client, "http://"+name+".example.com") + if ri == nil { + continue + } + seen[ri.Request.Headers.Get(headerName)] += increment + } + + for name, want := range weights { + got := seen[name] + switch { + case want == 0.0 && got > 0.0: + // For 0% targets, we have tighter requirements. 
+ t.Errorf("Target %q received traffic, wanted none (0%% target).", name) + case math.Abs(got-want) > margin: + t.Errorf("Target %q received %f%%, wanted %f +/- %f", name, got, want, margin) + } + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/retry_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/retry_test.go new file mode 100644 index 0000000000..3fd563b8d7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/retry_test.go @@ -0,0 +1,140 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "fmt" + "net/http" + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestRetry verifies that an Ingress configured to retry N times properly masks transient failures. +func TestRetry(t *testing.T) { + for i := 2; i < 12; i++ { + i := i + t.Run(fmt.Sprintf("period=%d,attempts=%d", i, i), func(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + // When the period matches, then it shouldn't fail. + name, port, cancel := CreateFlakyService(t, clients, i) + defer cancel() + + domain := name + ".example.com" + + // Create a simple Ingress over the Service. 
+ _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Retries: retries(i), + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + for j := 0; j < 5; j++ { + resp, err := client.Get("http://" + domain) + if err != nil { + t.Errorf("Error making GET request: %v", err) + continue + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Errorf("Got non-OK status: %d", resp.StatusCode) + DumpResponse(t, resp) + } + } + }) + + t.Run(fmt.Sprintf("period=%d,attempts=%d", i, i-1), func(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + // When the period matches, then it shouldn't fail. + name, port, cancel := CreateFlakyService(t, clients, i) + defer cancel() + + domain := name + ".example.com" + + // Create a simple Ingress over the Service. 
+ _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Retries: retries(i - 1), + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + // When the period is one more than the number of attempts (retries+1), then what we should see is: + // 500 {count=4} + // 200 {count=5} + // 500 {count=9} + // 200 {count=10} + // 500 {count=14} + // 200 {count=15} + current, next := http.StatusInternalServerError, http.StatusOK + for j := 0; j < 5; j++ { + resp, err := client.Get("http://" + domain) + if err != nil { + t.Errorf("Error making GET request: %v", err) + continue + } + defer resp.Body.Close() + if want, got := current, resp.StatusCode; got != want { + t.Errorf("Status = %d, wanted %d", got, want) + DumpResponse(t, resp) + } + // Swap things. + current, next = next, current + } + }) + } +} + +func retries(attempts int) *v1alpha1.HTTPRetry { + return &v1alpha1.HTTPRetry{ + // retries is one less than the number of attempts + Attempts: attempts - 1, + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/timeout_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/timeout_test.go new file mode 100644 index 0000000000..3636558a87 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/timeout_test.go @@ -0,0 +1,111 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "fmt" + "net/http" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestTimeout verifies that an Ingress configured with a timeout respects that. +func TestTimeout(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateTimeoutService(t, clients) + defer cancel() + + // The timeout, and an epsilon value to use as jitter for testing requests + // either hit or miss the timeout (without getting so close that we flake). + const ( + timeout = 1 * time.Second + epsilon = 100 * time.Millisecond + ) + + // Create a simple Ingress over the Service. 
+ _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{name + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Timeout: &metav1.Duration{timeout}, + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + tests := []struct { + name string + code int + initialDelay time.Duration + delay time.Duration + }{{ + name: "no delays is OK", + code: http.StatusOK, + }, { + name: "large delay after headers is ok", + code: http.StatusOK, + delay: timeout + timeout, + }, { + name: "initial delay less than timeout is ok", + code: http.StatusOK, + initialDelay: timeout - epsilon, + }, { + name: "initial delay over timeout is NOT ok", + code: http.StatusGatewayTimeout, + initialDelay: timeout + epsilon, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + checkTimeout(t, client, name, test.code, test.initialDelay, test.delay) + }) + } +} + +func checkTimeout(t *testing.T, client *http.Client, name string, code int, initial time.Duration, timeout time.Duration) { + t.Helper() + + resp, err := client.Get(fmt.Sprintf("http://%s.example.com?initialTimeout=%d&timeout=%d", + name, initial.Milliseconds(), timeout.Milliseconds())) + if err != nil { + t.Fatalf("Error making GET request: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != code { + t.Errorf("Unexpected status code: %d, wanted %d", resp.StatusCode, code) + DumpResponse(t, resp) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/tls_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/tls_test.go new file mode 100644 index 0000000000..14a1908e97 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/conformance/ingress/tls_test.go @@ -0,0 +1,76 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestIngressTLS verifies that the Ingress properly handles the TLS field. +func TestIngressTLS(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + name, port, cancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + defer cancel() + + hosts := []string{name + ".example.com"} + + secretName, cancel := CreateTLSSecret(t, clients, hosts) + defer cancel() + + _, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: hosts, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + TLS: []v1alpha1.IngressTLS{{ + Hosts: hosts, + SecretName: secretName, + SecretNamespace: test.ServingNamespace, + }}, + }) + defer cancel() + + t.Run("verify HTTP", func(t *testing.T) { + RuntimeRequest(t, client, 
"http://"+name+".example.com") + }) + + t.Run("verify HTTPS", func(t *testing.T) { + RuntimeRequest(t, client, "https://"+name+".example.com") + }) +} + +// TODO(mattmoor): Consider adding variants where we have multiple hosts with distinct certificates. diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/update_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/update_test.go new file mode 100644 index 0000000000..76bda8d5d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/update_test.go @@ -0,0 +1,200 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "context" + "net/http" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// Header to disambiguate what version we're talking to. +const updateHeaderName = "Who-Are-You" + +// TestUpdate verifies that when the network programming changes that traffic isn't dropped. +func TestUpdate(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + firstName, firstPort, firstCancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + + // Create a simple Ingress over the Service. 
+ hostname := test.ObjectNameForTest(t) + ing, client, cancel := CreateIngressReady(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{hostname + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: firstName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(firstPort), + }, + // Append different headers to each split, which lets us identify + // which backend we hit. + AppendHeaders: map[string]string{ + updateHeaderName: firstName, + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + proberCancel := checkOK(t, "http://"+hostname+".example.com", client) + + // Give the prober a chance to get started. + time.Sleep(1 * time.Second) + + // First test with only sentinel changes. + for i := 0; i < 10; i++ { + sentinel := test.ObjectNameForTest(t) + + t.Logf("Rolling out %q w/ %q", firstName, sentinel) + + // Update the Ingress, and wait for it to report ready. + UpdateIngressReady(t, clients, ing.Name, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{hostname + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: firstName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(firstPort), + }, + AppendHeaders: map[string]string{ + updateHeaderName: sentinel, + }, + }}, + }}, + }, + }}, + }) + + // Check that it serves the right message as soon as we get "Ready", + // but before we stop probing. 
+ ri := RuntimeRequest(t, client, "http://"+hostname+".example.com") + if ri != nil { + if got := ri.Request.Headers.Get(updateHeaderName); got != sentinel { + t.Errorf("Header[%q] = %q, wanted %q", updateHeaderName, got, sentinel) + } + } + } + + // Next test with varying sentinels AND fresh services each time. + previousVersionCancel := func() { + t.Logf("Tearing down %q", firstName) + firstCancel() + } + for i := 0; i < 10; i++ { + sentinel := test.ObjectNameForTest(t) + nextName, nextPort, nextCancel := CreateRuntimeService(t, clients, networking.ServicePortNameHTTP1) + + t.Logf("Rolling out %q w/ %q", nextName, sentinel) + + // Update the Ingress, and wait for it to report ready. + UpdateIngressReady(t, clients, ing.Name, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{hostname + ".example.com"}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: nextName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(nextPort), + }, + AppendHeaders: map[string]string{ + updateHeaderName: sentinel, + }, + }}, + }}, + }, + }}, + }) + + // Check that it serves the right message as soon as we get "Ready", + // but before we stop probing. + ri := RuntimeRequest(t, client, "http://"+hostname+".example.com") + if ri != nil { + if got := ri.Request.Headers.Get(updateHeaderName); got != sentinel { + t.Errorf("Header[%q] = %q, wanted %q", updateHeaderName, got, sentinel) + } + } + + // Once we've rolled out, cancel the previous version. + previousVersionCancel() + // Then make ourselves the next to be cancelled. + previousVersionCancel = func() { + t.Logf("Tearing down %q", nextName) + nextCancel() + } + } + + // Stop the prober. + proberCancel() + // Then cleanup the final version. 
+ previousVersionCancel() +} + +func checkOK(t *testing.T, url string, client *http.Client) context.CancelFunc { + stopCh := make(chan struct{}) + doneCh := make(chan struct{}) + + // Launch the prober + go func() { + defer close(doneCh) + for { + // Each iteration check for cancellation. + select { + case <-stopCh: + return + default: + } + // Scope the defer below to avoid leaking until the test completes. + func() { + ri := RuntimeRequest(t, client, url) + if ri != nil { + // Use the updateHeaderName as a debug marker to identify which version + // (of programming) is responding. + t.Logf("[%s] Got OK status!", ri.Request.Headers.Get(updateHeaderName)) + } + }() + } + }() + + // Return a cancel function that stops the prober and then waits for it to complete. + return func() { + close(stopCh) + <-doneCh + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/util.go b/test/vendor/knative.dev/serving/test/conformance/ingress/util.go new file mode 100644 index 0000000000..249ac857b9 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/util.go @@ -0,0 +1,768 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ingress + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" + "errors" + "io/ioutil" + "math/big" + "math/rand" + "net" + "net/http" + "net/http/httputil" + "strconv" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" + "knative.dev/serving/test/types" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +var rootCAs = x509.NewCertPool() + +// CreateRuntimeService creates a Kubernetes service that will respond to the protocol +// specified with the given portName. It returns the service name, the port on +// which the service is listening, and a "cancel" function to clean up the +// created resources. +func CreateRuntimeService(t *testing.T, clients *test.Clients, portName string) (string, int, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Avoid zero, but pick a low port number. + port := 50 + rand.Intn(50) + t.Logf("[%s] Using port %d", name, port) + + // Pick a high port number. + containerPort := 8000 + rand.Intn(100) + t.Logf("[%s] Using containerPort %d", name, containerPort) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Image: pkgTest.ImagePath("runtime"), + Ports: []corev1.ContainerPort{{ + Name: portName, + ContainerPort: int32(containerPort), + }}, + // This is needed by the runtime image we are using. 
+ Env: []corev1.EnvVar{{ + Name: "PORT", + Value: strconv.Itoa(containerPort), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(containerPort), + }, + }, + }, + }}, + }, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.ServiceSpec{ + Type: "ClusterIP", + Ports: []corev1.ServicePort{{ + Name: portName, + Port: int32(port), + TargetPort: intstr.FromInt(int(containerPort)), + }}, + Selector: map[string]string{ + "test-pod": name, + }, + }, + } + + return name, port, createPodAndService(t, clients, pod, svc) +} + +// CreateTimeoutService creates a Kubernetes service that will respond to the protocol +// specified with the given portName. It returns the service name, the port on +// which the service is listening, and a "cancel" function to clean up the +// created resources. +func CreateTimeoutService(t *testing.T, clients *test.Clients) (string, int, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Avoid zero, but pick a low port number. + port := 50 + rand.Intn(50) + t.Logf("[%s] Using port %d", name, port) + + // Pick a high port number. + containerPort := 8000 + rand.Intn(100) + t.Logf("[%s] Using containerPort %d", name, containerPort) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Image: pkgTest.ImagePath("timeout"), + Ports: []corev1.ContainerPort{{ + Name: networking.ServicePortNameHTTP1, + ContainerPort: int32(containerPort), + }}, + // This is needed by the timeout image we are using. 
+ Env: []corev1.EnvVar{{ + Name: "PORT", + Value: strconv.Itoa(containerPort), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(containerPort), + }, + }, + }, + }}, + }, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.ServiceSpec{ + Type: "ClusterIP", + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Port: int32(port), + TargetPort: intstr.FromInt(int(containerPort)), + }}, + Selector: map[string]string{ + "test-pod": name, + }, + }, + } + + return name, port, createPodAndService(t, clients, pod, svc) +} + +// CreateFlakyService creates a Kubernetes service where the backing pod will +// succeed only every Nth request. +func CreateFlakyService(t *testing.T, clients *test.Clients, period int) (string, int, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Avoid zero, but pick a low port number. + port := 50 + rand.Intn(50) + t.Logf("[%s] Using port %d", name, port) + + // Pick a high port number. + containerPort := 8000 + rand.Intn(100) + t.Logf("[%s] Using containerPort %d", name, containerPort) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Image: pkgTest.ImagePath("flaky"), + Ports: []corev1.ContainerPort{{ + Name: networking.ServicePortNameHTTP1, + ContainerPort: int32(containerPort), + }}, + // This is needed by the runtime image we are using. 
+ Env: []corev1.EnvVar{{ + Name: "PORT", + Value: strconv.Itoa(containerPort), + }, { + Name: "PERIOD", + Value: strconv.Itoa(period), + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(containerPort), + }, + }, + }, + }}, + }, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.ServiceSpec{ + Type: "ClusterIP", + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Port: int32(port), + TargetPort: intstr.FromInt(int(containerPort)), + }}, + Selector: map[string]string{ + "test-pod": name, + }, + }, + } + + return name, port, createPodAndService(t, clients, pod, svc) +} + +// CreateWebsocketService creates a Kubernetes service that will upgrade the connection +// to use websockets and echo back the received messages with the provided suffix. +func CreateWebsocketService(t *testing.T, clients *test.Clients, suffix string) (string, int, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Avoid zero, but pick a low port number. + port := 50 + rand.Intn(50) + t.Logf("[%s] Using port %d", name, port) + + // Pick a high port number. + containerPort := 8000 + rand.Intn(100) + t.Logf("[%s] Using containerPort %d", name, containerPort) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Image: pkgTest.ImagePath("wsserver"), + Ports: []corev1.ContainerPort{{ + Name: networking.ServicePortNameHTTP1, + ContainerPort: int32(containerPort), + }}, + // This is needed by the runtime image we are using. 
+ Env: []corev1.EnvVar{{ + Name: "PORT", + Value: strconv.Itoa(containerPort), + }, { + Name: "SUFFIX", + Value: suffix, + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(containerPort), + }, + }, + }, + }}, + }, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.ServiceSpec{ + Type: "ClusterIP", + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameHTTP1, + Port: int32(port), + TargetPort: intstr.FromInt(int(containerPort)), + }}, + Selector: map[string]string{ + "test-pod": name, + }, + }, + } + + return name, port, createPodAndService(t, clients, pod, svc) +} + +// CreateGRPCService creates a Kubernetes service that will upgrade the connection +// to use GRPC and echo back the received messages with the provided suffix. +func CreateGRPCService(t *testing.T, clients *test.Clients, suffix string) (string, int, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Avoid zero, but pick a low port number. + port := 50 + rand.Intn(50) + t.Logf("[%s] Using port %d", name, port) + + // Pick a high port number. + containerPort := 8000 + rand.Intn(100) + t.Logf("[%s] Using containerPort %d", name, containerPort) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "foo", + Image: pkgTest.ImagePath("grpc-ping"), + Ports: []corev1.ContainerPort{{ + Name: networking.ServicePortNameH2C, + ContainerPort: int32(containerPort), + }}, + // This is needed by the runtime image we are using. 
+ Env: []corev1.EnvVar{{ + Name: "PORT", + Value: strconv.Itoa(containerPort), + }, { + Name: "SUFFIX", + Value: suffix, + }}, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(containerPort), + }, + }, + }, + }}, + }, + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Labels: map[string]string{ + "test-pod": name, + }, + }, + Spec: corev1.ServiceSpec{ + Type: "ClusterIP", + Ports: []corev1.ServicePort{{ + Name: networking.ServicePortNameH2C, + Port: int32(port), + TargetPort: intstr.FromInt(int(containerPort)), + }}, + Selector: map[string]string{ + "test-pod": name, + }, + }, + } + + return name, port, createPodAndService(t, clients, pod, svc) +} + +// createPodAndService is a helper for creating the pod and service resources, setting +// up their context.CancelFunc, and waiting for it to become ready. +func createPodAndService(t *testing.T, clients *test.Clients, pod *corev1.Pod, svc *corev1.Service) context.CancelFunc { + t.Helper() + + test.CleanupOnInterrupt(func() { clients.KubeClient.Kube.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) }) + pod, err := clients.KubeClient.Kube.CoreV1().Pods(pod.Namespace).Create(pod) + if err != nil { + t.Fatalf("Error creating Pod: %v", err) + } + cancel := func() { + err := clients.KubeClient.Kube.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("Error cleaning up Pod %s", pod.Name) + } + } + + test.CleanupOnInterrupt(func() { + clients.KubeClient.Kube.CoreV1().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}) + }) + svc, err = clients.KubeClient.Kube.CoreV1().Services(svc.Namespace).Create(svc) + if err != nil { + cancel() + t.Fatalf("Error creating Service: %v", err) + } + + // Wait for the Pod to show up in the Endpoints resource. 
+ waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + ep, err := clients.KubeClient.Kube.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return false, nil + } else if err != nil { + return true, err + } + for _, subset := range ep.Subsets { + if len(subset.Addresses) == 0 { + return false, nil + } + } + return len(ep.Subsets) > 0, nil + }) + if waitErr != nil { + cancel() + t.Fatalf("Error waiting for Endpoints to contain a Pod IP: %v", waitErr) + } + + return func() { + err := clients.KubeClient.Kube.CoreV1().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("Error cleaning up Service %s: %v", svc.Name, err) + } + cancel() + } +} + +// CreateIngress creates a Knative Ingress resource +func CreateIngress(t *testing.T, clients *test.Clients, spec v1alpha1.IngressSpec) (*v1alpha1.Ingress, context.CancelFunc) { + t.Helper() + name := test.ObjectNameForTest(t) + + // Create a simple Ingress over the Service. 
+ ing := &v1alpha1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: test.ServingNamespace, + Annotations: map[string]string{ + networking.IngressClassAnnotationKey: test.ServingFlags.IngressClass, + }, + }, + Spec: spec, + } + test.CleanupOnInterrupt(func() { clients.NetworkingClient.Ingresses.Delete(ing.Name, &metav1.DeleteOptions{}) }) + ing, err := clients.NetworkingClient.Ingresses.Create(ing) + if err != nil { + t.Fatalf("Error creating Ingress: %v", err) + } + + return ing, func() { + err := clients.NetworkingClient.Ingresses.Delete(ing.Name, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("Error cleaning up Ingress %s: %v", ing.Name, err) + } + } +} + +func CreateIngressReadyDialContext(t *testing.T, clients *test.Clients, spec v1alpha1.IngressSpec) (*v1alpha1.Ingress, func(context.Context, string, string) (net.Conn, error), context.CancelFunc) { + t.Helper() + ing, cancel := CreateIngress(t, clients, spec) + + if err := v1a1test.WaitForIngressState(clients.NetworkingClient, ing.Name, v1a1test.IsIngressReady, t.Name()); err != nil { + cancel() + t.Fatalf("Error waiting for ingress state: %v", err) + } + ing, err := clients.NetworkingClient.Ingresses.Get(ing.Name, metav1.GetOptions{}) + if err != nil { + cancel() + t.Fatalf("Error getting Ingress: %v", err) + } + + // Create a dialer based on the Ingress' public load balancer. + return ing, CreateDialContext(t, ing, clients), cancel +} + +func CreateIngressReady(t *testing.T, clients *test.Clients, spec v1alpha1.IngressSpec) (*v1alpha1.Ingress, *http.Client, context.CancelFunc) { + t.Helper() + + // Create a client with a dialer based on the Ingress' public load balancer. + ing, dialer, cancel := CreateIngressReadyDialContext(t, clients, spec) + + // TODO(mattmoor): How to get ing? + var tlsConfig *tls.Config + if len(ing.Spec.TLS) > 0 { + // CAs are added to this as TLS secrets are created. 
+ tlsConfig = &tls.Config{ + RootCAs: rootCAs, + } + } + + return ing, &http.Client{ + Transport: &http.Transport{ + DialContext: dialer, + TLSClientConfig: tlsConfig, + }, + }, cancel +} + +// UpdateIngress updates a Knative Ingress resource +func UpdateIngress(t *testing.T, clients *test.Clients, name string, spec v1alpha1.IngressSpec) { + t.Helper() + + ing, err := clients.NetworkingClient.Ingresses.Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting Ingress: %v", err) + } + + ing.Spec = spec + if _, err := clients.NetworkingClient.Ingresses.Update(ing); err != nil { + t.Fatalf("Error updating Ingress: %v", err) + } +} + +func UpdateIngressReady(t *testing.T, clients *test.Clients, name string, spec v1alpha1.IngressSpec) { + t.Helper() + UpdateIngress(t, clients, name, spec) + + if err := v1a1test.WaitForIngressState(clients.NetworkingClient, name, v1a1test.IsIngressReady, t.Name()); err != nil { + t.Fatalf("Error waiting for ingress state: %v", err) + } +} + +// This is based on https://golang.org/src/crypto/tls/generate_cert.go +func CreateTLSSecret(t *testing.T, clients *test.Clients, hosts []string) (string, context.CancelFunc) { + return CreateTLSSecretWithCertPool(t, clients, hosts, test.ServingNamespace, rootCAs) +} + +// CreateTLSSecretWithCertPool creates TLS certificate with given CertPool. 
+func CreateTLSSecretWithCertPool(t *testing.T, clients *test.Clients, hosts []string, ns string, cas *x509.CertPool) (string, context.CancelFunc) { + t.Helper() + + priv, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey() = %v", err) + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := cryptorand.Int(cryptorand.Reader, serialNumberLimit) + if err != nil { + t.Fatalf("Failed to generate serial number: %v", err) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Knative Ingress Conformance Testing"}, + }, + + // Only let it live briefly. + NotBefore: time.Now(), + NotAfter: time.Now().Add(5 * time.Minute), + + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + DNSNames: hosts, + } + + derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + t.Fatalf("x509.CreateCertificate() = %v", err) + } + + cert, err := x509.ParseCertificate(derBytes) + if err != nil { + t.Fatalf("ParseCertificate() = %v", err) + } + // Ideally we'd undo this in "cancel", but there doesn't + // seem to be a mechanism to remove things from a pool. 
+ cas.AddCert(cert) + + certPEM := &bytes.Buffer{} + if err := pem.Encode(certPEM, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + t.Fatalf("Failed to write data to cert.pem: %s", err) + } + + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + t.Fatalf("Unable to marshal private key: %v", err) + } + privPEM := &bytes.Buffer{} + if err := pem.Encode(privPEM, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + t.Fatalf("Failed to write data to key.pem: %s", err) + } + + name := test.ObjectNameForTest(t) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: map[string]string{ + "test-secret": name, + }, + }, + Type: corev1.SecretTypeTLS, + StringData: map[string]string{ + corev1.TLSCertKey: certPEM.String(), + corev1.TLSPrivateKeyKey: privPEM.String(), + }, + } + test.CleanupOnInterrupt(func() { + clients.KubeClient.Kube.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}) + }) + if _, err := clients.KubeClient.Kube.CoreV1().Secrets(secret.Namespace).Create(secret); err != nil { + t.Fatalf("Error creating Secret: %v", err) + } + return name, func() { + err := clients.KubeClient.Kube.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}) + if err != nil { + t.Errorf("Error cleaning up Secret %s: %v", secret.Name, err) + } + } +} + +// CreateDialContext looks up the endpoint information to create a "dialer" for +// the provided Ingress' public ingress loas balancer. 
It can be used to +// contact external-visibility services with an HTTP client via: +// +// client := &http.Client{ +// Transport: &http.Transport{ +// DialContext: CreateDialContext(t, ing, clients), +// }, +// } +func CreateDialContext(t *testing.T, ing *v1alpha1.Ingress, clients *test.Clients) func(context.Context, string, string) (net.Conn, error) { + t.Helper() + if ing.Status.PublicLoadBalancer == nil || len(ing.Status.PublicLoadBalancer.Ingress) < 1 { + t.Fatal("Ingress does not have a public load balancer assigned.") + } + + // TODO(mattmoor): I'm open to tricks that would let us cleanly test multiple + // public load balancers or LBs with multiple ingresses (below), but want to + // keep our simple tests simple, thus the [0]s... + + // We expect an ingress LB with the form foo.bar.svc.cluster.local (though + // we aren't strictly sensitive to the suffix, this is just illustrative. + internalDomain := ing.Status.PublicLoadBalancer.Ingress[0].DomainInternal + parts := strings.SplitN(internalDomain, ".", 3) + if len(parts) < 3 { + t.Fatalf("Too few parts in internal domain: %s", internalDomain) + } + name, namespace := parts[0], parts[1] + + svc, err := clients.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Unable to retrieve Kubernetes service %s/%s: %v", namespace, name, err) + } + if len(svc.Status.LoadBalancer.Ingress) < 1 { + t.Fatal("Service does not have any ingresses (not type LoadBalancer?).") + } + ingress := svc.Status.LoadBalancer.Ingress[0] + + return func(_ context.Context, _ string, address string) (net.Conn, error) { + _, port, err := net.SplitHostPort(address) + if err != nil { + return nil, err + } + if ingress.IP != "" { + return net.Dial("tcp", ingress.IP+":"+port) + } + if ingress.Hostname != "" { + return net.Dial("tcp", ingress.Hostname+":"+port) + } + return nil, errors.New("service ingress does not contain dialing information") + } +} + +type RequestOption func(*http.Request) 
+ +func RuntimeRequest(t *testing.T, client *http.Client, url string, opts ...RequestOption) *types.RuntimeInfo { + t.Helper() + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + t.Errorf("Error creating Request: %v", err) + return nil + } + + for _, opt := range opts { + opt(req) + } + + resp, err := client.Do(req) + if err != nil { + t.Errorf("Error making GET request: %v", err) + return nil + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Errorf("Got non-OK status: %d", resp.StatusCode) + DumpResponse(t, resp) + return nil + } + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Unable to read response body: %v", err) + DumpResponse(t, resp) + return nil + } + ri := &types.RuntimeInfo{} + if err := json.Unmarshal(b, ri); err != nil { + t.Errorf("Unable to parse runtime image's response payload: %v", err) + return nil + } + return ri +} + +func DumpResponse(t *testing.T, resp *http.Response) { + t.Helper() + + b, err := httputil.DumpResponse(resp, true) + if err != nil { + t.Errorf("Error dumping response: %v", err) + } + t.Log(string(b)) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/ingress/websocket_test.go b/test/vendor/knative.dev/serving/test/conformance/ingress/websocket_test.go new file mode 100644 index 0000000000..050963d3ce --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/ingress/websocket_test.go @@ -0,0 +1,198 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ingress + +import ( + "fmt" + "math/rand" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/gorilla/websocket" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// TestWebsocket verifies that websockets may be used via a simple Ingress. +func TestWebsocket(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const suffix = "- pong" + name, port, cancel := CreateWebsocketService(t, clients, suffix) + defer cancel() + + domain := name + ".example.com" + + // Create a simple Ingress over the Service. + _, dialCtx, cancel := CreateIngressReadyDialContext(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: name, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(port), + }, + }}, + }}, + }, + }}, + }) + defer cancel() + + dialer := websocket.Dialer{ + NetDialContext: dialCtx, + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, + } + + u := url.URL{Scheme: "ws", Host: domain, Path: "/"} + conn, _, err := dialer.Dial(u.String(), http.Header{"Host": {domain}}) + if err != nil { + t.Fatalf("Dial() = %v", err) + } + defer conn.Close() + + for i := 0; i < 100; i++ { + checkWebsocketRoundTrip(t, conn, suffix) + } +} + +// TestWebsocketSplit verifies that websockets may be used across a traffic split. 
+func TestWebsocketSplit(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const suffixBlue = "- blue" + blueName, bluePort, cancel := CreateWebsocketService(t, clients, suffixBlue) + defer cancel() + + const suffixGreen = "- green" + greenName, greenPort, cancel := CreateWebsocketService(t, clients, suffixGreen) + defer cancel() + + // The suffixes we expect to see. + want := sets.NewString(suffixBlue, suffixGreen) + + // Create a simple Ingress over the Service. + name := test.ObjectNameForTest(t) + domain := name + ".example.com" + _, dialCtx, cancel := CreateIngressReadyDialContext(t, clients, v1alpha1.IngressSpec{ + Rules: []v1alpha1.IngressRule{{ + Hosts: []string{domain}, + Visibility: v1alpha1.IngressVisibilityExternalIP, + HTTP: &v1alpha1.HTTPIngressRuleValue{ + Paths: []v1alpha1.HTTPIngressPath{{ + Splits: []v1alpha1.IngressBackendSplit{{ + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: blueName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(bluePort), + }, + Percent: 50, + }, { + IngressBackend: v1alpha1.IngressBackend{ + ServiceName: greenName, + ServiceNamespace: test.ServingNamespace, + ServicePort: intstr.FromInt(greenPort), + }, + Percent: 50, + }}, + }}, + }, + }}, + }) + defer cancel() + + dialer := websocket.Dialer{ + NetDialContext: dialCtx, + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, + } + u := url.URL{Scheme: "ws", Host: domain, Path: "/"} + + got := sets.NewString() + for i := 0; i < 10; i++ { + conn, _, err := dialer.Dial(u.String(), http.Header{"Host": {domain}}) + if err != nil { + t.Fatalf("Dial() = %v", err) + } + defer conn.Close() + + suffix := findWebsocketSuffix(t, conn) + if suffix == "" { + continue + } + got.Insert(suffix) + + for j := 0; j < 10; j++ { + checkWebsocketRoundTrip(t, conn, suffix) + } + } + + if !cmp.Equal(want, got) { + t.Errorf("(-want, +got) = %s", cmp.Diff(want, got)) + } +} + +func findWebsocketSuffix(t *testing.T, conn 
*websocket.Conn) string { + // Establish the suffix that corresponds to this socket. + message := fmt.Sprintf("ping - %d", rand.Intn(1000)) + if err := conn.WriteMessage(websocket.TextMessage, []byte(message)); err != nil { + t.Errorf("WriteMessage() = %v", err) + return "" + } + + _, recv, err := conn.ReadMessage() + if err != nil { + t.Errorf("ReadMessage() = %v", err) + return "" + } + gotMsg := string(recv) + if !strings.HasPrefix(gotMsg, message) { + t.Errorf("ReadMessage() = %s, wanted %s prefix", gotMsg, message) + return "" + } + return strings.TrimSpace(strings.TrimPrefix(gotMsg, message)) +} + +func checkWebsocketRoundTrip(t *testing.T, conn *websocket.Conn, suffix string) { + message := fmt.Sprintf("ping - %d", rand.Intn(1000)) + if err := conn.WriteMessage(websocket.TextMessage, []byte(message)); err != nil { + t.Errorf("WriteMessage() = %v", err) + return + } + + // Read back the echoed message and compared with sent. + if _, recv, err := conn.ReadMessage(); err != nil { + t.Errorf("ReadMessage() = %v", err) + } else if got, want := string(recv), message+" "+suffix; got != want { + t.Errorf("ReadMessage() = %s, wanted %s", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/cgroup_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/cgroup_test.go new file mode 100644 index 0000000000..638cbec13c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/cgroup_test.go @@ -0,0 +1,138 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "strconv" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "knative.dev/serving/test" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const ( + cpuLimit = 1 // CPU + memoryLimit = 128 // MB + cpuRequest = 0.125 // CPU +) + +func toMilliValue(value float64) string { + return fmt.Sprintf("%dm", int(value*1000)) +} + +// TestMustHaveCgroupConfigured verifies that the Linux cgroups are configured based on the specified +// resource limits and requests as delared by "MUST" in the runtime-contract. +func TestMustHaveCgroupConfigured(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + resources := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(toMilliValue(cpuLimit)), + corev1.ResourceMemory: resource.MustParse(strconv.Itoa(memoryLimit) + "M"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(toMilliValue(cpuRequest)), + }, + } + + // Cgroup settings are based on the CPU and Memory Limits as well as CPU Reuqests + // https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + expectedCgroups := map[string]int{ + "/sys/fs/cgroup/memory/memory.limit_in_bytes": memoryLimit * 1000000, // 128 MB + "/sys/fs/cgroup/cpu/cpu.shares": cpuRequest * 1024} // CPURequests * 1024 + + _, ri, err := fetchRuntimeInfo(t, clients, WithResourceRequirements(resources)) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + cgroups := ri.Host.Cgroups + + // These are used to check the ratio of 'period' to 'quora'. 
It needs to + // be equal to the 'cpuLimit (limit = period / quota) + var period, quota *int + + for _, cgroup := range cgroups { + if cgroup.Error != "" { + t.Errorf("Error getting cgroup information: %v", cgroup.Error) + continue + } + + // These two are special - just save their values and then continue + if cgroup.Name == "/sys/fs/cgroup/cpu/cpu.cfs_period_us" { + period = cgroup.Value + continue + } + if cgroup.Name == "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" { + quota = cgroup.Value + continue + } + + if _, ok := expectedCgroups[cgroup.Name]; !ok { + // Service returned a value we don't test + t.Logf("%v cgroup returned, but not validated", cgroup.Name) + continue + } + if got, want := *cgroup.Value, expectedCgroups[cgroup.Name]; got != want { + t.Errorf("%s = %d, want: %d", cgroup.Name, *cgroup.Value, expectedCgroups[cgroup.Name]) + } + } + + if period == nil { + t.Errorf("Can't find the 'cpu.cfs_period_us' from cgroups") + } else if quota == nil { + t.Errorf("Can't find the 'cpu.cfs_quota_us' from cgroups") + } else { + percent := (100 * (*period)) / (*quota) + if percent != cpuLimit*100 { + t.Errorf("Percent (%v) is wrong should be %v. Period: %v Quota: %v", + percent, cpuLimit*100, period, quota) + } + } + +} + +// TestShouldHaveCgroupReadOnly verifies that the Linux cgroups are mounted read-only within the +// container. 
+func TestShouldHaveCgroupReadOnly(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + _, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + cgroups := ri.Host.Cgroups + + for _, cgroup := range cgroups { + if cgroup.Error != "" { + t.Errorf("Error getting cgroup information: %v", cgroup.Error) + continue + } + if got, want := *cgroup.ReadOnly, true; got != want { + t.Errorf("For cgroup %s cgroup.ReadOnly = %v, want: %v", cgroup.Name, *cgroup.ReadOnly, want) + } + } + +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/cmd_args_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/cmd_args_test.go new file mode 100644 index 0000000000..a858d33c2a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/cmd_args_test.go @@ -0,0 +1,54 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + v1a1options "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +func withCmdArgs(cmds []string, args []string) v1a1options.ServiceOption { + return func(s *v1alpha1.Service) { + c := &s.Spec.Template.Spec.Containers[0] + c.Command = cmds + c.Args = args + } +} + +func TestCmdArgs(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + cmds := []string{"/ko-app/runtime", "abra"} + args := []string{"cadabra", "do"} + + _, ri, err := fetchRuntimeInfo(t, clients, withCmdArgs(cmds, args)) + if err != nil { + t.Fatalf("Failed to fetch runtime info: %v", err) + } + + want := append(cmds, args...) + if !cmp.Equal(ri.Host.Args, want) { + t.Errorf("args = %v, want: %v", ri.Host.Args, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/container_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/container_test.go new file mode 100644 index 0000000000..8680918969 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/container_test.go @@ -0,0 +1,199 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/pkg/ptr" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// TestMustNotContainerContraints tests that attempting to set unsupported fields or invalid values as +// defined by "MUST NOT" statements from the runtime contract results in a user facing error. +func TestMustNotContainerConstraints(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + testCases := []struct { + name string + options func(s *v1alpha1.Service) + }{{ + name: "TestArbitraryPortName", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{ + Name: "arbitrary", + }} + }, + }, { + name: "TestMountPropagation", + options: func(s *v1alpha1.Service) { + propagationMode := corev1.MountPropagationHostToContainer + s.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ + Name: "VolumeMount", + MountPath: "/", + MountPropagation: &propagationMode, + }} + }, + }, { + name: "TestReadinessHTTPProbePort", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(8888), + }, + }, + } + }, + }, { + name: "TestLivenessHTTPProbePort", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].LivenessProbe = &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(8888), + }, + }, + } + }, + }, { + name: "TestReadinessTCPProbePort", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(8888)}, + }, + } + }, + }, { + name: 
"TestLivenessTCPProbePort", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].LivenessProbe = &corev1.Probe{ + Handler: corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(8888)}, + }, + } + }, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Runtime, + } + if svc, err := v1a1test.CreateLatestService(t, clients, names, tc.options); err == nil { + t.Errorf("CreateService = %v, want: error", spew.Sdump(svc)) + } + }) + } +} + +// TestShouldNotContainerContraints tests that attempting to set unsupported fields or invalid values as +// defined by "SHOULD NOT" statements from the runtime contract results in a user facing error. +func TestShouldNotContainerConstraints(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + testCases := []struct { + name string + options func(s *v1alpha1.Service) + }{{ + name: "TestPoststartHook", + options: func(s *v1alpha1.Service) { + lifecycleHandler := &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "echo Hello from the post start handler > /usr/share/message"}, + } + s.Spec.Template.Spec.Containers[0].Lifecycle = &corev1.Lifecycle{ + PostStart: &corev1.Handler{Exec: lifecycleHandler}, + } + }, + }, { + name: "TestPrestopHook", + options: func(s *v1alpha1.Service) { + lifecycleHandler := &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "echo Hello from the pre stop handler > /usr/share/message"}, + } + s.Spec.Template.Spec.Containers[0].Lifecycle = &corev1.Lifecycle{ + PreStop: &corev1.Handler{Exec: lifecycleHandler}, + } + }, + }, { + name: "TestMultiplePorts", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{ + {ContainerPort: 80}, + {ContainerPort: 81}, + } + }, + }, { + name: "TestHostPort", + options: func(s *v1alpha1.Service) { + 
s.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{ + HostPort: 80, + }} + }, + }, { + name: "TestStdin", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].Stdin = true + }, + }, { + name: "TestStdinOnce", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].StdinOnce = true + }, + }, { + name: "TestTTY", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].TTY = true + }, + }, { + name: "TestInvalidUID", + options: func(s *v1alpha1.Service) { + s.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{ + RunAsUser: ptr.Int64(-10), + } + }, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Runtime, + } + if svc, err := v1a1test.CreateLatestService(t, clients, names, tc.options); err == nil { + t.Errorf("CreateLatestService = %v, want: error", spew.Sdump(svc)) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/envpropagation_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/envpropagation_test.go new file mode 100644 index 0000000000..0e77a8ab56 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/envpropagation_test.go @@ -0,0 +1,117 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/test" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// TestSecretsViaEnv verifies propagation of Secrets through environment variables. +func TestSecretsViaEnv(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + t.Run("env", func(t *testing.T) { + err := fetchEnvironmentAndVerify(t, clients, WithEnv(corev1.EnvVar{ + Name: test.EnvKey, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: test.ConformanceSecret, + }, + Key: test.EnvKey, + }, + }, + })) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("envFrom", func(t *testing.T) { + err := fetchEnvironmentAndVerify(t, clients, WithEnvFrom(corev1.EnvFromSource{ + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: test.ConformanceSecret, + }, + }, + })) + if err != nil { + t.Fatal(err) + } + }) +} + +// TestConfigsViaEnv verifies propagation of configs through environment variables. 
+func TestConfigsViaEnv(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + t.Run("env", func(t *testing.T) { + err := fetchEnvironmentAndVerify(t, clients, WithEnv(corev1.EnvVar{ + Name: test.EnvKey, + ValueFrom: &corev1.EnvVarSource{ + ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: test.ConformanceConfigMap, + }, + Key: test.EnvKey, + }, + }, + })) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("envFrom", func(t *testing.T) { + err := fetchEnvironmentAndVerify(t, clients, WithEnvFrom(corev1.EnvFromSource{ + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: test.ConformanceConfigMap, + }, + }, + })) + if err != nil { + t.Fatal(err) + } + }) +} + +func fetchEnvironmentAndVerify(t *testing.T, clients *test.Clients, opts ...interface{}) error { + _, ri, err := fetchRuntimeInfo(t, clients, opts...) + if err != nil { + return err + } + + if value, ok := ri.Host.EnvVars[test.EnvKey]; ok { + if value != test.EnvValue { + return fmt.Errorf("environment value doesn't match. Expected: %s, Found: %s", test.EnvValue, value) + } + } else { + return fmt.Errorf("%s not found in environment variables", test.EnvKey) + } + return nil +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/envvars_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/envvars_test.go new file mode 100644 index 0000000000..e80aa8bc96 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/envvars_test.go @@ -0,0 +1,78 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "reflect" + "strconv" + "testing" + + "knative.dev/serving/test" + "knative.dev/serving/test/types" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// TestShouldEnvVars verifies environment variables that are declared as "SHOULD be set" in runtime-contract +func TestShouldEnvVars(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + names, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + t.Fatal(err) + } + r := reflect.ValueOf(names) + for k, v := range types.ShouldEnvVars { + value, exist := ri.Host.EnvVars[k] + if !exist { + t.Fatalf("Runtime contract env variable %q is not set", k) + } + field := reflect.Indirect(r).FieldByName(v) + if value != field.String() { + t.Fatalf("Runtime contract env variable %q value doesn't match with expected: got %q, want %q", k, value, field.String()) + } + } +} + +// TestMustEnvVars verifies environment variables that are declared as "MUST be set" in runtime-contract +func TestMustEnvVars(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + portStr, ok := types.MustEnvVars["PORT"] + if !ok { + t.Fatal("Missing PORT from set of MustEnvVars") + } + port, err := strconv.Atoi(portStr) + if err != nil { + t.Fatal("Invalid PORT value in MustEnvVars") + } + _, ri, err := fetchRuntimeInfo(t, clients, WithNumberedPort(int32(port))) + if err != nil { + t.Fatal(err) + } + for k, v := range types.MustEnvVars { + value, exist := ri.Host.EnvVars[k] + if !exist { + t.Fatalf("Runtime contract env variable %q is not set", k) + } + if v != value { + t.Fatalf("Runtime contract env 
variable %q value doesn't match with expected: got %q, want %q", k, value, v) + } + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/file_descriptor_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/file_descriptor_test.go new file mode 100644 index 0000000000..7bbc6bf265 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/file_descriptor_test.go @@ -0,0 +1,52 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "testing" + + "knative.dev/serving/test" +) + +// TestShouldHaveStdinEOF verifies using the runtime test container that reading from the +// stdin file descriptor results in EOF. 
+func TestShouldHaveStdinEOF(t *testing.T) { + clients := test.Setup(t) + + _, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + if ri.Host == nil { + t.Fatal("Missing host information from runtime info.") + } + stdin := ri.Host.Stdin + if stdin == nil { + t.Fatal("Missing stdin information from host info.") + } + + if stdin.Error != "" { + t.Fatalf("Error reading stdin: %v", stdin.Error) + } + + if got, want := *stdin.EOF, true; got != want { + t.Errorf("Stdin.EOF = %t, expected: %t", got, want) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/filesystem_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/filesystem_test.go new file mode 100644 index 0000000000..beeb789f78 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/filesystem_test.go @@ -0,0 +1,105 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "testing" + + "knative.dev/serving/test" + "knative.dev/serving/test/types" +) + +func verifyPermissionsString(resp string, expected string) error { + if len(resp) != len(expected) { + return fmt.Errorf("perm = %q (len:%d), want: %q (len:%d)", resp, len(resp), expected, len(expected)) + } + + for index := range expected { + if expected[index] != '*' && expected[index] != resp[index] { + return fmt.Errorf("perm[%d] = %c, want: %c", index, expected[index], resp[index]) + } + } + return nil +} + +func testFiles(t *testing.T, clients *test.Clients, paths map[string]types.FileInfo) error { + _, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + return err + } + + for path, file := range paths { + riFile, ok := ri.Host.Files[path] + if !ok { + return fmt.Errorf("runtime contract file info not present: %s", path) + } + + if file.Error != "" && file.Error != riFile.Error { + return fmt.Errorf("%s.Error = %s, want: %s", path, riFile.Error, file.Error) + } + + if file.IsDir != nil { + if riFile.IsDir == nil { + return fmt.Errorf("%s.IsDir = nil, want: %t", path, *file.IsDir) + } else if *file.IsDir != *riFile.IsDir { + return fmt.Errorf("%s.IsDir = %t, want: %t", path, *riFile.IsDir, *file.IsDir) + } + } + + if file.SourceFile != "" && file.SourceFile != riFile.SourceFile { + return fmt.Errorf("%s.SourceFile = %s, want: %s", path, riFile.SourceFile, file.SourceFile) + } + + if file.Perm != "" { + if err := verifyPermissionsString(riFile.Perm, file.Perm); err != nil { + return fmt.Errorf("%s has invalid permissions string %s: %w", path, riFile.Perm, err) + } + } + + riFileAccess, ok := ri.Host.FileAccess[path] + if ok && riFileAccess.ReadErr != "" { + return fmt.Errorf("Want no read errors for file %s, got error: %s", path, riFileAccess.ReadErr) + } + if ok && riFileAccess.WriteErr != "" { + return fmt.Errorf("Want no write errors for file %s, got error: %s", path, riFileAccess.WriteErr) + } + } + return nil +} + +// 
TestMustHaveFiles asserts that the file system has all the MUST have paths and they have appropriate permissions +// and type as applicable. +func TestMustHaveFiles(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + if err := testFiles(t, clients, types.MustFiles); err != nil { + t.Error(err) + } +} + +// TestShouldHaveFiles asserts that the file system has all the SHOULD have paths and that they have the appropriate +// permissions and type as applicable. +func TestShouldHaveFiles(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + if err := testFiles(t, clients, types.ShouldFiles); err != nil { + t.Error(err) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/header_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/header_test.go new file mode 100644 index 0000000000..1bf22b8647 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/header_test.go @@ -0,0 +1,191 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "net" + "net/http" + "regexp" + "strings" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/test" +) + +const ( + userHeaderKey = "this-was-user-set" + userHeaderValue = "a value" +) + +// TestMustHaveHeadersSet verified that all headers declared as "MUST" in the runtime +// contract are present from the point of view of the user container. 
+func TestMustHaveHeadersSet(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + _, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + // For incoming requests, the Host header is promoted to the + // Request.Host field and removed from the Header map. Therefore we + // check against the Host field instead of the map. + if ri.Request.Host == "" { + // We just check that the host header exists and is non-empty as the request + // may be made internally or externally which will result in a different host. + t.Error("Header host was not present on request") + } + + expectedHeaders := map[string]stringMatcher{ + // We expect the forwarded header to be key-value pairs separated by commas and semi-colons, where + // the allowed keys are `for`, `by`, `proto` and `host` and values are loosely validated by shape. + // See https://tools.ietf.org/html/rfc7239#section-4 for the full syntax rules. + "forwarded": &checkForwardedHeader{expected: "valid Forwarded header per RFC7239"}, + } + + headers := ri.Request.Headers + + matchHeaders(t, headers, expectedHeaders) +} + +// TestMustHaveHeadersSet verified that all headers declared as "SHOULD" in the runtime +// contract are present from the point of view of the user container. +func TestShouldHaveHeadersSet(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + userHeaders := make(http.Header) + userHeaders.Add(userHeaderKey, userHeaderValue) + + expectedHeaders := map[string]stringMatcher{ + // We expect user headers to be passed through exactly as-is. + userHeaderKey: regexp.MustCompile("^" + userHeaderValue + "$"), + // We expect the protocol to be http for our test image. + "x-forwarded-proto": regexp.MustCompile("https?"), + // We expect the value to be a list of at least one comma separated IP addresses (IPv4 or IPv6). 
+ "x-forwarded-for": &checkIPList{expected: "comma separated IPv4 or IPv6 addresses"}, + + // Trace Headers + // See https://github.com/openzipkin/b3-propagation#overall-process + // We use the multiple header variant for tracing. We do not validate the single header variant. + // We expect the value to be a 64-bit hex string + "x-b3-spanid": regexp.MustCompile("[0-9a-f]{16}"), + // We expect the value to be a 64-bit or 128-bit hex string + "x-b3-traceid": regexp.MustCompile("[0-9a-f]{16}|[0-9a-f]{32}"), + + // "x-b3-parentspanid" and "x-b3-sampled" are often present for tracing, but are not + // required for tracing so we do not validate them. + } + + _, ri, err := fetchRuntimeInfo(t, clients, pkgTest.WithHeader(userHeaders)) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + headers := ri.Request.Headers + + matchHeaders(t, headers, expectedHeaders) +} + +type checkIPList struct { + expected string +} + +// MatchString returns true if the passed string is a list of IPv4 or IPv6 Addresses. Otherwise returns false. +func (*checkIPList) MatchString(s string) bool { + for _, ip := range strings.Split(s, ",") { + if net.ParseIP(strings.TrimSpace(ip)) == nil { + return false + } + } + return true +} + +// String returns the expected string from the object. +func (c *checkIPList) String() string { + return c.expected +} + +type checkForwardedHeader struct { + expected string +} + +var ( + // token as defined in https://tools.ietf.org/html/rfc7230#section-3.2.6 + tokenMatcher = regexp.MustCompile(`^[0-9a-zA-Z!#$%&'*+-.^_|~]+$`) + // approximation of quoted-string as defined in https://tools.ietf.org/html/rfc7230#section-3.2.6 + quotedStringMatcher = regexp.MustCompile(`^"[^"]*"$`) +) + +func isDelimiter(r rune) bool { + return r == ';' || r == ',' +} + +// MatchString returns true if the passed string contains a roughly valid Forwarded header content. 
+func (*checkForwardedHeader) MatchString(s string) bool { + for _, pair := range strings.FieldsFunc(s, isDelimiter) { + // Allow for a trailing delimiter. Some routers unfortunately do that. + if pair == "" { + continue + } + parts := strings.Split(strings.TrimSpace(pair), "=") + if len(parts) < 2 { + return false + } + token := parts[0] + value := parts[1] + + if !tokenMatcher.MatchString(token) { + return false + } + + if value != "" && !(tokenMatcher.MatchString(value) || quotedStringMatcher.MatchString(value)) { + return false + } + } + return true +} + +// String returns the expected string from the object. +func (c *checkForwardedHeader) String() string { + return c.expected +} + +type stringMatcher interface { + MatchString(string) bool + String() string +} + +func matchHeaders(t *testing.T, headers http.Header, expectedHeaders map[string]stringMatcher) { + for header, match := range expectedHeaders { + hvl, ok := headers[http.CanonicalHeaderKey(header)] + if !ok { + t.Errorf("Header %s was not present on request", header) + continue + } + // Check against each value for the header key + for _, hv := range hvl { + if !match.MatchString(hv) { + t.Errorf("%s = %s; want: %s", header, hv, match.String()) + } + } + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/main_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/main_test.go new file mode 100644 index 0000000000..8f9ad941f6 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/main_test.go @@ -0,0 +1,33 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "flag" + "os" + "testing" + + pkgTest "knative.dev/pkg/test" +) + +func TestMain(m *testing.M) { + flag.Parse() + pkgTest.SetupLoggingFlags() + os.Exit(m.Run()) +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/protocol_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/protocol_test.go new file mode 100644 index 0000000000..0e9b48cbc0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/protocol_test.go @@ -0,0 +1,78 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + v1a1options "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +func withPort(name string) v1a1options.ServiceOption { + return func(s *v1alpha1.Service) { + if name != "" { + s.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: name}} + } + } +} + +func TestProtocols(t *testing.T) { + t.Parallel() + tests := []struct { + name string + portName string + wantMajor int + wantMinor int + }{{ + name: "h2c", + portName: "h2c", + wantMajor: 2, + wantMinor: 0, + }, { + name: "http1", + portName: "http1", + wantMajor: 1, + wantMinor: 1, + }, { + name: "default", + portName: "", + wantMajor: 1, + wantMinor: 1, + }} + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + clients := test.Setup(t) + _, ri, err := fetchRuntimeInfo(t, clients, withPort(tt.portName)) + if err != nil { + t.Fatalf("Failed to fetch runtime info: %v", err) + } + + if tt.wantMajor != ri.Request.ProtoMajor || tt.wantMinor != ri.Request.ProtoMinor { + t.Errorf("Want HTTP/%d.%d, got HTTP/%d.%d", tt.wantMajor, tt.wantMinor, ri.Request.ProtoMajor, ri.Request.ProtoMinor) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/readiness_probe_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/readiness_probe_test.go new file mode 100644 index 0000000000..a2bf1246f4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/readiness_probe_test.go @@ -0,0 +1,95 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/test/logstream" + revisionresourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestProbeRuntime(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + + clients := test.Setup(t) + + var testCases = []struct { + // name of the test case, which will be inserted in names of routes, configurations, etc. + // Use a short name here to avoid hitting the 63-character limit in names + // (e.g., "service-to-service-call-svc-cluster-local-uagkdshh-frkml-service" is too long.) + name string + // handler to be used for readiness probe in user container. 
+ handler corev1.Handler + }{{ + "httpGet", + corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + }, + }, + }, { + "tcpSocket", + corev1.Handler{ + TCPSocket: &corev1.TCPSocketAction{}, + }, + }, { + "exec", + corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: []string{"/ko-app/runtime", "probe"}, + }, + }, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.Runtime, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithReadinessProbe( + &corev1.Probe{ + Handler: tc.handler, + })) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + // Check if scaling down works even if access from liveness probe exists. + if err := e2e.WaitForScaleToZero(t, revisionresourcenames.Deployment(resources.Revision), clients); err != nil { + t.Fatalf("Could not scale to zero: %v", err) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/sysctl_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/sysctl_test.go new file mode 100644 index 0000000000..3b153d03b2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/sysctl_test.go @@ -0,0 +1,57 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/sets" + + "knative.dev/serving/test" +) + +// TestShouldHaveSysctlReadOnly verifies that the /proc/sys filesystem mounted within the container +// is read-only. +func TestShouldHaveSysctlReadOnly(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + _, ri, err := fetchRuntimeInfo(t, clients) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + mounts := ri.Host.Mounts + + for _, mount := range mounts { + if mount.Error != "" { + t.Fatalf("Error getting mount information: %s", mount.Error) + } + if mount.Path == "/proc/sys" { + if got, want := mount.Type, "proc"; got != want { + t.Errorf("%s has mount.Type = %s, wanted: %s", mount.Path, mount.Type, want) + } + if got, want := mount.Device, "proc"; got != want { + t.Errorf("%s has mount.Device = %s, wanted: %s", mount.Path, mount.Device, want) + } + if !sets.NewString(mount.Options...).Has("ro") { + t.Errorf("%s has mount.Options = %v, wanted: ro", mount.Path, mount.Options) + } + } + } +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/user_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/user_test.go new file mode 100644 index 0000000000..4d80eabeaa --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/user_test.go @@ -0,0 +1,102 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "knative.dev/serving/test" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +const ( + securityContextUserID = 2020 + unprivilegedUserID = 65532 +) + +// TestMustRunAsUser verifies that a supplied runAsUser through securityContext takes +// effect as declared by "MUST" in the runtime-contract. +func TestMustRunAsUser(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + runAsUser := int64(securityContextUserID) + securityContext := &corev1.SecurityContext{ + RunAsUser: &runAsUser, + } + + // We need to modify the working dir because the specified user cannot access the + // default user's working dir. + _, ri, err := fetchRuntimeInfo(t, clients, WithSecurityContext(securityContext), WithWorkingDir("/")) + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + if ri.Host == nil { + t.Fatal("Missing host information from runtime info.") + } + + if ri.Host.User == nil { + t.Fatal("Missing user information from runtime info.") + } + + if got, want := ri.Host.User.UID, securityContextUserID; got != want { + t.Errorf("uid = %d, want: %d", got, want) + } + + // We expect the effective userID to match the userID as we + // did not use setuid. + if got, want := ri.Host.User.EUID, securityContextUserID; got != want { + t.Errorf("euid = %d, want: %d", got, want) + } +} + +// TestShouldRunAsUserContainerDefault verifies that a container that sets runAsUser +// in the Dockerfile is respected when executed in Knative as declared by "SHOULD" +// in the runtime-contract. 
+func TestShouldRunAsUserContainerDefault(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + _, ri, err := fetchRuntimeInfo(t, clients) + + if err != nil { + t.Fatalf("Error fetching runtime info: %v", err) + } + + if ri.Host == nil { + t.Fatal("Missing host information from runtime info.") + } + + if ri.Host.User == nil { + t.Fatal("Missing user information from runtime info.") + } + + if got, want := ri.Host.User.UID, unprivilegedUserID; got != want { + t.Errorf("uid = %d, want: %d", got, want) + } + + // We expect the effective userID to match the userID as we + // did not use setuid. + if got, want := ri.Host.User.EUID, unprivilegedUserID; got != want { + t.Errorf("euid = %d, want: %d", got, want) + } + +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/util.go b/test/vendor/knative.dev/serving/test/conformance/runtime/util.go new file mode 100644 index 0000000000..6cf8177d56 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/util.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "encoding/json" + "fmt" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + "knative.dev/serving/test/types" + v1a1test "knative.dev/serving/test/v1alpha1" + + v1alpha1testing "knative.dev/serving/pkg/testing/v1alpha1" +) + +// fetchRuntimeInfo creates a Service that uses the 'runtime' test image, and extracts the returned output into the +// RuntimeInfo object. The 'runtime' image uses uid 65532. +func fetchRuntimeInfo( + t *testing.T, + clients *test.Clients, + opts ...interface{}) (*test.ResourceNames, *types.RuntimeInfo, error) { + + names := &test.ResourceNames{Image: test.Runtime} + t.Helper() + names.Service = test.ObjectNameForTest(t) + + defer test.TearDown(clients, *names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, *names) }) + + serviceOpts, reqOpts, err := splitOpts(opts...) + if err != nil { + return nil, nil, err + } + + serviceOpts = append(serviceOpts, func(svc *v1alpha1.Service) { + // Always fetch the latest runtime image. + svc.Spec.Template.Spec.Containers[0].ImagePullPolicy = "Always" + }) + + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + serviceOpts...) + if err != nil { + return nil, nil, err + } + + resp, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + objects.Service.Status.URL.URL(), + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "RuntimeInfo", + test.ServingFlags.ResolvableDomain, + reqOpts...) 
+ if err != nil { + return nil, nil, err + } + + var ri types.RuntimeInfo + err = json.Unmarshal(resp.Body, &ri) + return names, &ri, err +} + +func splitOpts(opts ...interface{}) ([]v1alpha1testing.ServiceOption, []interface{}, error) { + serviceOpts := []v1alpha1testing.ServiceOption{} + reqOpts := []interface{}{} + for _, opt := range opts { + switch t := opt.(type) { + case v1alpha1testing.ServiceOption: + serviceOpts = append(serviceOpts, opt.(v1alpha1testing.ServiceOption)) + case pkgTest.RequestOption: + reqOpts = append(reqOpts, opt.(pkgTest.RequestOption)) + default: + return nil, nil, fmt.Errorf("invalid option type: %T", t) + } + + } + return serviceOpts, reqOpts, nil +} diff --git a/test/vendor/knative.dev/serving/test/conformance/runtime/workingdir_test.go b/test/vendor/knative.dev/serving/test/conformance/runtime/workingdir_test.go new file mode 100644 index 0000000000..313699b281 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/conformance/runtime/workingdir_test.go @@ -0,0 +1,49 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "testing" + + "knative.dev/serving/pkg/apis/serving/v1alpha1" + v1a1options "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +func withWorkingDir(wd string) v1a1options.ServiceOption { + return func(svc *v1alpha1.Service) { + svc.Spec.Template.Spec.Containers[0].WorkingDir = wd + } +} + +func TestWorkingDirService(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + const wd = "/foo/bar/baz" + + _, ri, err := fetchRuntimeInfo(t, clients, withWorkingDir(wd)) + if err != nil { + t.Fatalf("Failed to fetch runtime info: %v", err) + } + + if ri.Host.User.Cwd.Directory != wd { + t.Errorf("cwd = %s, want %s, error=%s", ri.Host.User.Cwd, wd, ri.Host.User.Cwd.Error) + } +} diff --git a/test/vendor/knative.dev/serving/test/crd.go b/test/vendor/knative.dev/serving/test/crd.go new file mode 100644 index 0000000000..52da88266a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/crd.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +// crd contains functions that construct boilerplate CRD definitions. + +import ( + "net/url" + "strings" + + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/helpers" +) + +// ResourceNames holds names of various resources. 
+type ResourceNames struct { + Config string + Route string + Revision string + Service string + TrafficTarget string + URL *url.URL + Image string +} + +// AppendRandomString will generate a random string that begins with prefix. This is useful +// if you want to make sure that your tests can run at the same time against the same +// environment without conflicting. This method will seed rand with the current time when +// called for the first time. +var AppendRandomString = helpers.AppendRandomString + +// MakeK8sNamePrefix will convert each chunk of non-alphanumeric character into a single dash +// and also convert camelcase tokens into dash-delimited lowercase tokens. +var MakeK8sNamePrefix = helpers.MakeK8sNamePrefix + +// ObjectNameForTest generates a random object name based on the test name. +var ObjectNameForTest = helpers.ObjectNameForTest + +// SubServiceNameForTest generates a random service name based on the test name and +// the given subservice name. +func SubServiceNameForTest(t pkgTest.T, subsvc string) string { + fullPrefix := strings.TrimPrefix(t.Name(), "Test") + "-" + subsvc + return AppendRandomString(MakeK8sNamePrefix(fullPrefix)) +} diff --git a/test/vendor/knative.dev/serving/test/e2e-common.sh b/test/vendor/knative.dev/serving/test/e2e-common.sh new file mode 100644 index 0000000000..1db636b6c6 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e-common.sh @@ -0,0 +1,522 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Temporarily increasing the cluster size for serving tests to rule out
+# resource/eviction as causes of flakiness. These env vars are consumed
+# in the test-infra/scripts/e2e-tests.sh. Use the existing value, if provided
+# with the job config.
+E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-4}
+E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-4}
+E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-8}
+
+# This script provides helper methods to perform cluster actions.
+source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh
+
+CERT_MANAGER_VERSION="0.12.0"
+ISTIO_VERSION=""
+GLOO_VERSION=""
+KOURIER_VERSION=""
+AMBASSADOR_VERSION=""
+CONTOUR_VERSION=""
+INGRESS_CLASS=""
+
+HTTPS=0
+MESH=0
+INSTALL_MONITORING=0
+
+# List of custom YAMLs to install, if specified (space-separated).
+INSTALL_CUSTOM_YAMLS=""
+
+UNINSTALL_LIST=()
+
+# Parse our custom flags.
+function parse_flags() {
+  case "$1" in
+    --istio-version)
+      [[ $2 =~ ^[0-9]+\.[0-9]+(\.[0-9]+|\-latest)$ ]] || abort "version format must be '[0-9].[0-9].[0-9]' or '[0-9].[0-9]-latest'"
+      readonly ISTIO_VERSION=$2
+      readonly INGRESS_CLASS="istio.ingress.networking.knative.dev"
+      return 2
+      ;;
+    --version)
+      [[ $2 =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be 'v[0-9].[0-9].[0-9]'"
+      LATEST_SERVING_RELEASE_VERSION=$2
+      return 2
+      ;;
+    --cert-manager-version)
+      [[ $2 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
+      readonly CERT_MANAGER_VERSION=$2
+      return 2
+      ;;
+    --mesh)
+      readonly MESH=1
+      return 1
+      ;;
+    --no-mesh)
+      readonly MESH=0
+      return 1
+      ;;
+    --https)
+      readonly HTTPS=1
+      return 1
+      ;;
+    --install-monitoring)
+      readonly INSTALL_MONITORING=1
+      return 1
+      ;;
+    --custom-yamls)
+      [[ -z "$2" ]] && fail_test "Missing argument to --custom-yamls"
+      # Expect a list of comma-separated YAMLs.
+ INSTALL_CUSTOM_YAMLS="${2//,/ }" + readonly INSTALL_CUSTOM_YAMLS + return 2 + ;; + --gloo-version) + # currently, the value of --gloo-version is ignored + # latest version of Gloo pinned in third_party will be installed + readonly GLOO_VERSION=$2 + readonly INGRESS_CLASS="gloo.ingress.networking.knative.dev" + return 2 + ;; + --kourier-version) + # currently, the value of --kourier-version is ignored + # latest version of Kourier pinned in third_party will be installed + readonly KOURIER_VERSION=$2 + readonly INGRESS_CLASS="kourier.ingress.networking.knative.dev" + return 2 + ;; + --ambassador-version) + # currently, the value of --ambassador-version is ignored + # latest version of Ambassador pinned in third_party will be installed + readonly AMBASSADOR_VERSION=$2 + readonly INGRESS_CLASS="ambassador.ingress.networking.knative.dev" + return 2 + ;; + --contour-version) + # currently, the value of --contour-version is ignored + # latest version of Contour pinned in third_party will be installed + readonly CONTOUR_VERSION=$2 + readonly INGRESS_CLASS="contour.ingress.networking.knative.dev" + return 2 + ;; + esac + return 0 +} + +# Create all manifests required to install Knative Serving. +# This will build everything from the current source. +# All generated YAMLs will be available and pointed by the corresponding +# environment variables as set in /hack/generate-yamls.sh. +function build_knative_from_source() { + local YAML_LIST="$(mktemp)" + + # Generate manifests, capture environment variables pointing to the YAML files. + local FULL_OUTPUT="$( \ + source $(dirname $0)/../hack/generate-yamls.sh ${REPO_ROOT_DIR} ${YAML_LIST} ; \ + set | grep _YAML=/)" + local LOG_OUTPUT="$(echo "${FULL_OUTPUT}" | grep -v _YAML=/)" + local ENV_OUTPUT="$(echo "${FULL_OUTPUT}" | grep '^[_0-9A-Z]\+_YAML=/')" + [[ -z "${LOG_OUTPUT}" || -z "${ENV_OUTPUT}" ]] && fail_test "Error generating manifests" + # Only import the environment variables pointing to the YAML files. 
+ echo "${LOG_OUTPUT}" + echo -e "Generated manifests:\n${ENV_OUTPUT}" + eval "${ENV_OUTPUT}" +} + +# Installs Knative Serving in the current cluster, and waits for it to be ready. +# If no parameters are passed, installs the current source-based build, unless custom +# YAML files were passed using the --custom-yamls flag. +# Parameters: $1 - Knative Serving YAML file +# $2 - Knative Monitoring YAML file (optional) +function install_knative_serving() { + if [[ -z "${INSTALL_CUSTOM_YAMLS}" ]]; then + install_knative_serving_standard "$1" "$2" + return + fi + echo ">> Installing Knative serving from custom YAMLs" + echo "Custom YAML files: ${INSTALL_CUSTOM_YAMLS}" + for yaml in ${INSTALL_CUSTOM_YAMLS}; do + echo "Installing '${yaml}'" + kubectl create -f "${yaml}" || return 1 + done +} + +function install_istio() { + # If no gateway was set on command line, assume Istio + if [[ -z "${ISTIO_VERSION}" ]]; then + echo ">> No gateway set up on command line, using Istio" + readonly ISTIO_VERSION="1.4-latest" + fi + + local istio_base="./third_party/istio-${ISTIO_VERSION}" + INSTALL_ISTIO_CRD_YAML="${istio_base}/istio-crds.yaml" + if (( MESH )); then + INSTALL_ISTIO_YAML="${istio_base}/istio-ci-mesh.yaml" + else + INSTALL_ISTIO_YAML="${istio_base}/istio-ci-no-mesh.yaml" + fi + + echo "Istio CRD YAML: ${INSTALL_ISTIO_CRD_YAML}" + echo "Istio YAML: ${INSTALL_ISTIO_YAML}" + + echo ">> Bringing up Istio" + echo ">> Running Istio CRD installer" + kubectl apply -f "${INSTALL_ISTIO_CRD_YAML}" || return 1 + wait_until_batch_job_complete istio-system || return 1 + UNINSTALL_LIST+=( "${INSTALL_ISTIO_CRD_YAML}" ) + + echo ">> Running Istio" + kubectl apply -f "${INSTALL_ISTIO_YAML}" || return 1 + UNINSTALL_LIST+=( "${INSTALL_ISTIO_YAML}" ) + + echo ">> Patching Istio" + # There are reports of Envoy failing (503) when istio-pilot is overloaded. + # We generously add more pilot instances here to reduce flakes. 
+ if kubectl get hpa -n istio-system istio-pilot 2>/dev/null; then + kubectl patch hpa -n istio-system istio-pilot \ + --patch '{"spec": {"minReplicas": 3, "maxReplicas": 10, "targetCPUUtilizationPercentage": 60}}' || return 1 + else + # Some versions of Istio don't provide an HPA for pilot. + kubectl autoscale -n istio-system deploy istio-pilot --min=3 --max=10 --cpu-percent=60 || return 1 + fi + + # If the yaml for the Istio Ingress controller is passed, then install it. + if [[ -n "$1" ]]; then + echo ">> Installing Istio Ingress" + echo "Istio Ingress YAML: ${1}" + # We apply a filter here because when we're installing from a pre-built + # bundle then the whole bundle it passed here. We use ko because it has + # better filtering support for CRDs. + ko apply -f "${1}" --selector=networking.knative.dev/ingress-provider=istio || return 1 + UNINSTALL_LIST+=( "${1}" ) + fi +} + +function install_gloo() { + local INSTALL_GLOO_YAML="./third_party/gloo-latest/gloo.yaml" + echo "Gloo YAML: ${INSTALL_GLOO_YAML}" + echo ">> Bringing up Gloo" + + kubectl apply -f ${INSTALL_GLOO_YAML} || return 1 + UNINSTALL_LIST+=( "${INSTALL_GLOO_YAML}" ) + + echo ">> Patching Gloo" + # Scale replicas of the Gloo proxies to handle large qps + kubectl scale -n gloo-system deployment knative-external-proxy --replicas=6 + kubectl scale -n gloo-system deployment knative-internal-proxy --replicas=6 +} + +function install_kourier() { + local INSTALL_KOURIER_YAML="./third_party/kourier-latest/kourier.yaml" + echo "Kourier YAML: ${INSTALL_KOURIER_YAML}" + echo ">> Bringing up Kourier" + + kubectl apply -f ${INSTALL_KOURIER_YAML} || return 1 + UNINSTALL_LIST+=( "${INSTALL_KOURIER_YAML}" ) + + echo ">> Patching Kourier" + # Scale replicas of the Kourier gateways to handle large qps + kubectl scale -n kourier-system deployment 3scale-kourier-gateway --replicas=6 +} + +function install_ambassador() { + local AMBASSADOR_MANIFESTS_PATH="./third_party/ambassador-latest/" + echo "Ambassador YAML: 
${AMBASSADOR_MANIFESTS_PATH}" + + echo ">> Creating namespace 'ambassador'" + kubectl create namespace ambassador || return 1 + + echo ">> Installing Ambassador" + kubectl apply -n ambassador -f ${AMBASSADOR_MANIFESTS_PATH} || return 1 + UNINSTALL_LIST+=( "${AMBASSADOR_MANIFESTS_PATH}" ) + + echo ">> Fixing Ambassador's permissions" + kubectl patch clusterrolebinding ambassador -p '{"subjects":[{"kind": "ServiceAccount", "name": "ambassador", "namespace": "ambassador"}]}' || return 1 + + echo ">> Enabling Knative support in Ambassador" + kubectl set env --namespace ambassador deployments/ambassador AMBASSADOR_KNATIVE_SUPPORT=true || return 1 + + echo ">> Patching Ambassador" + # Scale replicas of the Ambassador gateway to handle large qps + kubectl scale -n ambassador deployment ambassador --replicas=6 +} + +function install_contour() { + local INSTALL_CONTOUR_YAML="./third_party/contour-latest/contour.yaml" + echo "Contour YAML: ${INSTALL_CONTOUR_YAML}" + echo ">> Bringing up Contour" + + kubectl apply -f ${INSTALL_CONTOUR_YAML} || return 1 + UNINSTALL_LIST+=( "${INSTALL_CONTOUR_YAML}" ) +} + +# Installs Knative Serving in the current cluster, and waits for it to be ready. +# If no parameters are passed, installs the current source-based build. +# Parameters: $1 - Knative Serving YAML file +# $2 - Knative Monitoring YAML file (optional) +function install_knative_serving_standard() { + readonly INSTALL_CERT_MANAGER_YAML="./third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml" + + echo ">> Creating knative-serving namespace if it does not exist" + kubectl get ns knative-serving || kubectl create namespace knative-serving + + echo ">> Installing Knative CRD" + if [[ -z "$1" ]]; then + # If we need to build from source, then kick that off first. 
+ build_knative_from_source + + echo "CRD YAML: ${SERVING_CRD_YAML}" + kubectl apply -f "${SERVING_CRD_YAML}" || return 1 + UNINSTALL_LIST+=( "${SERVING_CRD_YAML}" ) + else + echo "Knative YAML: ${1}" + ko apply -f "${1}" --selector=knative.dev/crd-install=true || return 1 + UNINSTALL_LIST+=( "${1}" ) + SERVING_ISTIO_YAML="${1}" + fi + + echo ">> Installing Ingress" + if [[ -n "${GLOO_VERSION}" ]]; then + install_gloo + elif [[ -n "${KOURIER_VERSION}" ]]; then + install_kourier + elif [[ -n "${AMBASSADOR_VERSION}" ]]; then + install_ambassador + elif [[ -n "${CONTOUR_VERSION}" ]]; then + install_contour + else + install_istio "${SERVING_ISTIO_YAML}" + fi + + echo ">> Installing Cert-Manager" + echo "Cert Manager YAML: ${INSTALL_CERT_MANAGER_YAML}" + kubectl apply -f "${INSTALL_CERT_MANAGER_YAML}" --validate=false || return 1 + UNINSTALL_LIST+=( "${INSTALL_CERT_MANAGER_YAML}" ) + + echo ">> Installing Knative serving" + if [[ -z "$1" ]]; then + echo "Knative YAML: ${SERVING_CORE_YAML} and ${SERVING_HPA_YAML}" + kubectl apply \ + -f "${SERVING_CORE_YAML}" \ + -f "${SERVING_HPA_YAML}" || return 1 + UNINSTALL_LIST+=( "${SERVING_CORE_YAML}" "${SERVING_HPA_YAML}" ) + + # ${SERVING_CERT_MANAGER_YAML} and ${SERVING_NSCERT_YAML} are set when calling + # build_knative_from_source + echo "Knative TLS YAML: ${SERVING_CERT_MANAGER_YAML} and ${SERVING_NSCERT_YAML}" + kubectl apply \ + -f "${SERVING_CERT_MANAGER_YAML}" \ + -f "${SERVING_NSCERT_YAML}" || return 1 + + if (( INSTALL_MONITORING )); then + echo ">> Installing Monitoring" + echo "Knative Monitoring YAML: ${MONITORING_YAML}" + kubectl apply -f "${MONITORING_YAML}" || return 1 + UNINSTALL_LIST+=( "${MONITORING_YAML}" ) + fi + else + echo "Knative YAML: ${1}" + # If we are installing from provided yaml, then only install non-istio bits here, + # and if we choose to install istio below, then pass the whole file as the rest. + # We use ko because it has better filtering support for CRDs. 
+ ko apply -f "${1}" --selector=networking.knative.dev/ingress-provider!=istio || return 1 + UNINSTALL_LIST+=( "${1}" ) + + if (( INSTALL_MONITORING )); then + echo ">> Installing Monitoring" + echo "Knative Monitoring YAML: ${2}" + kubectl apply -f "${2}" || return 1 + UNINSTALL_LIST+=( "${2}" ) + fi + fi + + echo ">> Configuring the default Ingress: ${INGRESS_CLASS}" + cat <> Turning on profiling.enable" + cat <> Patching activator HPA" + # We set min replicas to 2 for testing multiple activator pods. + kubectl -n knative-serving patch hpa activator --patch '{"spec":{"minReplicas":2}}' || return 1 +} + +# Check if we should use --resolvabledomain. In case the ingress only has +# hostname, we doesn't yet have a way to support resolvable domain in tests. +function use_resolvable_domain() { + # Temporarily turning off xip.io tests, as DNS errors aren't always retried. + echo "false" +} + +# Check if we should use --https. +function use_https() { + if (( HTTPS )); then + echo "--https" + else + echo "" + fi +} + +# Check if we should specify --ingressClass +function ingress_class() { + if [[ -z "${INGRESS_CLASS}" ]]; then + echo "" + else + echo "--ingressClass=${INGRESS_CLASS}" + fi +} + +# Uninstalls Knative Serving from the current cluster. +function knative_teardown() { + if [[ -z "${INSTALL_CUSTOM_YAMLS}" && -z "${UNINSTALL_LIST[@]}" ]]; then + echo "install_knative_serving() was not called, nothing to uninstall" + return 0 + fi + if [[ -n "${INSTALL_CUSTOM_YAMLS}" ]]; then + echo ">> Uninstalling Knative serving from custom YAMLs" + for yaml in ${INSTALL_CUSTOM_YAMLS}; do + echo "Uninstalling '${yaml}'" + kubectl delete --ignore-not-found=true -f "${yaml}" || return 1 + done + else + echo ">> Uninstalling Knative serving" + for i in ${!UNINSTALL_LIST[@]}; do + # We uninstall elements in the reverse of the order they were installed. 
+      local YAML="${UNINSTALL_LIST[$(( ${#UNINSTALL_LIST[@]} - $i - 1 ))]}"
+      echo ">> Bringing down YAML: ${YAML}"
+      kubectl delete --ignore-not-found=true -f "${YAML}" || return 1
+    done
+  fi
+}
+
+# Create test resources and images
+function test_setup() {
+  echo ">> Setting up logging..."
+
+  # Install kail if needed.
+  if ! which kail > /dev/null; then
+    bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$GOPATH/bin"
+  fi
+
+  # Capture all logs.
+  kail > ${ARTIFACTS}/k8s.log.txt &
+  local kail_pid=$!
+  # Clean up kail so it doesn't interfere with job shutting down
+  trap "kill $kail_pid || true" EXIT
+
+  echo ">> Creating test resources (test/config/)"
+  ko apply ${KO_FLAGS} -f test/config/ || return 1
+  if (( MESH )); then
+    kubectl label namespace serving-tests istio-injection=enabled
+    kubectl label namespace serving-tests-alt istio-injection=enabled
+    ko apply ${KO_FLAGS} -f test/config/mtls/ || return 1
+  fi
+
+  echo ">> Uploading test images..."
+  ${REPO_ROOT_DIR}/test/upload-test-images.sh || return 1
+
+  echo ">> Waiting for Serving components to be running..."
+  wait_until_pods_running knative-serving || return 1
+
+  echo ">> Waiting for Ingress provider to be running..."
+ if [[ -n "${ISTIO_VERSION}" ]]; then + wait_until_pods_running istio-system || return 1 + wait_until_service_has_external_ip istio-system istio-ingressgateway + fi + if [[ -n "${GLOO_VERSION}" ]]; then + # we must set these override values to allow the test spoofing client to work with Gloo + # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37 + export GATEWAY_OVERRIDE=knative-external-proxy + export GATEWAY_NAMESPACE_OVERRIDE=gloo-system + wait_until_pods_running gloo-system || return 1 + wait_until_service_has_external_ip gloo-system knative-external-proxy + fi + if [[ -n "${KOURIER_VERSION}" ]]; then + # we must set these override values to allow the test spoofing client to work with Kourier + # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37 + export GATEWAY_OVERRIDE=kourier-external + export GATEWAY_NAMESPACE_OVERRIDE=kourier-system + wait_until_pods_running kourier-system || return 1 + wait_until_service_has_external_ip kourier-system kourier-external + fi + if [[ -n "${AMBASSADOR_VERSION}" ]]; then + # we must set these override values to allow the test spoofing client to work with Ambassador + # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37 + export GATEWAY_OVERRIDE=ambassador + export GATEWAY_NAMESPACE_OVERRIDE=ambassador + wait_until_pods_running ambassador || return 1 + wait_until_service_has_external_ip ambassador ambassador + fi + if [[ -n "${CONTOUR_VERSION}" ]]; then + # we must set these override values to allow the test spoofing client to work with Contour + # see https://github.com/knative/pkg/blob/release-0.7/test/ingress/ingress.go#L37 + export GATEWAY_OVERRIDE=envoy-external + export GATEWAY_NAMESPACE_OVERRIDE=projectcontour + wait_until_pods_running projectcontour || return 1 + wait_until_service_has_external_ip projectcontour envoy-external + fi + + if (( INSTALL_MONITORING )); then + echo ">> Waiting for Monitoring to be running..." 
+ wait_until_pods_running knative-monitoring || return 1 + fi +} + +# Delete test resources +function test_teardown() { + echo ">> Removing test resources (test/config/)" + ko delete --ignore-not-found=true --now -f test/config/ + if (( MESH )); then + ko delete --ignore-not-found=true --now -f test/config/mtls/ + fi + echo ">> Ensuring test namespaces are clean" + kubectl delete all --all --ignore-not-found --now --timeout 60s -n serving-tests + kubectl delete --ignore-not-found --now --timeout 60s namespace serving-tests + kubectl delete all --all --ignore-not-found --now --timeout 60s -n serving-tests-alt + kubectl delete --ignore-not-found --now --timeout 60s namespace serving-tests-alt +} + +# Dump more information when test fails. +function dump_extra_cluster_state() { + echo ">>> Routes:" + kubectl get routes -o yaml --all-namespaces + echo ">>> Configurations:" + kubectl get configurations -o yaml --all-namespaces + echo ">>> Revisions:" + kubectl get revisions -o yaml --all-namespaces + echo ">>> PodAutoscalers:" + kubectl get podautoscalers -o yaml --all-namespaces + echo ">>> SKSs:" + kubectl get serverlessservices -o yaml --all-namespaces +} diff --git a/test/vendor/knative.dev/serving/test/e2e-smoke-tests.sh b/test/vendor/knative.dev/serving/test/e2e-smoke-tests.sh new file mode 100755 index 0000000000..9a22596bf2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e-smoke-tests.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests against Knative Serving built from source. +# It is started by prow for each PR. For convenience, it can also be executed manually. + +# If you already have a Knative cluster setup and kubectl pointing +# to it, call this script with the --run-tests arguments and it will use +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, start knative in it, run the tests and delete the +# cluster. + +source $(dirname $0)/e2e-common.sh + +function knative_setup() { + # Build serving, create $SERVING_YAML + build_knative_from_source + start_knative_serving "${SERVING_YAML}" + start_knative_monitoring "${MONITORING_YAML}" +} + +# Script entry point. + +initialize $@ + +# Ensure Knative Serving can be uninstalled/reinstalled cleanly +subheader "Uninstalling Knative Serving" +kubectl delete --ignore-not-found=true -f ${SERVING_YAML} || fail_test +wait_until_object_does_not_exist namespaces knative-serving || fail_test +kubectl delete --ignore-not-found=true -f ${MONITORING_YAML} || fail_test +wait_until_object_does_not_exist namespaces knative-monitoring || fail_test +# Specially wait for zipkin to be deleted, as we have them installed in istio-system namespace, see +# https://github.com/knative/serving/blob/4202efc0dc12052edc0630515b101cbf8068a609/config/monitoring/tracing/zipkin/100-zipkin.yaml#L19 +wait_until_object_does_not_exist service zipkin istio-system +wait_until_object_does_not_exist deployment zipkin istio-system + +subheader "Reinstalling Knative Serving" +start_knative_serving "${SERVING_YAML}" || fail_test +subheader "Reinstalling Knative Monitoring" +start_knative_monitoring "${MONITORING_YAML}" || fail_test + +# Run smoke test +subheader "Running smoke test" +go_test_e2e ./test/e2e -run HelloWorld || fail_test + +success diff --git 
a/test/vendor/knative.dev/serving/test/e2e-tests.sh b/test/vendor/knative.dev/serving/test/e2e-tests.sh new file mode 100755 index 0000000000..cfe23c363b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e-tests.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests against Knative Serving built from source. +# It is started by prow for each PR. For convenience, it can also be executed manually. + +# If you already have a Knative cluster setup and kubectl pointing +# to it, call this script with the --run-tests arguments and it will use +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, start knative in it, run the tests and delete the +# cluster. + +source $(dirname $0)/e2e-common.sh + +# Helper functions. + +function knative_setup() { + install_knative_serving +} + +# Script entry point. + +# Skip installing istio as an add-on +initialize $@ --skip-istio-addon + +# Run the tests +header "Running tests" + +failed=0 + +# Run tests serially in the mesh scenario +parallelism="" +(( MESH )) && parallelism="-parallel 1" + +# Run conformance and e2e tests. +go_test_e2e -timeout=30m \ + ./test/conformance/... \ + ./test/e2e \ + ${parallelism} \ + "--resolvabledomain=$(use_resolvable_domain)" "$(use_https)" "$(ingress_class)" || failed=1 + +# Run scale tests. 
+go_test_e2e -timeout=10m \ + ${parallelism} \ + ./test/scale || failed=1 + +# Auto TLS E2E tests mutate the cluster and must be ran separately +go_test_e2e -timeout=10m \ + ./test/e2e/autotls || failed=1 + +# Istio E2E tests mutate the cluster and must be ran separately +if [[ -n "${ISTIO_VERSION}" ]]; then + go_test_e2e -timeout=10m \ + ./test/e2e/istio \ + "--resolvabledomain=$(use_resolvable_domain)" "$(use_https)" || failed=1 +fi + +# Dump cluster state in case of failure +(( failed )) && dump_cluster_state +(( failed )) && fail_test + +success diff --git a/test/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh b/test/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh new file mode 100755 index 0000000000..d185360882 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e-upgrade-tests.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the end-to-end tests against Knative Serving built from source. +# It is started by prow for each PR. For convenience, it can also be executed manually. + +# If you already have the *_OVERRIDE environment variables set, call +# this script with the --run-tests arguments and it will start knative in +# the cluster and run the tests. + +# Calling this script without arguments will create a new cluster in +# project $PROJECT_ID, start knative in it, run the tests and delete the +# cluster. 
+ +# You can specify the version to run against with the --version argument +# (e.g. --version v0.7.0). If this argument is not specified, the script will +# run against the latest tagged version on the current branch. + +source $(dirname $0)/e2e-common.sh + +# Latest serving release. If user does not supply this as a flag, the latest +# tagged release on the current branch will be used. +LATEST_SERVING_RELEASE_VERSION=$(git describe --match "v[0-9]*" --abbrev=0) + +function install_latest_release() { + header "Installing Knative latest public release" + local url="https://github.com/knative/serving/releases/download/${LATEST_SERVING_RELEASE_VERSION}" + local yaml="serving.yaml" + + local RELEASE_YAML="$(mktemp)" + wget "${url}/${yaml}" -O "${RELEASE_YAML}" \ + || fail_test "Unable to download latest Knative release." + + install_knative_serving "${RELEASE_YAML}" \ + || fail_test "Knative latest release installation failed" + wait_until_pods_running knative-serving +} + +function install_head() { + header "Installing Knative head release" + install_knative_serving || fail_test "Knative head release installation failed" + wait_until_pods_running knative-serving +} + +function knative_setup() { + # Build Knative to generate Istio manifests from HEAD for install_latest_release + # We do it here because it's a one-time setup + build_knative_from_source + install_latest_release +} + +# Script entry point. + +initialize $@ --skip-istio-addon + +# TODO(#2656): Reduce the timeout after we get this test to consistently passing. +TIMEOUT=10m + +header "Running preupgrade tests" + +go_test_e2e -tags=preupgrade -timeout=${TIMEOUT} ./test/upgrade \ + --resolvabledomain=$(use_resolvable_domain) "$(use_https)" || fail_test + +header "Starting prober test" + +# Remove this in case we failed to clean it up in an earlier test. 
+rm -f /tmp/prober-signal + +go_test_e2e -tags=probe -timeout=${TIMEOUT} ./test/upgrade \ + --resolvabledomain=$(use_resolvable_domain) "$(use_https)" & +PROBER_PID=$! +echo "Prober PID is ${PROBER_PID}" + +install_head + +header "Running postupgrade tests" +go_test_e2e -tags=postupgrade -timeout=${TIMEOUT} ./test/upgrade \ + --resolvabledomain=$(use_resolvable_domain) "$(use_https)" || fail_test + +install_latest_release + +header "Running postdowngrade tests" +go_test_e2e -tags=postdowngrade -timeout=${TIMEOUT} ./test/upgrade \ + --resolvabledomain=$(use_resolvable_domain) "$(use_https)" || fail_test + +# The prober is blocking on /tmp/prober-signal to know when it should exit. +# +# This is kind of gross. First attempt was to just send a signal to the go test, +# but "go test" intercepts the signal and always exits with a non-zero code. +echo "done" > /tmp/prober-signal + +header "Waiting for prober test" +wait ${PROBER_PID} || fail_test "Prober failed" + +success diff --git a/test/vendor/knative.dev/serving/test/e2e/README.md b/test/vendor/knative.dev/serving/test/e2e/README.md new file mode 100644 index 0000000000..23bab60880 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/README.md @@ -0,0 +1,23 @@ +# End to end tests + +- [Running e2e tests](../README.md#running-e2e-tests) + +## Adding end to end tests + +Knative Serving e2e tests +[test the end to end functionality of the Knative Serving API](#requirements) to +verify the behavior of this specific implementation. + +These tests use [the test library](../adding_tests.md#test-library). + +### Requirements + +The e2e tests are used to test whether the flow of Knative Serving is performing +as designed from start to finish. + +The e2e tests **MUST**: + +1. Provide frequent output describing what actions they are undertaking, + especially before performing long running operations. Please see the + [Log section](../adding_tests.md#output-log) for detailed instructions. +2. 
Follow Golang best practices. diff --git a/test/vendor/knative.dev/serving/test/e2e/activator_test.go b/test/vendor/knative.dev/serving/test/e2e/activator_test.go new file mode 100644 index 0000000000..4b26c51751 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/activator_test.go @@ -0,0 +1,124 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "net/http" + "sync" + "testing" + + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + rnames "knative.dev/serving/pkg/reconciler/revision/resources/names" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// TestActivatorOverload makes sure that activator can handle the load when scaling from 0. +// We need to add a similar test for the User pod overload once the second part of overload handling is done. +func TestActivatorOverload(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + const ( + // The number of concurrent requests to hit the activator with. + concurrency = 100 + // How long the service will process the request in ms. 
+ serviceSleep = 300 + ) + + clients := Setup(t) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "timeout", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a service with run latest configuration.") + // Create a service with concurrency 1 that sleeps for N ms. + // Limit its maxScale to 10 containers, wait for the service to scale down and hit it with concurrent requests. + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + func(service *v1alpha1.Service) { + service.Spec.ConfigurationSpec.Template.Spec.ContainerConcurrency = ptr.Int64(1) + service.Spec.ConfigurationSpec.Template.Annotations = map[string]string{"autoscaling.knative.dev/maxScale": "10"} + }) + if err != nil { + t.Fatalf("Unable to create resources: %v", err) + } + + // Make sure the service responds correctly before scaling to 0. 
+ if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + resources.Route.Status.URL.URL(), + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", resources.Route.Status.URL.URL(), err) + } + + deploymentName := rnames.Deployment(resources.Revision) + if err := WaitForScaleToZero(t, deploymentName, clients); err != nil { + t.Fatalf("Unable to observe the Deployment named %s scaling down: %v", deploymentName, err) + } + + domain := resources.Route.Status.URL.Host + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, domain, test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating the Spoofing client: %v", err) + } + + url := fmt.Sprintf("http://%s/?timeout=%d", domain, serviceSleep) + + t.Log("Starting to send out the requests") + + var group sync.WaitGroup + // Send requests async and wait for the responses. + for i := 0; i < concurrency; i++ { + group.Add(1) + go func() { + defer group.Done() + + // We need to create a new request per HTTP request because + // the spoofing client mutates them. 
+ req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + t.Errorf("error creating http request: %v", err) + } + + res, err := client.Do(req) + if err != nil { + t.Errorf("unexpected error sending a request, %v", err) + return + } + + if res.StatusCode != http.StatusOK { + t.Errorf("status = %d, want: %d, response: %s", res.StatusCode, http.StatusOK, res) + } + }() + } + group.Wait() +} diff --git a/test/vendor/knative.dev/serving/test/e2e/autoscale_test.go b/test/vendor/knative.dev/serving/test/e2e/autoscale_test.go new file mode 100644 index 0000000000..371c83dc5d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/autoscale_test.go @@ -0,0 +1,600 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "errors" + "fmt" + "math" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + vegeta "github.com/tsenart/vegeta/lib" + "golang.org/x/sync/errgroup" + "knative.dev/pkg/system" + pkgTest "knative.dev/pkg/test" + ingress "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/apis/serving" + resourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names" + "knative.dev/serving/pkg/resources" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +const ( + // Concurrency must be high enough to avoid the problems with sampling + // but not high enough to generate scheduling problems. + containerConcurrency = 6.0 + targetUtilization = 0.7 + successRateSLO = 0.999 + autoscaleSleep = 500 +) + +type testContext struct { + t *testing.T + clients *test.Clients + names test.ResourceNames + resources *v1a1test.ResourceObjects + targetUtilization float64 + targetValue float64 + metric string +} + +func getVegetaTarget(kubeClientset *kubernetes.Clientset, domain, endpointOverride string, resolvable bool) (vegeta.Target, error) { + if resolvable { + return vegeta.Target{ + Method: http.MethodGet, + URL: fmt.Sprintf("http://%s?sleep=%d", domain, autoscaleSleep), + }, nil + } + + endpoint := endpointOverride + if endpointOverride == "" { + var err error + // If the domain that the Route controller is configured to assign to Route.Status.Domain + // (the domainSuffix) is not resolvable, we need to retrieve the endpoint and spoof + // the Host in our requests. 
+ if endpoint, err = ingress.GetIngressEndpoint(kubeClientset); err != nil { + return vegeta.Target{}, err + } + } + + h := http.Header{} + h.Set("Host", domain) + return vegeta.Target{ + Method: http.MethodGet, + URL: fmt.Sprintf("http://%s?sleep=%d", endpoint, autoscaleSleep), + Header: h, + }, nil +} + +func generateTraffic( + ctx *testContext, + attacker *vegeta.Attacker, + pacer vegeta.Pacer, + duration time.Duration, + stopChan chan struct{}) error { + + target, err := getVegetaTarget( + ctx.clients.KubeClient.Kube, ctx.resources.Route.Status.URL.URL().Hostname(), pkgTest.Flags.IngressEndpoint, test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("error creating vegeta target: %w", err) + } + + results := attacker.Attack(vegeta.NewStaticTargeter(target), pacer, duration, "load-test") + defer attacker.Stop() + + var ( + totalRequests int32 + successfulRequests int32 + ) + for { + select { + case <-stopChan: + ctx.t.Log("Stopping generateTraffic") + successRate := float64(1) + if totalRequests > 0 { + successRate = float64(successfulRequests) / float64(totalRequests) + } + if successRate < successRateSLO { + return fmt.Errorf("request success rate under SLO: total = %d, errors = %d, rate = %f, SLO = %f", + totalRequests, totalRequests-successfulRequests, successRate, successRateSLO) + } + return nil + case res, ok := <-results: + if !ok { + ctx.t.Log("Time is up; done") + return nil + } + + totalRequests++ + if res.Code != http.StatusOK { + ctx.t.Logf("Status = %d, want: 200", res.Code) + ctx.t.Log("Response:\n" + spew.Sprint(res)) + continue + } + successfulRequests++ + } + } +} + +func generateTrafficAtFixedConcurrency(ctx *testContext, concurrency int, duration time.Duration, stopChan chan struct{}) error { + pacer := vegeta.ConstantPacer{} // Sends requests as quickly as possible, capped by MaxWorkers below. 
+ attacker := vegeta.NewAttacker(vegeta.Timeout(duration), vegeta.Workers(uint64(concurrency)), vegeta.MaxWorkers(uint64(concurrency))) + + ctx.t.Logf("Maintaining %d concurrent requests for %v.", concurrency, duration) + return generateTraffic(ctx, attacker, pacer, duration, stopChan) +} + +func generateTrafficAtFixedRPS(ctx *testContext, rps int, duration time.Duration, stopChan chan struct{}) error { + pacer := vegeta.ConstantPacer{Freq: rps, Per: time.Second} + attacker := vegeta.NewAttacker(vegeta.Timeout(duration)) + + ctx.t.Logf("Maintaining %v RPS requests for %v.", rps, duration) + return generateTraffic(ctx, attacker, pacer, duration, stopChan) +} + +// setup creates a new service, with given service options. +// It returns a testContext that has resources, K8s clients and other needed +// data points. +// It sets up CleanupOnInterrupt as well that will destroy the resources +// when the test terminates. +func setup(t *testing.T, class, metric string, target float64, targetUtilization float64, fopts ...rtesting.ServiceOption) *testContext { + t.Helper() + clients := Setup(t) + + t.Log("Creating a new Route and Configuration") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "autoscale", + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + append([]rtesting.ServiceOption{ + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.ClassAnnotationKey: class, + autoscaling.MetricAnnotationKey: metric, + autoscaling.TargetAnnotationKey: strconv.FormatFloat(target, 'f', -1, 64), + autoscaling.TargetUtilizationPercentageKey: strconv.FormatFloat(targetUtilization*100, 'f', -1, 64), + // We run the test for 60s, so make window a bit shorter, + // so that we're operating in sustained mode and the pod actions stopped happening. 
+ autoscaling.WindowAnnotationKey: "50s", + }), rtesting.WithResourceRequirements(corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }), + }, fopts...)...) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + url := resources.Route.Status.URL.URL() + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK)), + "CheckingEndpointAfterUpdating", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", url, err) + } + + return &testContext{ + t: t, + clients: clients, + names: names, + resources: resources, + targetUtilization: targetUtilization, + targetValue: target, + metric: metric, + } +} + +func assertScaleDown(ctx *testContext) { + deploymentName := resourcenames.Deployment(ctx.resources.Revision) + if err := WaitForScaleToZero(ctx.t, deploymentName, ctx.clients); err != nil { + ctx.t.Fatalf("Unable to observe the Deployment named %s scaling down: %v", deploymentName, err) + } + + // Account for the case where scaling up uses all available pods. 
+ ctx.t.Log("Wait for all pods to terminate.") + + if err := pkgTest.WaitForPodListState( + ctx.clients.KubeClient, + func(p *corev1.PodList) (bool, error) { + for _, pod := range p.Items { + if strings.Contains(pod.Name, deploymentName) && + !strings.Contains(pod.Status.Reason, "Evicted") { + return false, nil + } + } + return true, nil + }, + "WaitForAvailablePods", test.ServingNamespace); err != nil { + ctx.t.Fatalf("Waiting for Pod.List to have no non-Evicted pods of %q: %v", deploymentName, err) + } + + ctx.t.Log("The Revision should remain ready after scaling to zero.") + if err := v1a1test.CheckRevisionState(ctx.clients.ServingAlphaClient, ctx.names.Revision, v1a1test.IsRevisionReady); err != nil { + ctx.t.Fatalf("The Revision %s did not stay Ready after scaling down to zero: %v", ctx.names.Revision, err) + } + + ctx.t.Log("Scaled down.") +} + +func numberOfPods(ctx *testContext) (float64, error) { + // SKS name matches that of revision. + n := ctx.resources.Revision.Name + sks, err := ctx.clients.NetworkingClient.ServerlessServices.Get(n, metav1.GetOptions{}) + if err != nil { + ctx.t.Logf("Error getting SKS %q: %v", n, err) + return 0, fmt.Errorf("error retrieving sks %q: %w", n, err) + } + if sks.Status.PrivateServiceName == "" { + ctx.t.Logf("SKS %s has not yet reconciled", n) + // Not an error, but no pods either. + return 0, nil + } + eps, err := ctx.clients.KubeClient.Kube.CoreV1().Endpoints(test.ServingNamespace).Get( + sks.Status.PrivateServiceName, metav1.GetOptions{}) + if err != nil { + return 0, fmt.Errorf("failed to get endpoints %s: %w", sks.Status.PrivateServiceName, err) + } + return float64(resources.ReadyAddressCount(eps)), nil +} + +func assertAutoscaleUpToNumPods(ctx *testContext, curPods, targetPods float64, duration time.Duration, quick bool) { + ctx.t.Helper() + // There are two test modes: quick, and not quick. + // 1) Quick mode: succeeds when the number of pods meets targetPods. 
+ // 2) Not Quick (sustaining) mode: succeeds when the number of pods gets scaled to targetPods and + // sustains there for the `duration`. + + // Relax the bounds to reduce the flakiness caused by sampling in the autoscaling algorithm. + // Also adjust the values by the target utilization values. + + minPods := math.Floor(curPods/ctx.targetUtilization) - 1 + maxPods := math.Ceil(targetPods/ctx.targetUtilization) + 1 + + stopChan := make(chan struct{}) + var grp errgroup.Group + grp.Go(func() error { + switch ctx.metric { + case autoscaling.RPS: + return generateTrafficAtFixedRPS(ctx, int(targetPods*ctx.targetValue), duration, stopChan) + default: + return generateTrafficAtFixedConcurrency(ctx, int(targetPods*ctx.targetValue), duration, stopChan) + } + }) + + grp.Go(func() error { + // Short-circuit traffic generation once we exit from the check logic. + defer close(stopChan) + + done := time.After(duration) + timer := time.Tick(2 * time.Second) + for { + select { + case <-timer: + // Each 2 second, check that the number of pods is at least `minPods`. `minPods` is increasing + // to verify that the number of pods doesn't go down while we are scaling up. + got, err := numberOfPods(ctx) + if err != nil { + return err + } + mes := fmt.Sprintf("revision %q #replicas: %v, want at least: %v", ctx.resources.Revision.Name, got, minPods) + ctx.t.Log(mes) + if got < minPods { + return errors.New(mes) + } + if quick { + // A quick test succeeds when the number of pods scales up to `targetPods` + // (and, for sanity check, no more than `maxPods`). + if got >= targetPods && got <= maxPods { + ctx.t.Logf("Got %v replicas, reached target of %v, exiting early", got, targetPods) + return nil + } + } + if minPods < targetPods-1 { + // Increase `minPods`, but leave room to reduce flakiness. + minPods = math.Min(got, targetPods) - 1 + } + case <-done: + // The test duration is over. 
Do a last check to verify that the number of pods is at `targetPods` + // (with a little room for de-flakiness). + got, err := numberOfPods(ctx) + if err != nil { + return err + } + mes := fmt.Sprintf("got %v replicas, expected between [%v, %v] replicas for revision %s", + got, targetPods-1, maxPods, ctx.resources.Revision.Name) + ctx.t.Log(mes) + if got < targetPods-1 || got > maxPods { + return errors.New(mes) + } + return nil + } + } + }) + + if err := grp.Wait(); err != nil { + ctx.t.Error(err) + } +} + +func TestAutoscaleUpDownUp(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + ctx := setup(t, autoscaling.KPA, autoscaling.Concurrency, containerConcurrency, targetUtilization) + defer test.TearDown(ctx.clients, ctx.names) + + assertAutoscaleUpToNumPods(ctx, 1, 2, 60*time.Second, true) + assertScaleDown(ctx) + assertAutoscaleUpToNumPods(ctx, 0, 2, 60*time.Second, true) +} + +func TestAutoscaleUpCountPods(t *testing.T) { + t.Parallel() + + classes := map[string]string{ + "kpa": autoscaling.KPA, + } + + for name, class := range classes { + name, class := name, class + t.Run(name, func(tt *testing.T) { + tt.Parallel() + cancel := logstream.Start(tt) + defer cancel() + + ctx := setup(tt, class, autoscaling.Concurrency, containerConcurrency, targetUtilization) + defer test.TearDown(ctx.clients, ctx.names) + + ctx.t.Log("The autoscaler spins up additional replicas when traffic increases.") + // note: without the warm-up / gradual increase of load the test is retrieving a 503 (overload) from the envoy + + // Increase workload for 2 replicas for 60s + // Assert the number of expected replicas is between n-1 and n+1, where n is the # of desired replicas for 60s. + // Assert the number of expected replicas is n and n+1 at the end of 60s, where n is the # of desired replicas. 
+ assertAutoscaleUpToNumPods(ctx, 1, 2, 60*time.Second, true) + // Increase workload scale to 3 replicas, assert between [n-1, n+1] during scale up, assert between [n, n+1] after scaleup. + assertAutoscaleUpToNumPods(ctx, 2, 3, 60*time.Second, true) + // Increase workload scale to 4 replicas, assert between [n-1, n+1] during scale up, assert between [n, n+1] after scaleup. + assertAutoscaleUpToNumPods(ctx, 3, 4, 60*time.Second, true) + }) + } +} + +func TestRPSBasedAutoscaleUpCountPods(t *testing.T) { + t.Parallel() + + classes := map[string]string{ + "kpa": autoscaling.KPA, + } + + for name, class := range classes { + name, class := name, class + t.Run(name, func(tt *testing.T) { + tt.Parallel() + cancel := logstream.Start(tt) + defer cancel() + + ctx := setup(tt, class, autoscaling.RPS, 10, targetUtilization) + defer test.TearDown(ctx.clients, ctx.names) + + ctx.t.Log("The autoscaler spins up additional replicas when traffic increases.") + // note: without the warm-up / gradual increase of load the test is retrieving a 503 (overload) from the envoy + + // Increase workload for 2 replicas for 60s + // Assert the number of expected replicas is between n-1 and n+1, where n is the # of desired replicas for 60s. + // Assert the number of expected replicas is n and n+1 at the end of 60s, where n is the # of desired replicas. + assertAutoscaleUpToNumPods(ctx, 1, 2, 60*time.Second, true) + // Increase workload scale to 3 replicas, assert between [n-1, n+1] during scale up, assert between [n, n+1] after scaleup. + assertAutoscaleUpToNumPods(ctx, 2, 3, 60*time.Second, true) + // Increase workload scale to 4 replicas, assert between [n-1, n+1] during scale up, assert between [n, n+1] after scaleup. 
+ assertAutoscaleUpToNumPods(ctx, 3, 4, 60*time.Second, true) + }) + } +} + +func TestAutoscaleSustaining(t *testing.T) { + // When traffic increases, a knative app should scale up and sustain the scale + // as long as the traffic sustains, despite whether it is switching modes between + // normal and panic. + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + ctx := setup(t, autoscaling.KPA, autoscaling.Concurrency, containerConcurrency, targetUtilization) + defer test.TearDown(ctx.clients, ctx.names) + + assertAutoscaleUpToNumPods(ctx, 1, 10, 2*time.Minute, false) +} + +func TestTargetBurstCapacity(t *testing.T) { + // This test sets up a service with CC=10 TU=70% and TBC=7. + // Then sends requests at concurrency causing activator in the path. + // Then at the higher concurrency 10, + // getting spare capacity of 20-10=10, which should remove the + // Activator from the request path. + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + ctx := setup(t, autoscaling.KPA, autoscaling.Concurrency, 10 /* target concurrency*/, targetUtilization, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "7", + autoscaling.PanicThresholdPercentageAnnotationKey: "200", // makes panicking rare + })) + defer test.TearDown(ctx.clients, ctx.names) + + cfg, err := autoscalerCM(ctx.clients) + if err != nil { + t.Fatalf("Error retrieving autoscaler configmap: %v", err) + } + var ( + grp errgroup.Group + stopCh = make(chan struct{}) + ) + defer grp.Wait() + defer close(stopCh) + + // We'll terminate the test via stopCh. + const duration = time.Hour + + grp.Go(func() error { + return generateTrafficAtFixedConcurrency(ctx, 7, duration, stopCh) + }) + + // Wait for the activator endpoints to equalize. + if err := waitForActivatorEndpoints(ctx.resources, ctx.clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } + + // Start second load generator. 
+ grp.Go(func() error { + return generateTrafficAtFixedConcurrency(ctx, 5, duration, stopCh) + }) + + // Wait for two stable pods. + if err := wait.Poll(250*time.Millisecond, 2*cfg.StableWindow, func() (bool, error) { + x, err := numberOfPods(ctx) + if err != nil { + return false, err + } + // We want exactly 2. Not 1, not panicing 3, just 2. + return x == 2, nil + }); err != nil { + t.Fatalf("Desired scale of 2 never achieved: %v", err) + } + + // Now read the service endpoints and make sure there are 2 endpoints there. + // We poll, since network programming takes times, but the timeout is set for + // uniformness with one above. + if err := wait.Poll(250*time.Millisecond, 2*cfg.StableWindow, func() (bool, error) { + svcEps, err := ctx.clients.KubeClient.Kube.CoreV1().Endpoints(test.ServingNamespace).Get( + ctx.resources.Revision.Status.ServiceName, metav1.GetOptions{}) + if err != nil { + return false, err + } + t.Logf("resources.ReadyAddressCount(svcEps) = %d", resources.ReadyAddressCount(svcEps)) + return resources.ReadyAddressCount(svcEps) == 2, nil + }); err != nil { + t.Errorf("Never achieved subset of size 2: %v", err) + } +} + +func TestTargetBurstCapacityMinusOne(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + ctx := setup(t, autoscaling.KPA, autoscaling.Concurrency, 10 /* target concurrency*/, targetUtilization, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + })) + defer test.TearDown(ctx.clients, ctx.names) + + _, err := autoscalerCM(ctx.clients) + if err != nil { + t.Fatalf("Error retrieving autoscaler configmap: %v", err) + } + aeps, err := ctx.clients.KubeClient.Kube.CoreV1().Endpoints( + system.Namespace()).Get(networking.ActivatorServiceName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting activator endpoints: %v", err) + } + t.Logf("Activator endpoints: %v", aeps) + + // Wait for the activator endpoints to equalize. 
+ if err := waitForActivatorEndpoints(ctx.resources, ctx.clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } +} + +func TestFastScaleToZero(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + ctx := setup(t, autoscaling.KPA, autoscaling.Concurrency, containerConcurrency, targetUtilization, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + autoscaling.WindowAnnotationKey: autoscaling.WindowMin.String(), + })) + defer test.TearDown(ctx.clients, ctx.names) + + cfg, err := autoscalerCM(ctx.clients) + if err != nil { + t.Fatalf("Error retrieving autoscaler configmap: %v", err) + } + + epsL, err := ctx.clients.KubeClient.Kube.CoreV1().Endpoints(test.ServingNamespace).List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", + serving.RevisionLabelKey, ctx.resources.Revision.Name, + networking.ServiceTypeKey, networking.ServiceTypePrivate, + ), + }) + if err != nil || len(epsL.Items) == 0 { + t.Fatalf("No endpoints or error: %v", err) + } + + epsN := epsL.Items[0].Name + t.Logf("Waiting for emptying of %q ", epsN) + + // The first thing that happens when pods are starting to terminate, + // if that they stop being ready and endpoints controller removes them + // from the ready set. + // While pod termination itself can last quite some time (our pod termination + // test allows for up to a minute). The 15s delay is based upon maximum + // of 20 runs (11s) + 4s of buffer for reliability. 
+ st := time.Now() + if err := wait.PollImmediate(1*time.Second, cfg.ScaleToZeroGracePeriod+15*time.Second, func() (bool, error) { + eps, err := ctx.clients.KubeClient.Kube.CoreV1().Endpoints(test.ServingNamespace).Get(epsN, metav1.GetOptions{}) + if err != nil { + return false, err + } + return resources.ReadyAddressCount(eps) == 0, nil + }); err != nil { + t.Fatalf("Did not observe %q to actually be emptied", epsN) + } + + t.Logf("Total time to scale down: %v", time.Since(st)) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/autotls/auto_tls_test.go b/test/vendor/knative.dev/serving/test/e2e/autotls/auto_tls_test.go new file mode 100644 index 0000000000..fbc3ca659f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/autotls/auto_tls_test.go @@ -0,0 +1,248 @@ +// +build e2e + +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package autotls + +import ( + "context" + "crypto/tls" + "crypto/x509" + "net/http" + "testing" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + pkgtest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/networking" + cmclientset "knative.dev/serving/pkg/client/certmanager/clientset/versioned" + routenames "knative.dev/serving/pkg/reconciler/route/resources/names" + "knative.dev/serving/test" + testingress "knative.dev/serving/test/conformance/ingress" + "knative.dev/serving/test/e2e" + v1test "knative.dev/serving/test/v1" + + "github.com/ghodss/yaml" + cmv1alpha2 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" +) + +const ( + systemNamespace = "knative-serving" +) + +var ( + caClusterIssuer = &cmv1alpha2.ClusterIssuer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-issuer", + }, + Spec: cmv1alpha2.IssuerSpec{ + IssuerConfig: cmv1alpha2.IssuerConfig{ + CA: &cmv1alpha2.CAIssuer{}, + }, + }, + } +) + +type autoTLSClients struct { + clients *test.Clients + cmClient cmclientset.Interface +} + +func TestPerKsvcCert_localCA(t *testing.T) { + tlsClients := initializeClients(t) + disableNamespaceCert(t, tlsClients) + + // Create Knative Service + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "runtime", + } + test.CleanupOnInterrupt(func() { test.TearDown(tlsClients.clients, names) }) + objects, err := v1test.CreateServiceReady(t, tlsClients.clients, &names) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + // Create TLS certificate for the Knative Service. 
+ rootCAs := x509.NewCertPool() + secretName, cancel := testingress.CreateTLSSecretWithCertPool(t, tlsClients.clients, []string{objects.Service.Status.URL.Host}, "cert-manager", rootCAs) + defer cancel() + + // Create ClusterIssuer and update config-certmanager to reference the created ClusterIssuer + clusterIssuer, cancel := createClusterIssuer(t, tlsClients, secretName) + defer cancel() + cancel = updateConfigCertManangerCM(t, tlsClients, clusterIssuer) + defer cancel() + + cancel = turnOnAutoTLS(t, tlsClients) + defer cancel() + + // wait for certificate to be ready + waitForCertificateReady(t, tlsClients, routenames.Certificate(objects.Route)) + + // curl HTTPS + httpsClient := createHTTPSClient(t, tlsClients, objects, rootCAs) + testingress.RuntimeRequest(t, httpsClient, "https://"+objects.Service.Status.URL.Host) +} + +func createHTTPSClient(t *testing.T, tlsClients *autoTLSClients, objects *v1test.ResourceObjects, rootCAs *x509.CertPool) *http.Client { + ing, err := tlsClients.clients.NetworkingClient.Ingresses.Get(routenames.Ingress(objects.Route), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Ingress %s: %v", routenames.Ingress(objects.Route), err) + } + dialer := testingress.CreateDialContext(t, ing, tlsClients.clients) + tlsConfig := &tls.Config{ + RootCAs: rootCAs, + } + return &http.Client{ + Transport: &http.Transport{ + DialContext: dialer, + TLSClientConfig: tlsConfig, + }} +} + +func initializeClients(t *testing.T) *autoTLSClients { + clientConfig, err := test.BuildClientConfig(pkgtest.Flags.Kubeconfig, pkgtest.Flags.Cluster) + if err != nil { + t.Fatalf("Failed to create client config: %v.", err) + } + clients := &autoTLSClients{} + clients.clients = e2e.Setup(t) + clients.cmClient, err = cmclientset.NewForConfig(clientConfig) + if err != nil { + t.Fatalf("Failed to create cert manager client: %v", err) + } + return clients +} + +func disableNamespaceCert(t *testing.T, tlsClients *autoTLSClients) { + namespaces, err := 
tlsClients.clients.KubeClient.Kube.CoreV1().Namespaces().List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list namespaces: %v", err) + } + for _, ns := range namespaces.Items { + if ns.Labels == nil { + ns.Labels = map[string]string{} + } + ns.Labels[networking.DisableWildcardCertLabelKey] = "true" + if _, err := tlsClients.clients.KubeClient.Kube.CoreV1().Namespaces().Update(&ns); err != nil { + t.Errorf("Fail to disable namespace cert: %v", err) + } + } +} + +func createClusterIssuer(t *testing.T, tlsClients *autoTLSClients, tlsSecretName string) (*cmv1alpha2.ClusterIssuer, context.CancelFunc) { + copy := caClusterIssuer.DeepCopy() + copy.Spec.CA.SecretName = tlsSecretName + test.CleanupOnInterrupt(func() { + tlsClients.cmClient.CertmanagerV1alpha2().ClusterIssuers().Delete(copy.Name, &metav1.DeleteOptions{}) + }) + if _, err := tlsClients.cmClient.CertmanagerV1alpha2().ClusterIssuers().Create(copy); err != nil { + t.Fatalf("Failed to create ClusterIssuer %v: %v", &copy, err) + } + return copy, func() { + if err := tlsClients.cmClient.CertmanagerV1alpha2().ClusterIssuers().Delete(copy.Name, &metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to clean up ClusterIssuer %s: %v", copy.Name, err) + } + } +} + +func updateConfigCertManangerCM(t *testing.T, tlsClients *autoTLSClients, clusterIssuer *cmv1alpha2.ClusterIssuer) context.CancelFunc { + issuerRef := &cmmeta.ObjectReference{ + Name: clusterIssuer.Name, + Kind: "ClusterIssuer", + } + issuerRefBytes, err := yaml.Marshal(issuerRef) + if err != nil { + t.Fatalf("Failed to convert IssuerRef %v to bytes: %v", issuerRef, err) + } + + certManagerCM, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get("config-certmanager", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get config-certmanager ConfigMap: %v", err) + } + certManagerCM.Data["issuerRef"] = string(issuerRefBytes) + test.CleanupOnInterrupt(func() { + cleanUpConfigCertManagerCM(t, tlsClients) + 
}) + if _, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(certManagerCM.Namespace).Update(certManagerCM); err != nil { + t.Fatalf("Failed to update the config-certmanager ConfigMap: %v", err) + } + return func() { + cleanUpConfigCertManagerCM(t, tlsClients) + } +} + +func cleanUpConfigCertManagerCM(t *testing.T, tlsClients *autoTLSClients) { + certManagerCM, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get("config-certmanager", metav1.GetOptions{}) + if err != nil { + t.Errorf("Failed to get config-certmanager ConfigMap: %v", err) + return + } + delete(certManagerCM.Data, "issuerRef") + if _, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(certManagerCM.Namespace).Update(certManagerCM); err != nil { + t.Errorf("Failed to clean up config-certmanager ConfigMap: %v", err) + } +} + +func turnOnAutoTLS(t *testing.T, tlsClients *autoTLSClients) context.CancelFunc { + configNetworkCM, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get("config-network", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get config-network ConfigMap: %v", err) + } + configNetworkCM.Data["autoTLS"] = "Enabled" + test.CleanupOnInterrupt(func() { + turnOffAutoTLS(t, tlsClients) + }) + if _, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Update(configNetworkCM); err != nil { + t.Fatalf("Failed to update config-network ConfigMap: %v", err) + } + return func() { + turnOffAutoTLS(t, tlsClients) + } +} + +func turnOffAutoTLS(t *testing.T, tlsClients *autoTLSClients) { + configNetworkCM, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get("config-network", metav1.GetOptions{}) + if err != nil { + t.Errorf("Failed to get config-network ConfigMap: %v", err) + return + } + delete(configNetworkCM.Data, "autoTLS") + if _, err := tlsClients.clients.KubeClient.Kube.CoreV1().ConfigMaps(configNetworkCM.Namespace).Update(configNetworkCM); err 
!= nil { + t.Errorf("Failed to turn off Auto TLS: %v", err) + } +} + +func waitForCertificateReady(t *testing.T, tlsClients *autoTLSClients, certName string) { + if err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) { + cert, err := tlsClients.clients.NetworkingClient.Certificates.Get(certName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + t.Logf("Certificate %s has not been created: %v", certName, err) + return false, nil + } + return false, err + } + return cert.Status.IsReady(), nil + }); err != nil { + t.Fatalf("Certificate %s is not ready: %v", certName, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/destroypod_test.go b/test/vendor/knative.dev/serving/test/e2e/destroypod_test.go new file mode 100644 index 0000000000..3714812ea2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/destroypod_test.go @@ -0,0 +1,315 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/davecgh/go-spew/spew" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + timeoutExpectedOutput = "Slept for 0 milliseconds" + revisionTimeoutSeconds = 45 + timeoutRequestDuration = 35 * time.Second +) + +func TestDestroyPodInflight(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + svcName := test.ObjectNameForTest(t) + names := test.ResourceNames{ + Config: svcName, + Route: svcName, + Image: "timeout", + } + + if _, err := v1a1test.CreateConfiguration(t, clients, names, v1a1opts.WithConfigRevisionTimeoutSeconds(revisionTimeoutSeconds)); err != nil { + t.Fatalf("Failed to create Configuration: %v", err) + } + if _, err := v1a1test.CreateRoute(t, clients, names); err != nil { + t.Fatalf("Failed to create Route: %v", err) + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("When the Revision can have traffic routed to it, the Route is marked as Ready") + if err := v1a1test.WaitForRouteState(clients.ServingAlphaClient, names.Route, v1a1test.IsRouteReady, "RouteIsReady"); err != nil { + t.Fatalf("The Route %s was not marked as Ready to serve traffic: %v", names.Route, err) + } + + route, err := clients.ServingAlphaClient.Routes.Get(names.Route, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Route %s: %v", names.Route, err) + } + routeURL := route.Status.URL.URL() + + err = 
v1a1test.WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + names.Revision = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil + }, "ConfigurationUpdatedWithRevision") + if err != nil { + t.Fatalf("Error obtaining Revision's name %v", err) + } + + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + routeURL, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(timeoutExpectedOutput))), + "TimeoutAppServesText", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, routeURL, timeoutExpectedOutput, err) + } + + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, routeURL.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + // The timeout app sleeps for the time passed via the timeout query parameter in milliseconds + u, _ := url.Parse(routeURL.String()) + q := u.Query() + q.Set("timeout", fmt.Sprintf("%d", timeoutRequestDuration.Milliseconds())) + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + t.Fatalf("Error creating http request: %v", err) + } + + g, _ := errgroup.WithContext(context.Background()) + + g.Go(func() error { + t.Log("Sending in a long running request") + res, err := client.Do(req) + if err != nil { + return err + } + + if res.StatusCode != http.StatusOK { + return fmt.Errorf("expected response to have status 200, had %d", res.StatusCode) + } + expectedBody := fmt.Sprintf("Slept for %d milliseconds", timeoutRequestDuration.Milliseconds()) + gotBody := string(res.Body) + if gotBody != expectedBody { + return fmt.Errorf("unexpected body, expected: %q got: %q", expectedBody, gotBody) + } + 
return nil + }) + + g.Go(func() error { + // Give the request a bit of time to be established and reach the pod. + time.Sleep(timeoutRequestDuration / 2) + + t.Log("Destroying the configuration (also destroys the pods)") + return clients.ServingAlphaClient.Configs.Delete(names.Config, nil) + }) + + if err := g.Wait(); err != nil { + t.Errorf("Something went wrong with the request: %v", err) + } +} + +// We choose a relatively high upper boundary for the test to give even a busy +// Kubernetes test system plenty of time to remove the pod quicker than this. +const revisionTimeout = 5 * time.Minute + +func TestDestroyPodTimely(t *testing.T) { + // Not running in parallel on purpose. + + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithRevisionTimeoutSeconds(int64(revisionTimeout.Seconds()))) + if err != nil { + t.Fatalf("Failed to create a service: %v", err) + } + routeURL := objects.Route.Status.URL.URL() + + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + routeURL, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "RouteServes", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't serve correctly: %v", names.Route, routeURL, err) + } + + pods, err := clients.KubeClient.Kube.CoreV1().Pods(test.ServingNamespace).List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", serving.RevisionLabelKey, objects.Revision.Name), + }) + if err != nil || len(pods.Items) == 0 { + t.Fatalf("No pods or error: %v", err) + } + t.Logf("Saw %d pods", len(pods.Items)) + + podToDelete := 
pods.Items[0].Name + t.Logf("Deleting pod %q", podToDelete) + start := time.Now() + clients.KubeClient.Kube.CoreV1().Pods(test.ServingNamespace).Delete(podToDelete, &metav1.DeleteOptions{}) + + var latestPodState *v1.Pod + if err := wait.PollImmediate(1*time.Second, revisionTimeout, func() (bool, error) { + pod, err := clients.KubeClient.Kube.CoreV1().Pods(test.ServingNamespace).Get(podToDelete, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + latestPodState = pod + for _, status := range pod.Status.ContainerStatuses { + // There are still containers running, keep retrying. + if status.State.Running != nil { + return false, nil + } + } + return true, nil + }); err != nil { + t.Logf("Latest state: %s", spew.Sprint(latestPodState)) + t.Fatalf("Did not observe %q to actually be deleted", podToDelete) + } + + // Make sure the pod was deleted significantly faster than the revision timeout. + timeToDelete := time.Since(start) + if timeToDelete > revisionTimeout-30*time.Second { + t.Errorf("Time to delete pods = %v, want < %v", timeToDelete, revisionTimeout) + } +} + +func TestDestroyPodWithRequests(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "autoscale", + } + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithRevisionTimeoutSeconds(int64(revisionTimeout.Seconds()))) + if err != nil { + t.Fatalf("Failed to create a service: %v", err) + } + routeURL := objects.Route.Status.URL.URL() + + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + routeURL, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "RouteServes", + test.ServingFlags.ResolvableDomain); err != nil { + 
t.Fatalf("The endpoint for Route %s at %s didn't serve correctly: %v", names.Route, routeURL, err) + } + + pods, err := clients.KubeClient.Kube.CoreV1().Pods(test.ServingNamespace).List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", serving.RevisionLabelKey, objects.Revision.Name), + }) + if err != nil || len(pods.Items) == 0 { + t.Fatalf("No pods or error: %v", err) + } + t.Logf("Saw %d pods. Pods: %s", len(pods.Items), spew.Sdump(pods)) + + // The request will sleep for more than 15 seconds. + // NOTE: it needs to be less than TERMINATION_DRAIN_DURATION_SECONDS. + u, _ := url.Parse(routeURL.String()) + q := u.Query() + q.Set("sleep", "15001") + u.RawQuery = q.Encode() + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + t.Fatalf("Error creating HTTP request: %v", err) + } + httpClient, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, u.Hostname(), test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + // Start several requests staggered with 1s delay. + var eg errgroup.Group + for i := 1; i < 7; i++ { + i := i + t.Logf("Starting request %d at %v", i, time.Now()) + eg.Go(func() error { + res, err := httpClient.Do(req) + t.Logf("Request %d done at %v", i, time.Now()) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("request status = %v, want StatusOK", res.StatusCode) + } + return nil + }) + time.Sleep(time.Second) + } + + // And immeditately kill the pod. + podToDelete := pods.Items[0].Name + t.Logf("Deleting pod %q", podToDelete) + clients.KubeClient.Kube.CoreV1().Pods(test.ServingNamespace).Delete(podToDelete, &metav1.DeleteOptions{}) + + // Make sure all the requests succeed. 
+ if err := eg.Wait(); err != nil { + t.Errorf("Not all requests finished with success, eg: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/e2e.go b/test/vendor/knative.dev/serving/test/e2e/e2e.go new file mode 100644 index 0000000000..8d66bc5695 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/e2e.go @@ -0,0 +1,125 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. + // https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + "github.com/google/go-cmp/cmp" + appsv1 "k8s.io/api/apps/v1" + "knative.dev/pkg/system" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/pkg/autoscaler" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// Setup creates the client objects needed in the e2e tests. +func Setup(t *testing.T) *test.Clients { + return SetupWithNamespace(t, test.ServingNamespace) +} + +// SetupAlternativeNamespace creates the client objects needed in e2e tests +// under the alternative namespace. 
+func SetupAlternativeNamespace(t *testing.T) *test.Clients { + return SetupWithNamespace(t, test.AlternativeServingNamespace) +} + +// SetupWithNamespace creates the client objects needed in the e2e tests under the specified namespace. +func SetupWithNamespace(t *testing.T, namespace string) *test.Clients { + pkgTest.SetupLoggingFlags() + clients, err := test.NewClients( + pkgTest.Flags.Kubeconfig, + pkgTest.Flags.Cluster, + namespace) + if err != nil { + t.Fatalf("Couldn't initialize clients: %v", err) + } + return clients +} + +// autoscalerCM returns the current autoscaler config map deployed to the +// test cluster. +func autoscalerCM(clients *test.Clients) (*autoscaler.Config, error) { + autoscalerCM, err := clients.KubeClient.Kube.CoreV1().ConfigMaps("knative-serving").Get( + autoscaler.ConfigName, + metav1.GetOptions{}) + if err != nil { + return nil, err + } + return autoscaler.NewConfigFromMap(autoscalerCM.Data) +} + +// WaitForScaleToZero will wait for the specified deployment to scale to 0 replicas. +// Will wait up to 6 times the configured ScaleToZeroGracePeriod before failing. +func WaitForScaleToZero(t *testing.T, deploymentName string, clients *test.Clients) error { + t.Helper() + t.Logf("Waiting for %q to scale to zero", deploymentName) + + cfg, err := autoscalerCM(clients) + if err != nil { + return fmt.Errorf("failed to get autoscaler configmap: %w", err) + } + + return pkgTest.WaitForDeploymentState( + clients.KubeClient, + deploymentName, + func(d *appsv1.Deployment) (bool, error) { + return d.Status.ReadyReplicas == 0, nil + }, + "DeploymentIsScaledDown", + test.ServingNamespace, + cfg.ScaleToZeroGracePeriod*6, + ) +} + +// waitForActivatorEndpoints waits for the Service endpoints to match that of activator. 
+func waitForActivatorEndpoints(resources *v1a1test.ResourceObjects, clients *test.Clients) error { + return wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { + // We need to fetch the activator endpoints at every check, since it can change. + aeps, err := clients.KubeClient.Kube.CoreV1().Endpoints( + system.Namespace()).Get(networking.ActivatorServiceName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + svcEps, err := clients.KubeClient.Kube.CoreV1().Endpoints(test.ServingNamespace).Get( + resources.Revision.Status.ServiceName, metav1.GetOptions{}) + if err != nil { + return false, err + } + if len(svcEps.Subsets) != len(aeps.Subsets) { + return false, nil + } + for i, ss := range svcEps.Subsets { + if !cmp.Equal(ss.Addresses, aeps.Subsets[i].Addresses) { + return false, nil + } + } + return true, nil + }) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/egress_traffic_test.go b/test/vendor/knative.dev/serving/test/e2e/egress_traffic_test.go new file mode 100644 index 0000000000..afb0879670 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/egress_traffic_test.go @@ -0,0 +1,73 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "testing" + + pkgTest "knative.dev/pkg/test" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" +) + +const ( + targetHostEnvName = "TARGET_HOST" + targetHostDomain = "www.google.com" +) + +func TestEgressTraffic(t *testing.T) { + t.Parallel() + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "httpproxy", + } + envVars := []corev1.EnvVar{{ + Name: targetHostEnvName, + Value: targetHostDomain, + }} + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + service, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithEnv(envVars...)) + if err != nil { + t.Fatalf("Failed to create a service: %v", err) + } + if service.Route.Status.URL == nil { + t.Fatalf("Can't get internal request domain: service.Route.Status.URL is nil") + } + t.Log(service.Route.Status.URL.String()) + + url := service.Route.Status.URL.URL() + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "HTTPProxy", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Failed to send request to httpproxy: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/grpc_test.go b/test/vendor/knative.dev/serving/test/e2e/grpc_test.go new file mode 100644 index 0000000000..eb7c9e1d83 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/grpc_test.go @@ -0,0 +1,239 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "io" + "strconv" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/autoscaling" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + ping "knative.dev/serving/test/test_images/grpc-ping/proto" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +type grpcTest func(*testing.T, *v1a1test.ResourceObjects, *test.Clients, string, string) + +// hasPort checks if a URL contains a port number +func hasPort(u string) bool { + parts := strings.Split(u, ":") + _, err := strconv.Atoi(parts[len(parts)-1]) + return err == nil +} + +func dial(host, domain string) (*grpc.ClientConn, error) { + if !hasPort(host) { + host = host + ":80" + } + if !hasPort(domain) { + domain = domain + ":80" + } + + if host != domain { + // The host to connect and the domain accepted differ. + // We need to do grpc.WithAuthority(...) here. + return grpc.Dial( + host, + grpc.WithAuthority(domain), + grpc.WithInsecure(), + // Retrying DNS errors to avoid .xip.io issues. + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + ) + } + // This is a more preferred usage of the go-grpc client. + return grpc.Dial( + host, + grpc.WithInsecure(), + // Retrying DNS errors to avoid .xip.io issues. 
+ grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + ) +} + +func unaryTest(t *testing.T, resources *v1a1test.ResourceObjects, clients *test.Clients, host, domain string) { + t.Helper() + t.Logf("Connecting to grpc-ping using host %q and authority %q", host, domain) + conn, err := dial(host, domain) + if err != nil { + t.Fatalf("fail to dial: %v", err) + } + defer conn.Close() + + pc := ping.NewPingServiceClient(conn) + t.Log("Testing unary Ping") + + want := &ping.Request{Msg: "Hello!"} + + got, err := pc.Ping(context.Background(), want) + if err != nil { + t.Fatalf("Couldn't send request: %v", err) + } + + if got.Msg != want.Msg { + t.Errorf("Response = %q, want = %q", got.Msg, want.Msg) + } +} + +func streamTest(t *testing.T, resources *v1a1test.ResourceObjects, clients *test.Clients, host, domain string) { + t.Helper() + t.Logf("Connecting to grpc-ping using host %q and authority %q", host, domain) + conn, err := dial(host, domain) + if err != nil { + t.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + + pc := ping.NewPingServiceClient(conn) + t.Log("Testing streaming Ping") + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + stream, err := pc.PingStream(ctx) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + count := 3 + for i := 0; i < count; i++ { + t.Logf("Sending stream %d of %d", i+1, count) + + want := "This is a short message!" 
+ + err = stream.Send(&ping.Request{Msg: want}) + if err != nil { + t.Fatalf("Error sending request: %v", err) + } + + resp, err := stream.Recv() + if err != nil { + t.Fatalf("Error receiving response: %v", err) + } + + got := resp.Msg + + if want != got { + t.Errorf("Stream %d: response = %q, want = %q", i, got, want) + } + } + + stream.CloseSend() + + _, err = stream.Recv() + if err != io.EOF { + t.Errorf("Expected EOF, got %v", err) + } +} + +func testGRPC(t *testing.T, f grpcTest, fopts ...rtesting.ServiceOption) { + t.Helper() + t.Parallel() + resolvable := false + cancel := logstream.Start(t) + defer cancel() + + // Setup + clients := Setup(t) + + t.Log("Creating service for grpc-ping") + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "grpc-ping", + } + + fopts = append(fopts, rtesting.WithNamedPort("h2c")) + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + fopts...) 
+ if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + url := resources.Route.Status.URL.URL() + + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "gRPCPingReadyToServe", + resolvable); err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't return success: %v", names.Route, url, err) + } + + host := url.Host + if !resolvable { + host = pkgTest.Flags.IngressEndpoint + if pkgTest.Flags.IngressEndpoint == "" { + host, err = ingress.GetIngressEndpoint(clients.KubeClient.Kube) + if err != nil { + t.Fatalf("Could not get service endpoint: %v", err) + } + } + } + + f(t, resources, clients, host, url.Hostname()) +} + +func TestGRPCUnaryPing(t *testing.T) { + testGRPC(t, unaryTest) +} + +func TestGRPCStreamingPing(t *testing.T) { + testGRPC(t, streamTest) +} + +func TestGRPCUnaryPingViaActivator(t *testing.T) { + testGRPC(t, + func(t *testing.T, resources *v1a1test.ResourceObjects, clients *test.Clients, host, domain string) { + if err := waitForActivatorEndpoints(resources, clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } + unaryTest(t, resources, clients, host, domain) + }, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + }), + ) +} + +func TestGRPCStreamingPingViaActivator(t *testing.T) { + testGRPC(t, + func(t *testing.T, resources *v1a1test.ResourceObjects, clients *test.Clients, host, domain string) { + if err := waitForActivatorEndpoints(resources, clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } + streamTest(t, resources, clients, host, domain) + }, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + }), + ) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/helloworld_test.go 
b/test/vendor/knative.dev/serving/test/e2e/helloworld_test.go new file mode 100644 index 0000000000..11c70057d8 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/helloworld_test.go @@ -0,0 +1,205 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestHelloWorld(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + if test.ServingFlags.Https { + // Save the current Gateway to restore it after the test + oldGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(v1a1test.Namespace).Get(v1a1test.GatewayName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Gateway %s/%s", v1a1test.Namespace, v1a1test.GatewayName) + } + test.CleanupOnInterrupt(func() { v1a1test.RestoreGateway(t, clients, *oldGateway) }) + defer v1a1test.RestoreGateway(t, clients, *oldGateway) + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) 
}) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + resources, httpsTransportOption, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, test.ServingFlags.Https) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + url := resources.Route.Status.URL.URL() + var opt interface{} + if test.ServingFlags.Https { + url.Scheme = "https" + if httpsTransportOption == nil { + t.Fatalf("Https transport option is nil") + } + opt = *httpsTransportOption + } + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "HelloWorldServesText", + test.ServingFlags.ResolvableDomain, + opt); err != nil { + t.Fatalf("The endpoint %s for Route %s didn't serve the expected text %q: %v", url, names.Route, test.HelloWorldText, err) + } + + revision := resources.Revision + if val, ok := revision.Labels["serving.knative.dev/configuration"]; ok { + if val != names.Config { + t.Fatalf("Expect configuration name in revision label %q but got %q ", names.Config, val) + } + } else { + t.Fatalf("Failed to get configuration name from Revision label") + } + if val, ok := revision.Labels["serving.knative.dev/service"]; ok { + if val != names.Service { + t.Fatalf("Expect Service name in revision label %q but got %q ", names.Service, val) + } + } else { + t.Fatalf("Failed to get Service name from Revision label") + } +} + +func TestQueueSideCarResourceLimit(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + t.Log("Creating a new Service") + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, 
&names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithResourceRequirements(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("50m"), + corev1.ResourceName("memory"): resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceName("cpu"): resource.MustParse("100m"), + corev1.ResourceName("memory"): resource.MustParse("258Mi"), + }, + }), v1a1opts.WithConfigAnnotations(map[string]string{ + serving.QueueSideCarResourcePercentageAnnotation: "0.2", + })) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + url := resources.Route.Status.URL.URL() + + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "HelloWorldServesText", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, url, test.HelloWorldText, err) + } + + revision := resources.Revision + if val, ok := revision.Labels["serving.knative.dev/configuration"]; ok { + if val != names.Config { + t.Fatalf("Expect configuration name in revision label %q but got %q ", names.Config, val) + } + } else { + t.Fatalf("Failed to get configuration name from Revision label") + } + if val, ok := revision.Labels["serving.knative.dev/service"]; ok { + if val != names.Service { + t.Fatalf("Expect Service name in revision label %q but got %q ", names.Service, val) + } + } else { + t.Fatalf("Failed to get Service name from Revision label") + } + + container, err := getContainer(clients.KubeClient, resources.Service.Name, "queue-proxy", resources.Service.Namespace) + if err != nil { + t.Fatalf("Failed to get queue-proxy container in the pod %v in namespace %v: %v", resources.Service.Name, 
resources.Service.Namespace, err) + } + + if container.Resources.Limits.Cpu().Cmp(resource.MustParse("40m")) != 0 { + t.Fatalf("queue-proxy should have limit.cpu set to 40m got %v", container.Resources.Limits.Cpu()) + } + if container.Resources.Limits.Memory().Cmp(resource.MustParse("200Mi")) != 0 { + t.Fatalf("queue-proxy should have limit.memory set to 200Mi got %v", container.Resources.Limits.Memory()) + } + if container.Resources.Requests.Cpu().Cmp(resource.MustParse("25m")) != 0 { + t.Fatalf("queue-proxy should have request.cpu set to 25m got %v", container.Resources.Requests.Cpu()) + } + if container.Resources.Requests.Memory().Cmp(resource.MustParse("50Mi")) != 0 { + t.Fatalf("queue-proxy should have request.memory set to 50Mi got %v", container.Resources.Requests.Memory()) + } +} + +// Container returns container for given Pod and Container in the namespace +func getContainer(client *pkgTest.KubeClient, podName, containerName, namespace string) (corev1.Container, error) { + pods := client.Kube.CoreV1().Pods(namespace) + podList, err := pods.List(metav1.ListOptions{}) + if err != nil { + return corev1.Container{}, err + } + for _, pod := range podList.Items { + if strings.Contains(pod.Name, podName) { + result, err := pods.Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return corev1.Container{}, err + } + for _, container := range result.Spec.Containers { + if strings.Contains(container.Name, containerName) { + return container, nil + } + } + } + } + return corev1.Container{}, fmt.Errorf("Could not find container for %s/%s", podName, containerName) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/image_pull_error_test.go b/test/vendor/knative.dev/serving/test/e2e/image_pull_error_test.go new file mode 100644 index 0000000000..69bf6c7ed9 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/image_pull_error_test.go @@ -0,0 +1,107 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 
2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package e2e + +import ( + "fmt" + "testing" + + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + v1alpha1testing "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestImagePullError(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + // TODO: Replace this when sha256 is broken. 
+ Image: "ubuntu@sha256:0000000000000000000000000000000000000000000000000000000000000000", + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Service %s", names.Image) + var ( + svc *v1alpha1.Service + err error + ) + if svc, err = createLatestService(t, clients, names); err != nil { + t.Fatalf("Failed to create Service %s: %v", names.Service, err) + } + + names.Config = serviceresourcenames.Configuration(svc) + + err = v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, func(r *v1alpha1.Service) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.ServiceConditionConfigurationsReady) + if cond != nil && !cond.IsUnknown() { + if cond.IsFalse() { + if cond.Reason == "RevisionFailed" { + return true, nil + } + } + t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("the service %s was not marked with expected error condition, but with (Reason=%q, Message=%q, Status=%q)", + names.Service, cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ContainerUnpullable") + + if err != nil { + t.Fatalf("Failed to validate service state: %s", err) + } + + revisionName, err := revisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the images are not pulled, the revision should have error status.") + err = v1a1test.CheckRevisionState(clients.ServingAlphaClient, revisionName, func(r *v1alpha1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.RevisionConditionReady) + if cond != nil { + if cond.Reason == "ImagePullBackOff" || cond.Reason == "ErrImagePull" { + return true, nil + } + return true, fmt.Errorf("the revision %s was not marked with expected error condition, but with (Reason=%q, Message=%q)", + revisionName, cond.Reason, cond.Message) + } + return false, nil 
+ }) + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } +} + +// Wrote our own thing so that we can pass in an image by digest. +// knative/pkg/test.ImagePath currently assumes there's a tag, which fails to parse. +func createLatestService(t *testing.T, clients *test.Clients, names test.ResourceNames) (*v1alpha1.Service, error) { + opt := v1alpha1testing.WithInlineConfigSpec(*v1a1test.ConfigurationSpec(names.Image)) + service := v1alpha1testing.ServiceWithoutNamespace(names.Service, opt) + v1a1test.LogResourceObject(t, v1a1test.ResourceObjects{Service: service}) + return clients.ServingAlphaClient.Services.Create(service) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/istio/probing_test.go b/test/vendor/knative.dev/serving/test/e2e/istio/probing_test.go new file mode 100644 index 0000000000..ba1b8ed9cc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/istio/probing_test.go @@ -0,0 +1,471 @@ +// +build e2e istio + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "net/http" + "net/url" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/sync/errgroup" + "k8s.io/apimachinery/pkg/watch" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logstream" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/networking" + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +var ( + namespace = "knative-serving" +) + +func TestIstioProbing(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + + clients := e2e.Setup(t) + + // Save the current Gateway to restore it after the test + oldGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(namespace).Get(networking.KnativeIngressGateway, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Gateway %s/%s", namespace, networking.KnativeIngressGateway) + } + restore := func() { + curGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(namespace).Get(networking.KnativeIngressGateway, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Gateway %s/%s", namespace, networking.KnativeIngressGateway) + } + curGateway.Spec.Servers = oldGateway.Spec.Servers + if _, err := clients.IstioClient.NetworkingV1alpha3().Gateways(namespace).Update(curGateway); err != nil { + t.Fatalf("Failed to restore Gateway %s/%s: %v", namespace, networking.KnativeIngressGateway, err) + } + } + test.CleanupOnInterrupt(restore) + defer restore() + + // Create a dummy service to get the domain name + var domain string + func() { + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + test.CleanupOnInterrupt(func() { test.TearDown(clients, 
names) }) + defer test.TearDown(clients, names) + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */) + if err != nil { + t.Fatalf("Failed to create Service %s: %v", names.Service, err) + } + domain = strings.SplitN(objects.Route.Status.URL.Host, ".", 2)[1] + }() + + tlsOptions := &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + PrivateKey: "/etc/istio/ingressgateway-certs/tls.key", + ServerCertificate: "/etc/istio/ingressgateway-certs/tls.crt", + } + + cases := []struct { + name string + servers []*istiov1alpha3.Server + urls []string + }{{ + name: "HTTP", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-http", + Number: 80, + Protocol: "HTTP", + }, + }}, + urls: []string{"http://%s/"}, + }, { + name: "HTTP2", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-http2", + Number: 80, + Protocol: "HTTP2", + }, + }}, + urls: []string{"http://%s/"}, + }, { + name: "HTTP custom port", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-http", + Number: 443, + Protocol: "HTTP", + }, + }}, + urls: []string{"http://%s:443/"}, + }, { + name: "HTTP & HTTPS", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-http", + Number: 80, + Protocol: "HTTP", + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: tlsOptions, + }}, + urls: []string{"http://%s/", "https://%s/"}, + }, { + name: "HTTP redirect & HTTPS", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-http", + Number: 80, + Protocol: "HTTP", + }, + }, { + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: 
"standard-https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: tlsOptions, + }}, + urls: []string{"http://%s/", "https://%s/"}, + }, { + name: "HTTPS", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: tlsOptions, + }}, + urls: []string{"https://%s/"}, + }, { + name: "HTTPS non standard port", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-https", + Number: 80, + Protocol: "HTTPS", + }, + Tls: tlsOptions, + }}, + urls: []string{"https://%s:80/"}, + }, { + name: "unsupported protocol (GRPC)", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-grpc", + Number: 80, + Protocol: "GRPC", + }, + }}, + // No URLs to probe, just validates the Knative Service is Ready instead of stuck in NotReady + }, { + name: "unsupported protocol (TCP)", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-tcp", + Number: 80, + Protocol: "TCP", + }, + }}, + // No URLs to probe, just validates the Knative Service is Ready instead of stuck in NotReady + }, { + name: "unsupported protocol (Mongo)", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-mongo", + Number: 80, + Protocol: "Mongo", + }, + }}, + // No URLs to probe, just validates the Knative Service is Ready instead of stuck in NotReady + }, { + name: "port not present in service", + servers: []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "custom-http", + Number: 8090, + Protocol: "HTTP", + }, + }}, + // No URLs to probe, just validates the Knative Service is Ready instead of stuck in NotReady + }} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + 
+ var transportOptions []interface{} + if hasHTTPS(c.servers) { + transportOptions = append(transportOptions, setupHTTPS(t, clients.KubeClient, []string{names.Service + "." + domain})) + } + + setupGateway(t, clients, names, domain, c.servers) + + // Create the service and wait for it to be ready + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + _, _, err = v1a1test.CreateRunLatestServiceReady(t, clients, &names, false /* https */) + if err != nil { + t.Fatalf("Failed to create Service %s: %v", names.Service, err) + } + + // Probe the Service on all endpoints + var g errgroup.Group + for _, tmpl := range c.urls { + tmpl := tmpl + g.Go(func() error { + u, err := url.Parse(fmt.Sprintf(tmpl, names.Service+"."+domain)) + if err != nil { + return fmt.Errorf("failed to parse URL: %w", err) + } + if _, err := pkgTest.WaitForEndpointStateWithTimeout( + clients.KubeClient, + t.Logf, + u, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText))), + "HelloWorldServesText", + test.ServingFlags.ResolvableDomain, + 1*time.Minute, + transportOptions...); err != nil { + return fmt.Errorf("failed to probe %s: %w", u, err) + } + return nil + }) + } + err = g.Wait() + if err != nil { + t.Fatalf("Failed to probe the Service: %v", err) + } + }) + } +} + +func hasHTTPS(servers []*istiov1alpha3.Server) bool { + for _, server := range servers { + if server.Port.Protocol == "HTTPS" { + return true + } + } + return false +} + +// setupGateway updates the ingress Gateway to the provided Servers and waits until all Envoy pods have been updated. 
+func setupGateway(t *testing.T, clients *test.Clients, names test.ResourceNames, domain string, servers []*istiov1alpha3.Server) { + // Get the current Gateway + curGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(namespace).Get(networking.KnativeIngressGateway, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Gateway %s/%s: %v", namespace, networking.KnativeIngressGateway, err) + } + + // Update its Spec + newGateway := curGateway.DeepCopy() + newGateway.Spec.Servers = servers + + // Update the Gateway + gw, err := clients.IstioClient.NetworkingV1alpha3().Gateways(namespace).Update(newGateway) + if err != nil { + t.Fatalf("Failed to update Gateway %s/%s: %v", namespace, networking.KnativeIngressGateway, err) + } + + var selectors []string + for k, v := range gw.Spec.Selector { + selectors = append(selectors, k+"="+v) + } + selector := strings.Join(selectors, ",") + + // Restart the Gateway pods: this is needed because Istio without SDS won't refresh the cert when the secret is updated + pods, err := clients.KubeClient.Kube.CoreV1().Pods("istio-system").List(metav1.ListOptions{LabelSelector: selector}) + if err != nil { + t.Fatalf("Failed to list Gateway pods: %v", err) + } + + // TODO(bancel): there is a race condition here if a pod listed in the call above is deleted before calling watch below + + var wg sync.WaitGroup + wg.Add(len(pods.Items)) + wtch, err := clients.KubeClient.Kube.CoreV1().Pods("istio-system").Watch(metav1.ListOptions{LabelSelector: selector}) + if err != nil { + t.Fatalf("Failed to watch Gateway pods: %v", err) + } + defer wtch.Stop() + + done := make(chan struct{}) + go func() { + for { + select { + case event := <-wtch.ResultChan(): + if event.Type == watch.Deleted { + wg.Done() + } + case <-done: + return + } + } + }() + + err = clients.KubeClient.Kube.CoreV1().Pods("istio-system").DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + t.Fatalf("Failed to 
delete Gateway pods: %v", err) + } + + wg.Wait() + done <- struct{}{} +} + +// setupHTTPS creates a self-signed certificate, installs it as a Secret and returns an *http.Transport +// trusting the certificate as a root CA. +func setupHTTPS(t *testing.T, kubeClient *pkgTest.KubeClient, hosts []string) spoof.TransportOption { + t.Helper() + + cert, key, err := generateCertificate(hosts) + if err != nil { + t.Fatalf("Failed to generate the certificate: %v", err) + } + + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + + if ok := rootCAs.AppendCertsFromPEM(cert); !ok { + t.Fatalf("Failed to add the certificate to the root CA") + } + + kubeClient.Kube.CoreV1().Secrets("istio-system").Delete("istio-ingressgateway-certs", &metav1.DeleteOptions{}) + _, err = kubeClient.Kube.CoreV1().Secrets("istio-system").Create(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "istio-system", + Name: "istio-ingressgateway-certs", + }, + Type: corev1.SecretTypeTLS, + Data: map[string][]byte{ + "tls.key": key, + "tls.crt": cert, + }, + }) + if err != nil { + t.Fatalf("Failed to set Secret %s/%s: %v", "istio-system", "istio-ingressgateway-certs", err) + } + + return func(transport *http.Transport) *http.Transport { + transport.TLSClientConfig = &tls.Config{RootCAs: rootCAs} + return transport + } +} + +// generateCertificate generates a self-signed certificate for the provided hosts and returns +// the PEM encoded certificate and private key. 
+func generateCertificate(hosts []string) ([]byte, []byte, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate private key: %w", err) + } + + notBefore := time.Now().Add(-5 * time.Minute) + notAfter := notBefore.Add(2 * time.Hour) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate serial number: %w", err) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Knative Serving"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + for _, h := range hosts { + if ip := net.ParseIP(h); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, nil, fmt.Errorf("failed to create the certificate: %w", err) + } + + var certBuf bytes.Buffer + if err := pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return nil, nil, fmt.Errorf("failed to encode the certificate: %w", err) + } + + var keyBuf bytes.Buffer + if err := pem.Encode(&keyBuf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return nil, nil, fmt.Errorf("failed to encode the private key: %w", err) + } + + return certBuf.Bytes(), keyBuf.Bytes(), nil +} diff --git a/test/vendor/knative.dev/serving/test/e2e/minscale_readiness_test.go b/test/vendor/knative.dev/serving/test/e2e/minscale_readiness_test.go new file mode 100644 index 0000000000..535cae3eee --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/e2e/minscale_readiness_test.go @@ -0,0 +1,152 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "strconv" + "time" + + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/autoscaling" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestMinScale(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + const minScale = 4 + + clients := Setup(t) + + name := test.ObjectNameForTest(t) + + names := test.ResourceNames{ + Config: name, + Route: name, + Image: "helloworld", + } + + t.Log("Creating configuration") + if _, err := v1a1test.CreateConfiguration(t, clients, names, withMinScale(minScale)); err != nil { + t.Fatalf("Failed to create Configuration: %v", err) + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + revName := latestRevisionName(t, clients, names.Config) + deploymentName := revName + "-deployment" + + // Before becoming ready, observe minScale + t.Log("Waiting for revision to scale to minScale before becoming ready") + if err := waitForDesiredScale(t, clients, deploymentName, gte(minScale)); err != nil { + t.Fatalf("The deployment %q did not scale >= %d before 
becoming ready: %v", deploymentName, minScale, err) + } + + // Revision becomes ready + if err := v1a1test.WaitForRevisionState( + clients.ServingAlphaClient, revName, v1a1test.IsRevisionReady, "RevisionIsReady", + ); err != nil { + t.Fatalf("The Revision %q did not become ready: %v", revName, err) + } + + // Without a route, ignore minScale + t.Log("Waiting for revision to scale below minScale after becoming ready") + if err := waitForDesiredScale(t, clients, deploymentName, lt(minScale)); err != nil { + t.Fatalf("The deployment %q did not scale < minScale after becoming ready: %v", deploymentName, err) + } + + // Create route + t.Log("Creating route") + if _, err := v1a1test.CreateRoute(t, clients, names); err != nil { + t.Fatalf("Failed to create Route: %v", err) + } + + // Route becomes ready + if err := v1a1test.WaitForRouteState( + clients.ServingAlphaClient, names.Route, v1a1test.IsRouteReady, "RouteIsReady", + ); err != nil { + t.Fatalf("The Route %q is not ready: %v", names.Route, err) + } + + // With a route, observe minScale + t.Log("Waiting for revision to scale to minScale after creating route") + if err := waitForDesiredScale(t, clients, deploymentName, gte(minScale)); err != nil { + t.Fatalf("The deployment %q did not scale >= %d after creating route: %v", deploymentName, minScale, err) + } +} + +func gte(m int) func(int32) bool { + return func(n int32) bool { + return n >= int32(m) + } +} + +func lt(m int) func(int32) bool { + return func(n int32) bool { + return n < int32(m) + } +} + +func withMinScale(minScale int) func(cfg *v1alpha1.Configuration) { + return func(cfg *v1alpha1.Configuration) { + if cfg.Spec.Template.Annotations == nil { + cfg.Spec.Template.Annotations = make(map[string]string) + } + cfg.Spec.Template.Annotations[autoscaling.MinScaleAnnotationKey] = strconv.Itoa(minScale) + } +} + +func latestRevisionName(t *testing.T, clients *test.Clients, configName string) string { + // Wait for the Config have a LatestCreatedRevisionName + if 
err := v1a1test.WaitForConfigurationState( + clients.ServingAlphaClient, configName, + v1a1test.ConfigurationHasCreatedRevision, "ConfigurationHasCreatedRevision", + ); err != nil { + t.Fatalf("The Configuration %q does not have a LatestCreatedRevisionName: %v", configName, err) + } + + config, err := clients.ServingAlphaClient.Configs.Get(configName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Configuration after it was seen to be live: %v", err) + } + + return config.Status.LatestCreatedRevisionName +} + +func waitForDesiredScale(t *testing.T, clients *test.Clients, deploymentName string, cond func(int32) bool) error { + deployments := clients.KubeClient.Kube.AppsV1().Deployments(test.ServingNamespace) + + return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) { + deployment, err := deployments.Get(deploymentName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + return cond(*deployment.Spec.Replicas), nil + }) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/namespace_test.go b/test/vendor/knative.dev/serving/test/e2e/namespace_test.go new file mode 100644 index 0000000000..0ef17b59b4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/namespace_test.go @@ -0,0 +1,133 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package e2e + +import ( + "fmt" + "testing" + + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func checkResponse(t *testing.T, clients *test.Clients, names test.ResourceNames, expectedText string) error { + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + names.URL, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + return fmt.Errorf("the endpoint for Route %s at %s didn't serve the expected text %q: %w", names.Route, names.URL.String(), expectedText, err) + } + + return nil +} + +func TestMultipleNamespace(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + defaultClients := Setup(t) // This one uses the default namespace `test.ServingNamespace` + altClients := SetupAlternativeNamespace(t) + + serviceName := test.ObjectNameForTest(t) + + defaultResources := test.ResourceNames{ + Service: serviceName, + Image: test.PizzaPlanet1, + } + test.CleanupOnInterrupt(func() { test.TearDown(defaultClients, defaultResources) }) + defer test.TearDown(defaultClients, defaultResources) + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, defaultClients, &defaultResources, + false /* https TODO(taragu) turn this on after helloworld test running with https */); err != nil { + t.Fatalf("Failed to create Service %v in namespace %v: %v", defaultResources.Service, test.ServingNamespace, err) + } + + altResources := test.ResourceNames{ + Service: serviceName, + Image: test.PizzaPlanet2, + } + test.CleanupOnInterrupt(func() { test.TearDown(altClients, altResources) }) + defer test.TearDown(altClients, altResources) + if _, _, err := 
v1a1test.CreateRunLatestServiceReady(t, altClients, &altResources, + false /* https TODO(taragu) turn this on after helloworld test running with https */); err != nil { + t.Fatalf("Failed to create Service %v in namespace %v: %v", altResources.Service, test.AlternativeServingNamespace, err) + } + + if err := checkResponse(t, defaultClients, defaultResources, test.PizzaPlanetText1); err != nil { + t.Error(err) + } + + if err := checkResponse(t, altClients, altResources, test.PizzaPlanetText2); err != nil { + t.Error(err) + } +} + +// This test is to ensure we do not leak deletion of services in other namespaces when deleting a route. +func TestConflictingRouteService(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + names := test.ResourceNames{ + Service: test.AppendRandomString("conflicting-route-service"), + TrafficTarget: "chips", + Image: test.PizzaPlanet1, + } + + // Create a service in a different namespace but route label points to a route in another namespace + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: test.AppendRandomString("conflicting-route-service"), + Namespace: test.AlternativeServingNamespace, + Labels: map[string]string{ + serving.RouteLabelKey: names.Service, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "some-internal-addr", + }, + } + + altClients := SetupAlternativeNamespace(t) + altClients.KubeClient.Kube.CoreV1().Services(test.AlternativeServingNamespace).Create(svc) + cleanup := func() { + altClients.KubeClient.Kube.CoreV1().Services(test.AlternativeServingNamespace).Delete(svc.Name, &metav1.DeleteOptions{}) + } + test.CleanupOnInterrupt(cleanup) + defer cleanup() + + clients := Setup(t) + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running 
with https */); err != nil { + t.Errorf("Failed to create Service %v in namespace %v: %v", names.Service, test.ServingNamespace, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/pod_schedule_error_test.go b/test/vendor/knative.dev/serving/test/e2e/pod_schedule_error_test.go new file mode 100644 index 0000000000..6358e08287 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/pod_schedule_error_test.go @@ -0,0 +1,124 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package e2e + +import ( + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestPodScheduleError(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + const ( + errorReason = "RevisionFailed" + errorMsg = "Insufficient cpu" + revisionReason = "Unschedulable" + ) + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + t.Logf("Creating a new Service %s", names.Image) + resources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50000m"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50000m"), + }, + } + var ( + svc *v1alpha1.Service + err error + ) + if svc, err = v1a1test.CreateLatestService(t, clients, names, v1a1opts.WithResourceRequirements(resources)); err != nil { + t.Fatalf("Failed to create Service %s: %v", names.Service, err) + } + + names.Config = serviceresourcenames.Configuration(svc) + + err = v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, func(r *v1alpha1.Service) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.ServiceConditionConfigurationsReady) + if cond != nil && !cond.IsUnknown() { + if strings.Contains(cond.Message, errorMsg) && cond.IsFalse() { + return true, nil + } + t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status) + return true, fmt.Errorf("the service %s was not marked with 
expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)", + names.Config, errorReason, errorMsg, "False", cond.Reason, cond.Message, cond.Status) + } + return false, nil + }, "ContainerUnscheduleable") + + if err != nil { + t.Fatalf("Failed to validate service state: %s", err) + } + + revisionName, err := revisionFromConfiguration(clients, names.Config) + if err != nil { + t.Fatalf("Failed to get revision from configuration %s: %v", names.Config, err) + } + + t.Log("When the containers are not scheduled, the revision should have error status.") + err = v1a1test.WaitForRevisionState(clients.ServingAlphaClient, revisionName, func(r *v1alpha1.Revision) (bool, error) { + cond := r.Status.GetCondition(v1alpha1.RevisionConditionReady) + if cond != nil { + if cond.Reason == revisionReason && strings.Contains(cond.Message, errorMsg) { + return true, nil + } + return true, fmt.Errorf("the revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)", + revisionName, revisionReason, errorMsg, cond.Reason, cond.Message) + } + return false, nil + }, errorReason) + + if err != nil { + t.Fatalf("Failed to validate revision state: %s", err) + } +} + +// Get revision name from configuration. 
+func revisionFromConfiguration(clients *test.Clients, configName string) (string, error) { + config, err := clients.ServingAlphaClient.Configs.Get(configName, metav1.GetOptions{}) + if err != nil { + return "", err + } + if config.Status.LatestCreatedRevisionName != "" { + return config.Status.LatestCreatedRevisionName, nil + } + return "", fmt.Errorf("no valid revision name found in configuration %s", configName) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/rollback_byo_test.go b/test/vendor/knative.dev/serving/test/e2e/rollback_byo_test.go new file mode 100644 index 0000000000..c5a47b217d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/rollback_byo_test.go @@ -0,0 +1,138 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "testing" + + "knative.dev/pkg/ptr" + "knative.dev/pkg/test/logstream" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + . 
"knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestRollbackBYOName(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + serviceName := test.ObjectNameForTest(t) + byoNameOld := serviceName + "-byo-foo" + byoNameNew := serviceName + "-byo-foo-new" + names := test.ResourceNames{ + Service: serviceName, + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + withTrafficSpecOld := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: byoNameOld, + Percent: ptr.Int64(100), + }, + }}, + }) + withTrafficSpecNew := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + RevisionName: byoNameNew, + Percent: ptr.Int64(100), + }, + }}, + }) + + t.Logf("Creating a new Service with byo config name %q.", byoNameOld) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withTrafficSpecOld, func(svc *v1alpha1.Service) { + svc.Spec.ConfigurationSpec.Template.ObjectMeta.Name = byoNameOld + }) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + originalServiceSpec := resources.Service.Spec + revisionName := resources.Revision.ObjectMeta.Name + if revisionName != byoNameOld { + t.Fatalf("Expect configuration name in revision label %q but got %q ", byoNameOld, revisionName) + } + + // Update service to use a new byo name + t.Logf("Updating the Service to a new revision with a new byo name %q.", byoNameNew) + newSvc := resources.Service.DeepCopy() + newSvc.Spec.ConfigurationSpec.Template.ObjectMeta.Name = byoNameNew + withTrafficSpecNew(newSvc) + svc, err := 
v1a1test.PatchService(t, clients, resources.Service, newSvc) + resources.Service = svc + if err != nil { + t.Fatalf("Patch update for Service (new byo name %q) failed: %v", byoNameNew, err) + } + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + newRevision, err := v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for new byo name %s: %v", names.Service, byoNameNew, err) + } + if newRevision != byoNameNew { + t.Fatalf("Expect configuration name in revision label %q but got %q ", byoNameNew, newRevision) + } + + // Now, rollback to the first RevisionSpec + rollbackSvc := resources.Service.DeepCopy() + rollbackSvc.Spec = originalServiceSpec + svc, err = v1a1test.PatchService(t, clients, resources.Service, rollbackSvc) + resources.Service = svc + if err != nil { + t.Fatalf("Patch update for Service (rollback to byo name %q) failed: %v", byoNameOld, err) + } + + t.Logf("We are rolling back to the previous revision (byoNameOld %q).", byoNameOld) + // Wait for the route to become ready, and check that the traffic split between the byoNameOld + // and byoNameNew is 100 and 0, respectively + err = v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + for _, tr := range s.Status.Traffic { + if tr.RevisionName != byoNameOld { + return false, nil + } + if tr.Percent == nil || *tr.Percent != 100 { + return false, nil + } + } + return true, nil + }, "ServiceRollbackRevision") + if err != nil { + t.Fatalf("Service %s was not rolled back with byo name %s: %v", names.Service, byoNameOld, err) + } + + // Verify that the latest ready revision and latest created revision are both byoNameNew, + // which means no new revision is created in the rollback + err = v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + return 
(s.Status.LatestReadyRevisionName == byoNameOld && s.Status.LatestCreatedRevisionName == byoNameOld), nil + }, "ServiceNoNewRevisionCreated") + if err != nil { + t.Fatalf("Service %s was not rolled back with byo name %s: %v", names.Service, byoNameOld, err) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/route_service_test.go b/test/vendor/knative.dev/serving/test/e2e/route_service_test.go new file mode 100644 index 0000000000..4adf29b520 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/route_service_test.go @@ -0,0 +1,199 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "strings" + "testing" + + "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + "knative.dev/pkg/test/logstream" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + . 
"knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// TestRoutesNotReady tests the scenario that when Route's status is +// Ready == False, the Service's RoutesReady value should change from +// Unknown to False +func TestRoutesNotReady(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + withTrafficSpec := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + RevisionName: "foobar", // Invalid revision name. This allows Revision creation to succeed and Route configuration to fail + Percent: ptr.Int64(100), + }, + }, + }, + }) + + t.Log("Creating a new Service with an invalid traffic target.") + svc, err := v1a1test.CreateLatestService(t, clients, names, withTrafficSpec) + if err != nil { + t.Fatalf("Failed to create initial Service %q: %#v", names.Service, err) + } + + t.Logf("Waiting for Service %q ObservedGeneration to match Generation, and status transition to Ready == False.", names.Service) + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceNotReady, "ServiceIsNotReady"); err != nil { + t.Fatalf("Failed waiting for Service %q to transition to Ready == False: %#v", names.Service, err) + } + + t.Logf("Validating Route %q has reconciled to Ready == False.", serviceresourcenames.Route(svc)) + // Check Route is not ready + if err = v1a1test.CheckRouteState(clients.ServingAlphaClient, serviceresourcenames.Route(svc), v1a1test.IsRouteNotReady); err != nil { + t.Fatalf("The Route %q was marked as Ready to serve traffic but it should not be: %#v", serviceresourcenames.Route(svc), err) + } + + // Wait for RoutesReady to 
become False + t.Logf("Validating Service %q has reconciled to RoutesReady == False.", names.Service) + if err = v1a1test.CheckServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceRoutesNotReady); err != nil { + t.Fatalf("Service %q was not marked RoutesReady == False: %#v", names.Service, err) + } +} + +func TestRouteVisibilityChanges(t *testing.T) { + testCases := []struct { + name string + withTrafficSpec ServiceOption + }{ + { + name: "Route visibility changes from public to private with single traffic", + withTrafficSpec: WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + }, + }, + }, + }), + }, + { + name: "Route visibility changes from public to private with tag only", + withTrafficSpec: WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(100), + Tag: "cow", + }, + }, + }, + }), + }, + { + name: "Route visibility changes from public to private with both tagged and non-tagged traffic", + withTrafficSpec: WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(60), + }, + }, + { + TrafficTarget: v1.TrafficTarget{ + Percent: ptr.Int64(40), + Tag: "cow", + }, + }, + }, + }), + }, + } + + hasPublicRoute := func(r *v1alpha1.Route) (b bool, e error) { + return !strings.HasSuffix(r.Status.URL.Host, network.GetClusterDomainName()), nil + } + + hasPrivateRoute := func(r *v1alpha1.Route) (b bool, e error) { + return strings.HasSuffix(r.Status.URL.Host, network.GetClusterDomainName()), nil + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(st *testing.T) { + st.Parallel() + cancel := logstream.Start(st) + defer cancel() + + clients := Setup(st) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: test.PizzaPlanet1, + } + + 
test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + st.Log("Creating a new Service") + svc, err := v1a1test.CreateLatestService(st, clients, names, testCase.withTrafficSpec) + if err != nil { + st.Fatalf("Failed to create initial Service %q: %#v", names.Service, err) + } + + st.Logf("Waiting for Service %q ObservedGeneration to match Generation, and status transition to Ready == True", names.Service) + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + st.Fatalf("Failed waiting for Service %q to transition to Ready == True: %#v", names.Service, err) + } + + st.Logf("Validating Route %q has non cluster-local address", serviceresourcenames.Route(svc)) + // Check Route is not ready + + if err = v1a1test.CheckRouteState(clients.ServingAlphaClient, serviceresourcenames.Route(svc), hasPublicRoute); err != nil { + st.Fatalf("The Route %q should be publicly visible but it was not: %#v", serviceresourcenames.Route(svc), err) + } + + newSvc := svc.DeepCopy() + newSvc.SetLabels(map[string]string{"serving.knative.dev/visibility": "cluster-local"}) + v1a1test.PatchService(st, clients, svc, newSvc) + + st.Logf("Waiting for Service %q ObservedGeneration to match Generation, and status transition to Ready == True", names.Service) + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + st.Fatalf("Failed waiting for Service %q to transition to Ready == True: %#v", names.Service, err) + } + + st.Logf("Validating Route %q has cluster-local address", serviceresourcenames.Route(svc)) + // Check Route is not ready + + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, serviceresourcenames.Route(svc), hasPrivateRoute, "RouteIsClusterLocal"); err != nil { + st.Fatalf("The Route %q should be privately visible but it was not: %#v", 
serviceresourcenames.Route(svc), err) + } + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/scale.go b/test/vendor/knative.dev/serving/test/e2e/scale.go new file mode 100644 index 0000000000..83b8b9ae22 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/scale.go @@ -0,0 +1,217 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "math" + "net/url" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/pkg/pool" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + v1alpha1testing "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +// Latencies is an interface for providing mechanisms for recording timings +// for the parts of the scale test. +type Latencies interface { + // Add takes the name of this measurement and the time at which it began. + // This should be called at the moment of completion, so that duration may + // be computed with `time.Since(start)`. We use this signature to that this + // function is suitable for use in a `defer`. 
+ Add(name string, start time.Time) +} + +func abortOnTimeout(ctx context.Context) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + return true, ctx.Err() + } +} + +func ScaleToWithin(t *testing.T, scale int, duration time.Duration, latencies Latencies) { + clients := Setup(t) + + cleanupCh := make(chan test.ResourceNames, scale) + defer close(cleanupCh) + + // These are the local (per-probe) and global (all probes) targets for the scale test. + // 90 = 18/20, so allow two failures with the minimum number of probes, but expect + // us to have 2.5 9s overall. + // + // TODO(#2850): After moving to Istio 1.1 we need to revisit these SLOs. + const ( + localSLO = 0.90 + globalSLO = 0.995 + minProbes = 20 + ) + pm := test.NewProberManager(t.Logf, clients, minProbes) + + ctx, cancel := context.WithTimeout(context.Background(), duration) + defer cancel() + width := int(math.Ceil(math.Log10(float64(scale)))) + + t.Log("Creating new Services") + wg := pool.NewWithCapacity(50 /* maximum in-flight creates */, scale /* capacity */) + for i := 0; i < scale; i++ { + // https://golang.org/doc/faq#closures_and_goroutines + i := i + wg.Go(func() error { + names := test.ResourceNames{ + Service: test.SubServiceNameForTest(t, fmt.Sprintf("%0[1]*[2]d", width, i)), + Image: "helloworld", + } + + // Start the clock for various waypoints towards Service readiness. + start := time.Now() + // Record the overall completion time regardless of success/failure. 
+ defer latencies.Add("time-to-done", start) + + svc, err := v1a1test.CreateLatestService(t, clients, names, + v1alpha1testing.WithResourceRequirements(corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20Mi"), + }, + }), + v1alpha1testing.WithConfigAnnotations(map[string]string{ + "autoscaling.knative.dev/maxScale": "1", + }), + v1alpha1testing.WithReadinessProbe(&corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + }, + }, + }), + v1alpha1testing.WithRevisionTimeoutSeconds(10)) + + if err != nil { + t.Errorf("CreateLatestService() = %v", err) + return fmt.Errorf("CreateLatestService() failed: %w", err) + } + // Record the time it took to create the service. + latencies.Add("time-to-create", start) + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + // Send it to our cleanup logic (below) + cleanupCh <- names + + t.Logf("Wait for %s to become ready.", names.Service) + var url *url.URL + err = v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + if s.Status.URL == nil { + return false, nil + } + url = s.Status.URL.URL() + return v1a1test.IsServiceReady(s) + }, "ServiceUpdatedWithURL") + if err != nil { + t.Errorf("WaitForServiceState(w/ Domain) = %v", err) + return fmt.Errorf("WaitForServiceState(w/ Domain) failed: %w", err) + } + + // Record the time it took to become ready. 
+ latencies.Add("time-to-ready", start) + + _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(test.HelloWorldText), abortOnTimeout(ctx))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Errorf("WaitForEndpointState(expected text) = %v", err) + return fmt.Errorf("WaitForEndpointState(expected text) failed: %w", err) + } + + // Record the time it took to get back a 200 with the expected text. + latencies.Add("time-to-200", start) + + // Start probing the domain until the test is complete. + pm.Spawn(url) + + t.Logf("%s is ready.", names.Service) + return nil + }) + } + + // Wait for all of the service creations to complete (possibly in failure), + // and signal the done channel. + doneCh := make(chan error) + go func() { + defer close(doneCh) + if err := wg.Wait(); err != nil { + doneCh <- err + } + }() + + for { + // As services get created, add logic to clean them up. + // When all of the creations have finished, then stop all of the active probers + // and check our SLIs against our SLOs. + // All of this has to finish within the configured timeout. + select { + case names := <-cleanupCh: + t.Logf("Added %v to cleanup routine.", names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + case err := <-doneCh: + if err != nil { + // If we don't do this first, then we'll see tons of 503s from the ongoing probes + // as we tear down the things they are probing. + defer pm.Stop() + t.Fatalf("Unexpected error: %v", err) + } + + // This ProberManager implementation waits for minProbes before actually stopping. 
+ if err := pm.Stop(); err != nil { + t.Fatalf("Stop() = %v", err) + } + // Check each of the local SLOs + pm.Foreach(func(u *url.URL, p test.Prober) { + if err := test.CheckSLO(localSLO, u.String(), p); err != nil { + t.Errorf("CheckSLO() = %v", err) + } + }) + // Check the global SLO + if err := test.CheckSLO(globalSLO, "aggregate", pm); err != nil { + t.Errorf("CheckSLO() = %v", err) + } + return + } + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/service_to_service_test.go b/test/vendor/knative.dev/serving/test/e2e/service_to_service_test.go new file mode 100644 index 0000000000..4fc1ad37cc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/service_to_service_test.go @@ -0,0 +1,338 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "testing" + + pkgTest "knative.dev/pkg/test" + ingress "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logstream" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/pkg/apis/autoscaling" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + v1alph1testing "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" +) + +const ( + targetHostEnv = "TARGET_HOST" + gatewayHostEnv = "GATEWAY_HOST" + helloworldResponse = "Hello World! How about some tasty noodles?" 
+) + +// testCases for table-driven testing. +var testCases = []struct { + // name of the test case, which will be inserted in names of routes, configurations, etc. + // Use a short name here to avoid hitting the 63-character limit in names + // (e.g., "service-to-service-call-svc-cluster-local-uagkdshh-frkml-service" is too long.) + name string + // suffix to be trimmed from TARGET_HOST. + suffix string +}{ + {"fqdn", ""}, + {"short", ".cluster.local"}, + {"shortest", ".svc.cluster.local"}, +} + +// testcases for table-driven testing. +var testInjection = []struct { + name string + // injectA indicates whether istio sidecar injection is enabled for httpproxy service + // injectB indicates whether istio sidecar injection is enabled for helloworld service + injectA bool + injectB bool +}{ + {"both-disabled", false, false}, + {"a-disabled", false, true}, + {"b-disabled", true, false}, + {"both-enabled", true, true}, +} + +func sendRequest(t *testing.T, clients *test.Clients, resolvableDomain bool, url *url.URL) (*spoof.Response, error) { + t.Logf("The domain of request is %s.", url.Hostname()) + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, url.Hostname(), resolvableDomain) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + return nil, err + } + return client.Do(req) +} + +func testProxyToHelloworld(t *testing.T, clients *test.Clients, helloworldURL *url.URL, inject bool, accessibleExternal bool) { + // Create envVars to be used in httpproxy app. + envVars := []corev1.EnvVar{{ + Name: targetHostEnv, + Value: helloworldURL.Hostname(), + }} + + // When resolvable domain is not set for external access test, use gateway for the endpoint as xip.io is flaky. 
+ // ref: https://github.com/knative/serving/issues/5389 + if !test.ServingFlags.ResolvableDomain && accessibleExternal { + gatewayTarget := pkgTest.Flags.IngressEndpoint + if gatewayTarget == "" { + var err error + if gatewayTarget, err = ingress.GetIngressEndpoint(clients.KubeClient.Kube); err != nil { + t.Fatalf("Failed to get gateway IP: %v", err) + } + } + envVars = append(envVars, corev1.EnvVar{ + Name: gatewayHostEnv, + Value: gatewayTarget, + }) + } + + // Set up httpproxy app. + t.Log("Creating a Service for the httpproxy test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "httpproxy", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1alph1testing.WithEnv(envVars...), + v1alph1testing.WithConfigAnnotations(map[string]string{ + autoscaling.WindowAnnotationKey: "6s", // shortest permitted; this is not required here, but for uniformity. + "sidecar.istio.io/inject": strconv.FormatBool(inject), + })) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + url := resources.Route.Status.URL.URL() + if _, err = pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "HTTPProxy", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Failed to start endpoint of httpproxy: %v", err) + } + t.Log("httpproxy is ready.") + + // Send request to httpproxy to trigger the http call from httpproxy Pod to internal service of helloworld app. 
+ response, err := sendRequest(t, clients, test.ServingFlags.ResolvableDomain, url) + if err != nil { + t.Fatalf("Failed to send request to httpproxy: %v", err) + } + // We expect the response from httpproxy is equal to the response from helloworld + if helloworldResponse != strings.TrimSpace(string(response.Body)) { + t.Fatalf("The httpproxy response = %q, want: %q.", string(response.Body), helloworldResponse) + } + + // As a final check (since we know they are both up), check that if we can + // (or cannot) access the helloworld app externally. + response, err = sendRequest(t, clients, test.ServingFlags.ResolvableDomain, helloworldURL) + if err != nil { + if test.ServingFlags.ResolvableDomain { + // When we're testing with resolvable domains, we might fail earlier trying + // to resolve the shorter domain(s) off-cluster. + return + } + t.Fatalf("Unexpected error when sending request to helloworld: %v", err) + } + expectedStatus := http.StatusNotFound + if accessibleExternal { + expectedStatus = http.StatusOK + } + if got, want := response.StatusCode, expectedStatus; got != want { + t.Errorf("helloworld response StatusCode = %v, want %v", got, want) + } +} + +// In this test, we set up two apps: helloworld and httpproxy. +// helloworld is a simple app that displays a plaintext string. +// httpproxy is a proxy that redirects request to internal service of helloworld app +// with FQDN {route}.{namespace}.svc.cluster.local, or {route}.{namespace}.svc, or +// {route}.{namespace}. +// The expected result is that the request sent to httpproxy app is successfully redirected +// to helloworld app. 
+func TestServiceToServiceCall(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + t.Log("Creating a Service for the helloworld test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + withInternalVisibility := v1alph1testing.WithServiceLabel( + routeconfig.VisibilityLabelKey, routeconfig.VisibilityClusterLocal) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withInternalVisibility, + v1alph1testing.WithConfigAnnotations(map[string]string{ + autoscaling.WindowAnnotationKey: "6s", // shortest permitted; this is not required here, but for uniformity. + })) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + if resources.Route.Status.URL.Host == "" { + t.Fatalf("Route is missing .Status.URL: %#v", resources.Route.Status) + } + if resources.Route.Status.Address == nil { + t.Fatalf("Route is missing .Status.Address: %#v", resources.Route.Status) + } + // Check that the target Route's Domain matches its cluster local address. + if want, got := resources.Route.Status.Address.URL, resources.Route.Status.URL; got.String() != want.String() { + t.Errorf("Route.Status.URL.Host = %v, want %v", got, want) + } + t.Logf("helloworld internal domain is %s.", resources.Route.Status.URL.Host) + + // helloworld app and its route are ready. Running the test cases now. 
+ for _, tc := range testCases { + helloworldURL := &url.URL{ + Scheme: resources.Route.Status.URL.Scheme, + Host: strings.TrimSuffix(resources.Route.Status.URL.Host, tc.suffix), + Path: resources.Route.Status.URL.Path, + } + t.Run(tc.name, func(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + testProxyToHelloworld(t, clients, helloworldURL, true /*inject*/, false /*accessible externally*/) + }) + } +} + +func testSvcToSvcCallViaActivator(t *testing.T, clients *test.Clients, injectA bool, injectB bool) { + t.Log("Creating helloworld Service") + + testNames := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + withInternalVisibility := v1alph1testing.WithServiceLabel( + routeconfig.VisibilityLabelKey, routeconfig.VisibilityClusterLocal) + + test.CleanupOnInterrupt(func() { test.TearDown(clients, testNames) }) + defer test.TearDown(clients, testNames) + + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &testNames, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1alph1testing.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + "sidecar.istio.io/inject": strconv.FormatBool(injectB), + }), withInternalVisibility) + if err != nil { + t.Fatalf("Failed to create a service: %v", err) + } + + // Wait for the activator endpoints to equalize. 
+ if err := waitForActivatorEndpoints(resources, clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } + + // Send request to helloworld app via httpproxy service + testProxyToHelloworld(t, clients, resources.Route.Status.URL.URL(), injectA, false /*accessible externally*/) +} + +// Same test as TestServiceToServiceCall but before sending requests +// we're waiting for target app to be scaled to zero +func TestServiceToServiceCallViaActivator(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + for _, tc := range testInjection { + t.Run(tc.name, func(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + testSvcToSvcCallViaActivator(t, clients, tc.injectA, tc.injectB) + }) + } +} + +// This test is similar to TestServiceToServiceCall, but creates an external accessible helloworld service instead. +// It verifies that the helloworld service is accessible internally from both internal domain and external domain. +// But it's only accessible from external via the external domain +func TestCallToPublicService(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + t.Log("Creating a Service for the helloworld test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1alph1testing.WithConfigAnnotations(map[string]string{ + autoscaling.WindowAnnotationKey: "6s", // shortest permitted; this is not required here, but for uniformity. 
+ })) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + if resources.Route.Status.URL.Host == "" { + t.Fatalf("Route is missing .Status.URL: %#v", resources.Route.Status) + } + if resources.Route.Status.Address == nil { + t.Fatalf("Route is missing .Status.Address: %#v", resources.Route.Status) + } + + gatewayTestCases := []struct { + name string + url *url.URL + accessibleExternally bool + }{ + {"local_address", resources.Route.Status.Address.URL.URL(), false}, + {"external_address", resources.Route.Status.URL.URL(), true}, + } + + for _, tc := range gatewayTestCases { + t.Run(tc.name, func(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + testProxyToHelloworld(t, clients, tc.url, false /*inject*/, tc.accessibleExternally) + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/e2e/subroutes_test.go b/test/vendor/knative.dev/serving/test/e2e/subroutes_test.go new file mode 100644 index 0000000000..e6b10d8180 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/e2e/subroutes_test.go @@ -0,0 +1,399 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package e2e + +import ( + "fmt" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/network" + "knative.dev/pkg/ptr" + "knative.dev/pkg/test/logstream" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + routeconfig "knative.dev/serving/pkg/reconciler/route/config" + "knative.dev/serving/pkg/reconciler/route/resources/labels" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + . "knative.dev/serving/pkg/testing/v1alpha1" +) + +// In this test, we set up two apps: helloworld and httpproxy. +// helloworld is a simple app that displays a plaintext string with private visibility. +// httpproxy is a proxy that redirects request to internal service of helloworld app +// with {tag}-{route}.{namespace}.svc.cluster.local, or {tag}-{route}.{namespace}.svc, or {tag}-{route}.{namespace}. +// The expected result is that the request sent to httpproxy app is successfully redirected +// to helloworld app when trying to communicate via local address only. +func TestSubrouteLocalSTS(t *testing.T) { // We can't use a longer more descriptive name because routes will fail DNS checks. 
(Max 64 characters) + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + t.Log("Creating a Service for the helloworld test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + tag := "current" + + withInternalVisibility := WithServiceLabel(routeconfig.VisibilityLabelKey, routeconfig.VisibilityClusterLocal) + withTrafficSpec := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + Tag: tag, + Percent: ptr.Int64(100), + }, + }, + }, + }) + + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withInternalVisibility, withTrafficSpec) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + t.Logf("helloworld internal domain is %s.", resources.Route.Status.URL.Host) + + // helloworld app and its route are ready. Running the test cases now. 
+ for _, tc := range testCases { + domain := fmt.Sprintf("%s-%s", tag, resources.Route.Status.Address.URL.Host) + helloworldURL := resources.Route.Status.Address.URL.URL() + helloworldURL.Host = strings.TrimSuffix(domain, tc.suffix) + t.Run(tc.name, func(t *testing.T) { + testProxyToHelloworld(t, clients, helloworldURL, true, false) + }) + } +} + +func TestSubrouteVisibilityPublicToPrivate(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + t.Log("Creating a Service for the helloworld test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + subrouteTag1 := "my-tag" + subrouteTag2 := "my-tag2" + + withTrafficSpec := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: subrouteTag1, + Percent: ptr.Int64(100), + }, + }}, + }) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + withTrafficSpec) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %w", names.Service, err) + } + + if isClusterLocal, err := isTrafficClusterLocal(resources.Route.Status.Traffic, subrouteTag1); err != nil { + t.Fatalf(err.Error()) + } else if isClusterLocal { + t.Fatalf("Expected subroutes with tag %s to be not cluster local", subrouteTag1) + } + + if isRouteClusterLocal(resources.Route.Status) { + t.Fatalf("Expected route to be not cluster local") + } + + // Update subroute1 to private. 
+ serviceName := serviceNameForRoute(subrouteTag1, resources.Route.Name) + svc, err := clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get k8s service to modify: %w", err) + } + + svcCopy := svc.DeepCopy() + labels.SetVisibility(&svcCopy.ObjectMeta, true) + + svcpatchBytes, err := test.CreateBytePatch(svc, svcCopy) + if err != nil { + t.Fatalf(err.Error()) + } + + if _, err = clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Patch(serviceName, types.JSONPatchType, svcpatchBytes); err != nil { + t.Fatalf("Failed to patch service: %w", err) + } + + //Create subroute2 in kservice. + ksvcCopy := resources.Service.DeepCopy() + ksvcCopyRouteTraffic := append(ksvcCopy.Spec.Traffic, + v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: subrouteTag2, + LatestRevision: ptr.Bool(true), + }, + }) + + if _, err = v1a1test.UpdateServiceRouteSpec(t, clients, names, v1alpha1.RouteSpec{Traffic: ksvcCopyRouteTraffic}); err != nil { + t.Fatalf("Failed to patch service: %w", err) + } + + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, resources.Route.Name, func(r *v1alpha1.Route) (bool, error) { + //Check subroute1 is not cluster-local + if isClusterLocal, err := isTrafficClusterLocal(r.Status.Traffic, subrouteTag1); err != nil { + return false, err + } else if !isClusterLocal { + return false, nil + } + //Check subroute2 is cluster local + if isClusterLocal, err := isTrafficClusterLocal(r.Status.Traffic, subrouteTag2); err != nil { + return false, nil + } else if isClusterLocal { + return false, nil + } + return true, nil + }, "Subroutes are not in correct state"); err != nil { + t.Fatalf("Expected subroute1 with tag %s to be not cluster local; subroute2 with tag %s to be cluster local: %w", subrouteTag1, subrouteTag2, err) + } + + //Update route to private. 
+ ksvclabelCopy := resources.Service.DeepCopy() + labels.SetVisibility(&ksvclabelCopy.ObjectMeta, true) + if _, err = v1a1test.PatchService(t, clients, resources.Service, ksvclabelCopy); err != nil { + t.Fatalf("Failed to patch service: %s", err.Error()) + } + + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, resources.Route.Name, func(r *v1alpha1.Route) (bool, error) { + return isRouteClusterLocal(r.Status), nil + }, "Route is cluster local"); err != nil { + t.Fatalf("Route did not become cluster local: %s", err.Error()) + } + + clusterLocalRoute, err := clients.ServingAlphaClient.Routes.Get(resources.Route.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf(err.Error()) + } + + for _, tag := range []string{subrouteTag1, subrouteTag2} { + if isClusterLocal, err := isTrafficClusterLocal(clusterLocalRoute.Status.Traffic, tag); err != nil { + t.Fatalf(err.Error()) + } else if !isClusterLocal { + t.Fatalf("Expected subroute with tag %s to be cluster local", tag) + } + } +} + +func TestSubrouteVisibilityPrivateToPublic(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + t.Log("Creating a Service for the helloworld test app.") + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "helloworld", + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + subrouteTag1 := "my-tag" + subrouteTag2 := "my-tag2" + + withInternalVisibility := WithServiceLabel(routeconfig.VisibilityLabelKey, routeconfig.VisibilityClusterLocal) + withTrafficSpec := WithInlineRouteSpec(v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{ + { + TrafficTarget: v1.TrafficTarget{ + Tag: subrouteTag1, + Percent: ptr.Int64(50), + }, + }, + { + TrafficTarget: v1.TrafficTarget{ + Tag: subrouteTag2, + Percent: ptr.Int64(50), + }, + }, + }, + }) + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https 
TODO(taragu) turn this on after helloworld test running with https */ + withTrafficSpec, withInternalVisibility) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + for _, tag := range []string{subrouteTag1, subrouteTag2} { + if isClusterLocal, err := isTrafficClusterLocal(resources.Route.Status.Traffic, tag); err != nil { + t.Fatalf(err.Error()) + } else if !isClusterLocal { + t.Fatalf("Expected subroute with tag %s to be cluster local", tag) + } + } + + if !isRouteClusterLocal(resources.Route.Status) { + t.Fatalf("Expected route to be cluster local") + } + + //Update subroute1 to private + serviceName := serviceNameForRoute(subrouteTag1, resources.Route.Name) + svc, err := clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get k8s service to modify: %s", err.Error()) + } + + svcCopy := svc.DeepCopy() + labels.SetVisibility(&svcCopy.ObjectMeta, true) + + svcpatchBytes, err := test.CreateBytePatch(svc, svcCopy) + if err != nil { + t.Fatalf(err.Error()) + } + + if _, err = clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Patch(serviceName, types.JSONPatchType, svcpatchBytes); err != nil { + t.Fatalf("Failed to patch service: %s", err.Error()) + } + + afterCh := time.After(5 * time.Second) + + // check subroutes are private + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, resources.Route.Name, func(r *v1alpha1.Route) (bool, error) { + for _, tag := range []string{subrouteTag1, subrouteTag2} { + if isClusterLocal, err := isTrafficClusterLocal(r.Status.Traffic, tag); err != nil { + return false, err + } else if !isClusterLocal { + return false, fmt.Errorf("Expected sub route with tag %s to be cluster local", tag) + } + } + select { + // consistently check for subroutes to be cluster-local for 5s + case <-afterCh: + return true, nil + default: + return false, nil + } + }, "sub routes are not 
ready"); err != nil { + t.Fatalf("Expected sub routes are not cluster local: %s", err.Error()) + } + + // check route is private + if !isRouteClusterLocal(resources.Route.Status) { + t.Fatalf("Expected route to be cluster local") + } + + // change route - public (Updating ksvc as it will reconcile the route) + // check route = public + ksvclabelCopy := resources.Service.DeepCopy() + labels.SetVisibility(&ksvclabelCopy.ObjectMeta, false) + if _, err = v1a1test.PatchService(t, clients, resources.Service, ksvclabelCopy); err != nil { + t.Fatalf("Failed to patch service: %s", err.Error()) + } + + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, resources.Route.Name, func(r *v1alpha1.Route) (b bool, e error) { + return !isRouteClusterLocal(r.Status), nil + }, "Route is public"); err != nil { + t.Fatalf("Route is not public: %s", err.Error()) + } + + publicRoute, err := clients.ServingAlphaClient.Routes.Get(resources.Route.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf(err.Error()) + } + + //Check subroute 2 is public. + if isClusterLocal, err := isTrafficClusterLocal(publicRoute.Status.Traffic, subrouteTag2); err != nil { + t.Fatalf(err.Error()) + } else if isClusterLocal { + t.Fatalf("Expected subroute with tag %s to be not cluster local", subrouteTag2) + } + + //Check subroute1 is private. This check is expected to fail on v0.8.1 and earlier as subroute1 becomes public) + if isClusterLocal, err := isTrafficClusterLocal(publicRoute.Status.Traffic, subrouteTag1); err != nil { + t.Fatalf(err.Error()) + } else if !isClusterLocal { + t.Fatalf("Expected subroute with tag %s to be cluster local", subrouteTag1) + } + + //Update and check subroute 1 to private. 
+ serviceName1 := serviceNameForRoute(subrouteTag1, resources.Route.Name) + svc1, err := clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Get(serviceName1, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get k8s service to modify: %w", err) + } + + svc1Copy := svc1.DeepCopy() + labels.SetVisibility(&svcCopy.ObjectMeta, true) + + svc1patchBytes, err := test.CreateBytePatch(svc1, svc1Copy) + if err != nil { + t.Fatalf(err.Error()) + } + + if _, err = clients.KubeClient.Kube.CoreV1().Services(test.ServingNamespace).Patch(serviceName1, types.JSONPatchType, svc1patchBytes); err != nil { + t.Fatalf("Failed to patch service: %w", err) + } + + if err = v1a1test.WaitForRouteState(clients.ServingAlphaClient, resources.Route.Name, v1a1test.IsRouteReady, "Route is ready"); err != nil { + t.Fatalf("Route did not become ready: %w", err) + } + + if isClusterLocal, err := isTrafficClusterLocal(publicRoute.Status.Traffic, subrouteTag1); err != nil { + t.Fatalf(err.Error()) + } else if !isClusterLocal { + t.Fatalf("Expected subroute with tag %s to be cluster local", subrouteTag1) + } +} + +// Function check whether traffic with tag is cluster local or +func isTrafficClusterLocal(tt []v1alpha1.TrafficTarget, tag string) (bool, error) { + for _, traffic := range tt { + if traffic.Tag == tag { + return strings.HasSuffix(traffic.TrafficTarget.URL.Host, network.GetClusterDomainName()), nil + } + } + return false, fmt.Errorf("Unable to find traffic target with tag %s", tag) +} + +func isRouteClusterLocal(rs v1alpha1.RouteStatus) bool { + return strings.HasSuffix(rs.URL.Host, network.GetClusterDomainName()) +} + +func serviceNameForRoute(subrouteTag, routeName string) string { + return fmt.Sprintf("%s-%s", subrouteTag, routeName) +} diff --git a/test/vendor/knative.dev/serving/test/e2e/websocket_test.go b/test/vendor/knative.dev/serving/test/e2e/websocket_test.go new file mode 100644 index 0000000000..b5ff493dbd --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/e2e/websocket_test.go @@ -0,0 +1,359 @@ +// +build e2e + +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/gorilla/websocket" + "golang.org/x/sync/errgroup" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + ingress "knative.dev/pkg/test/ingress" + "knative.dev/pkg/test/logstream" + "knative.dev/serving/pkg/apis/autoscaling" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +const ( + connectRetryInterval = 1 * time.Second + connectTimeout = 1 * time.Minute + wsServerTestImageName = "wsserver" +) + +// connect attempts to establish WebSocket connection with the Service. +// It will retry until reaching `connectTimeout` duration. 
+func connect(t *testing.T, clients *test.Clients, domain string) (*websocket.Conn, error) { + var ( + err error + address string + ) + + if test.ServingFlags.ResolvableDomain { + address = domain + } else if pkgTest.Flags.IngressEndpoint != "" { + address = pkgTest.Flags.IngressEndpoint + } else if address, err = ingress.GetIngressEndpoint(clients.KubeClient.Kube); err != nil { + return nil, err + } + + u := url.URL{Scheme: "ws", Host: address, Path: "/"} + var conn *websocket.Conn + waitErr := wait.PollImmediate(connectRetryInterval, connectTimeout, func() (bool, error) { + t.Logf("Connecting using websocket: url=%s, host=%s", u.String(), domain) + c, resp, err := websocket.DefaultDialer.Dial(u.String(), http.Header{"Host": {domain}}) + if err == nil { + t.Log("WebSocket connection established.") + conn = c + return true, nil + } + if resp == nil { + // We don't have an HTTP response, probably TCP errors. + t.Logf("Connection failed: %v", err) + return false, nil + } + body := &bytes.Buffer{} + defer resp.Body.Close() + if _, readErr := body.ReadFrom(resp.Body); readErr != nil { + t.Logf("Connection failed: %v. Failed to read HTTP response: %v", err, readErr) + return false, nil + } + t.Logf("HTTP connection failed: %v. Response=%+v. ResponseBody=%q", err, resp, body.String()) + return false, nil + }) + return conn, waitErr +} + +const message = "Hello, websocket" + +func validateWebSocketConnection(t *testing.T, clients *test.Clients, names test.ResourceNames) error { + t.Helper() + // Establish the websocket connection. + conn, err := connect(t, clients, names.URL.Hostname()) + if err != nil { + return err + } + defer conn.Close() + + // Send a message. + t.Logf("Sending message %q to server.", message) + if err = conn.WriteMessage(websocket.TextMessage, []byte(message)); err != nil { + return err + } + t.Log("Message sent.") + + // Read back the echoed message and compared with sent. 
+ _, recv, err := conn.ReadMessage() + if err != nil { + return err + } else if strings.HasPrefix(string(recv), message) { + t.Logf("Received message %q from echo server.", recv) + return nil + } + return fmt.Errorf("expected to receive back the message: %q but received %q", message, string(recv)) +} + +// Connects to a WebSocket target and executes `numReqs` requests. +// Collects the answer frequences and returns them. +// Returns nil map and error if any of the requests fails. +func webSocketResponseFreqs(t *testing.T, clients *test.Clients, url string, numReqs int) (map[string]int, error) { + t.Helper() + var g errgroup.Group + respCh := make(chan string, numReqs) + resps := map[string]int{} + for i := 0; i < numReqs; i++ { + g.Go(func() error { + // Establish the websocket connection. Since they are persistent + // we can't reuse. + conn, err := connect(t, clients, url) + if err != nil { + return err + } + defer conn.Close() + + // Send a message. + t.Logf("Sending message %q to server.", message) + if err = conn.WriteMessage(websocket.TextMessage, []byte(message)); err != nil { + return err + } + t.Log("Message sent.") + + // Read back the echoed message and put it into the channel. + _, recv, err := conn.ReadMessage() + if err != nil { + return err + } + t.Logf("Received message %q from echo server.", string(recv)) + respCh <- string(recv) + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + close(respCh) + for r := range respCh { + resps[r]++ + } + + return resps, nil +} + +// TestWebSocket +// (1) creates a service based on the `wsserver` image, +// (2) connects to the service using websocket, +// (3) sends a message, and +// (4) verifies that we receive back the same message. 
+func TestWebSocket(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: wsServerTestImageName, + } + + // Clean up in both abnormal and normal exits. + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + if _, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */); err != nil { + t.Fatalf("Failed to create WebSocket server: %v", err) + } + + // Validate the websocket connection. + if err := validateWebSocketConnection(t, clients, names); err != nil { + t.Error(err) + } +} + +// and with -1 as target burst capacity and then validates that we can still serve. +func TestWebSocketViaActivator(t *testing.T) { + t.Parallel() + cancel := logstream.Start(t) + defer cancel() + + clients := Setup(t) + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: wsServerTestImageName, + } + + // Clean up in both abnormal and normal exits. + defer test.TearDown(clients, names) + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + + resources, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.TargetBurstCapacityKey: "-1", + }), + ) + if err != nil { + t.Fatalf("Failed to create WebSocket server: %v", err) + } + + // Wait for the activator endpoints to equalize. 
+ if err := waitForActivatorEndpoints(resources, clients); err != nil { + t.Fatalf("Never got Activator endpoints in the service: %v", err) + } + if err := validateWebSocketConnection(t, clients, names); err != nil { + t.Error(err) + } +} + +func TestWebSocketBlueGreenRoute(t *testing.T) { + t.Parallel() + clients := test.Setup(t) + + names := test.ResourceNames{ + // Set Service and Image for names to create the initial service + Service: test.ObjectNameForTest(t), + Image: wsServerTestImageName, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Setup Initial Service + t.Log("Creating a new Service in runLatest") + objects, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with HTTPS */ + func(s *v1alpha1.Service) { + s.Spec.ConfigurationSpec.Template.Spec.Containers[0].Env = []corev1.EnvVar{{ + Name: "SUFFIX", + Value: "Blue", + }} + }, + ) + if err != nil { + t.Fatalf("Failed to create initial Service: %v: %v", names.Service, err) + } + + blue := names + blue.TrafficTarget = "blue" + + t.Log("Updating the Service to use a different suffix") + greenSvc := objects.Service.DeepCopy() + greenSvc.Spec.ConfigurationSpec.Template.Spec.Containers[0].Env[0].Value = "Green" + greenSvc, err = v1a1test.PatchService(t, clients, objects.Service, greenSvc) + if err != nil { + t.Fatalf("Patch update for Service %s with new env var failed: %v", names.Service, err) + } + objects.Service = greenSvc + green := names + green.TrafficTarget = "green" + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + green.Revision, err = v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the new Revision: %v", names.Service, err) + } + + t.Log("Updating RouteSpec") + if _, err := v1a1test.UpdateServiceRouteSpec(t, clients, 
names, v1alpha1.RouteSpec{ + Traffic: []v1alpha1.TrafficTarget{{ + TrafficTarget: v1.TrafficTarget{ + Tag: blue.TrafficTarget, + RevisionName: blue.Revision, + Percent: ptr.Int64(50), + }, + }, { + TrafficTarget: v1.TrafficTarget{ + Tag: green.TrafficTarget, + RevisionName: green.Revision, + Percent: ptr.Int64(50), + }, + }}, + }); err != nil { + t.Fatalf("Failed to update Service route spec: %v", err) + } + + t.Log("Wait for the service domains to be ready") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic: %v", names.Service, err) + } + + service, err := clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error fetching Service %s: %v", names.Service, err) + } + + // Update the names + for _, tt := range service.Status.Traffic { + if tt.Tag == green.TrafficTarget { + green.URL = tt.URL.URL() + } + } + if green.URL == nil { + t.Fatalf("Unable to fetch Green URL from traffic targets: %#v", service.Status.Traffic) + } + // Since we just validate that Network layer can properly route requests to different targets + // We'll just use the service URL. + tealURL := service.Status.URL.URL().Hostname() + + // But since Istio network programming takes some time to take effect + // and it doesn't have a Status, we'll probe `green` until it's ready first. + if err := validateWebSocketConnection(t, clients, green); err != nil { + t.Fatalf("Error initializing WS connection: %v", err) + } + + // The actual test. + const ( + numReqs = 200 + // Quite high, but makes sure we didn't get a one-off successful response from either target. 
+		tolerance = 25
+	)
+	resps, err := webSocketResponseFreqs(t, clients, tealURL, numReqs)
+	if err != nil {
+		t.Errorf("Failed to send and receive websocket messages: %v", err)
+	}
+	if len(resps) != 2 {
+		t.Errorf("Number of responses: %d, want: 2", len(resps))
+	}
+	for k, f := range resps {
+		if got, want := abs(f-numReqs/2), tolerance; got > want {
+			t.Errorf("Target %s got %d responses, expect in [%d, %d] interval", k, f, numReqs/2-tolerance, numReqs/2+tolerance)
+		}
+	}
+}
+
+func abs(a int) int {
+	if a < 0 {
+		return -a
+	}
+	return a
+}
diff --git a/test/vendor/knative.dev/serving/test/e2e_flags.go b/test/vendor/knative.dev/serving/test/e2e_flags.go
new file mode 100644
index 0000000000..f8f0bc640b
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/e2e_flags.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file contains logic to encapsulate flags which are needed to specify
+// what cluster, etc. to use for e2e tests.
+
+package test
+
+import (
+	"flag"
+
+	"knative.dev/serving/pkg/network"
+)
+
+const (
+	// ServingNamespace is the default namespace for serving e2e tests
+	ServingNamespace = "serving-tests"
+
+	// AlternativeServingNamespace is a different namespace to run cross-
+	// namespace tests in.
+	AlternativeServingNamespace = "serving-tests-alt"
+
+	// Environment propagation conformance test objects
+
+	// ConformanceConfigMap is the name of the configmap to propagate env variables from
+	ConformanceConfigMap = "conformance-test-configmap"
+	// ConformanceSecret is the name of the secret to propagate env variables from
+	ConformanceSecret = "conformance-test-secret"
+	// EnvKey is the configmap/secret key which contains test value
+	EnvKey = "testKey"
+	// EnvValue is the configmap/secret test value to match env variable with
+	EnvValue = "testValue"
+)
+
+// ServingFlags holds the flags or defaults for knative/serving settings in the user's environment.
+var ServingFlags = initializeServingFlags()
+
+// ServingEnvironmentFlags holds the e2e flags needed only by the serving repo.
+type ServingEnvironmentFlags struct {
+	ResolvableDomain bool   // Resolve Route controller's `domainSuffix`
+	Https            bool   // Indicates whether the test service will be created with https
+	IngressClass     string // Indicates the class of Ingress provider to test.
+}
+
+func initializeServingFlags() *ServingEnvironmentFlags {
+	var f ServingEnvironmentFlags
+
+	// Only define and set flags here. Flag values cannot be read at package init time.
+	flag.BoolVar(&f.ResolvableDomain, "resolvabledomain", false,
+		"Set this flag to true if you have configured the `domainSuffix` on your Route controller to a domain that will resolve to your test cluster.")
+	flag.BoolVar(&f.Https, "https", false,
+		"Set this flag to true to run all tests with https.")
+
+	flag.StringVar(&f.IngressClass, "ingressClass", network.IstioIngressClassName,
+		"Set this flag to the ingress class to test against.")
+
+	return &f
+}
diff --git a/test/vendor/knative.dev/serving/test/example-build.yaml b/test/vendor/knative.dev/serving/test/example-build.yaml
new file mode 100644
index 0000000000..a8d472c251
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/example-build.yaml
@@ -0,0 +1,8 @@
+apiVersion: testing.internal.knative.dev/v1alpha1
+kind: Build
+metadata:
+  name: example
+spec:
+  # failure:
+  #   reason: hello
+  #   message: world
diff --git a/test/vendor/knative.dev/serving/test/performance/Benchmarks.md b/test/vendor/knative.dev/serving/test/performance/Benchmarks.md
new file mode 100644
index 0000000000..825676c579
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/performance/Benchmarks.md
@@ -0,0 +1,164 @@
+# Benchmarks
+
+Knative performance benchmarks are tests geared towards producing useful
+performance metrics of the knative system. All the raw metrics are stored in
+[mako](https://github.com/google/mako)
+
+## Writing new benchmarks
+
+For creating new benchmarks, follow the steps:
+
+1. Create a new directory under `./test/performance/benchmarks`.
+2. Create two benchmarks in the benchmark directory using
+   [mako](https://github.com/google/mako/blob/master/docs/GUIDE.md#preparing-your-benchmark)
+   as mentioned in [benchmark configs](#benchmark-configs).
+3. Create a `kodata` directory and add the [four symlinks](#Benchmark-Symlinks).
+4. Write a `Go` program that runs the test and stores the result in
+   [mako](#Writing-to-mako)
+5.
(Optional)Create a + [setup.yaml](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml) + that includes all the K8S and Knative objects needed to run the test. +6. Create a + [cron.yaml](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml) + that defines how to run and capture metrics as mentioned in + [benchmark cronjobs](#Benchmark-cronjobs). +7. Test and confirm the dev config works on your personal cluster. +8. Add a + [cluster.yaml](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/cluster.yaml) + that includes the configuration information for a GKE cluster, which will be + created to run this benchmark. If the file is not added, a minimum cluster + will be created. +9. Create a PR with all the changes and get it merged. +10. Once the cluster is created, the hourly job will build, push and apply all + the updates and the SUT cronjobs will start running. The metrics can be + viewed at [mako.dev](https://mako.dev/) + +## Benchmark Configs + +We will be using two +[mako benchmarks](https://github.com/google/mako/blob/master/docs/GUIDE.md#preparing-your-benchmark) +with the same config. + +1. [dev.config](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/dev.config) + This will be only used for development and testing changes and will have less + restrictions on who can write to the benchmark. +2. [prod.config](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/prod.config) + This will be used for looking at the state of the project and will be + restricted to prod-only robots(and some admins). + +## Benchmark Symlinks + +Every benchmark directory under `/test/performance/benchmarks` has a `kodata` +directory and it should have four symlinks. + +1. 
[dev.config](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/kodata/dev.config)
+   Points to the dev.config file in the benchmark directory.
+
+   ```sh
+   ln -r -s ./test/performance/benchmarks/dataplane-probe/dev.config test/performance/benchmarks/dataplane-probe/continuous/kodata/
+   ```
+
+2. [prod.config](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/kodata/prod.config)
+   Points to the prod.config file in the benchmark directory.
+
+   ```sh
+   ln -r -s ./test/performance/benchmarks/dataplane-probe/prod.config test/performance/benchmarks/dataplane-probe/continuous/kodata/
+   ```
+
+3. [HEAD](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/kodata/HEAD)
+   Points to `.git/HEAD`.
+
+   ```sh
+   ln -r -s .git/HEAD test/performance/benchmarks/dataplane-probe/continuous/kodata/
+   ```
+
+4. [refs](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/kodata/refs)
+   Points to `.git/refs`.
+
+   ```sh
+   ln -r -s .git/refs test/performance/benchmarks/dataplane-probe/continuous/kodata/
+   ```
+
+These files will be packaged with the benchmark and pushed to the test image.
+The prod and dev configs define which benchmark the SUT will be writing to at
+runtime. The HEAD and refs are used to get the serving commitId at which the SUT
+is running.
+
+## Benchmark CronJobs
+
+Every benchmark will have one or more
+[cronjob](https://github.com/knative/serving/blob/master/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml)
+that defines how to run the benchmark SUT. In addition to the SUT container, we
+need to add the following:
+
+1.
[Mako sidecar](https://github.com/knative/serving/blob/d73bb8378cab8bb0c1825aa9802bea9ea2e6cb26/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml#L71-L78) + This allows mako to capture the metrics and write to its server. +2. [Mako Secrets volume](https://github.com/knative/serving/blob/d73bb8378cab8bb0c1825aa9802bea9ea2e6cb26/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml#L80-L82) + Robot ACL permissions to write to mako. +3. [Config Map](https://github.com/knative/serving/blob/d73bb8378cab8bb0c1825aa9802bea9ea2e6cb26/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml#L83-L85) + Config map that defines which config to use at runtime. + +```yaml +- name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako +``` + +## Writing to mako + +Knative uses [mako](https://github.com/google/mako) to store all the performance +metrics. To store these metrics in the test, follow these steps: + +1. Import the mako package + + ```go + import ( + "knative.dev/pkg/test/mako" + ) + ``` + +2. Create the mako client handle. By default, the mako package adds the + commitId, environment(dev/prod) and K8S version for each run of the SUT. If + you want to add any additional + [tags](https://github.com/google/mako/blob/master/docs/TAGS.md), you can + define them and pass them through + [setup](https://github.com/knative/pkg/blob/3588ed3e5c74b25740bbc535a2a43dfac998fa8a/test/mako/sidecar.go#L178). + + ```go + tag1 := "test" + tag2 := "test2" + mc, err := mako.Setup(ctx) + defer mc.ShutDownFunc(context.Background()) + ``` + +3. Store metrics in + [mako](https://github.com/google/mako/blob/master/docs/GUIDE.md) +4. 
Add + [analyzers](https://github.com/google/mako/blob/master/docs/GUIDE.md#add-regression-detection) + to analyze regressions(if any) +5. Visit [mako](https://mako.dev/project?name=Knative) to look at the benchmark + runs + +## Testing Existing Benchmarks + +For testing existing benchmarks, use +[dev.md](https://github.com/knative/serving/blob/master/test/performance/dev.md) + +## Admins + +We currently have two admins for benchmarking with mako. + +1. `[vagababov](https://github.com/vagababov)` +2. `[chizhg](https://github.com/chizhg)` diff --git a/test/vendor/knative.dev/serving/test/performance/README.md b/test/vendor/knative.dev/serving/test/performance/README.md new file mode 100644 index 0000000000..71b97c944f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/README.md @@ -0,0 +1,24 @@ +# Performance tests + +Knative performance tests are tests geared towards producing useful performance +metrics of the knative system. As such they can choose to take a blackbox +point-of-view of the system and use it just like an end-user might see it. They +can also go more whiteboxy to narrow down the components under test. + +## Load Generator + +Knative uses [vegeta](https://github.com/tsenart/vegeta) to generate HTTP load. +It can be configured to generate load at a predefined rate. Officially it +supports constant rate and sine rate, but if you want to generate load at a +different rate, you can write your own pacer by implementing +[Pacer](https://github.com/tsenart/vegeta/blob/ab06ddb56e2f6097bba8c5a6d168621088867949/lib/pacer.go#L13) +interface. Custom pacer implementations used in Knative tests are under +[pacers](https://github.com/knative/pkg/tree/master/test/vegeta/pacers). + +## Benchmarking + +Knative uses [mako](https://github.com/google/mako) for benchmarking. It +provides a set of tools for metrics data storage, charting, statistical +aggregation and performance regression analysis. 
To use it to create a benchmark +for Knative and run it continuously, please refer to +[Benchmarks.md](https://github.com/knative/serving/blob/master/test/performance/Benchmarks.md). diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/cluster.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/cluster.yaml new file mode 100644 index 0000000000..ede4de24a5 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/cluster.yaml @@ -0,0 +1,21 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration file for the cluster that runs this benchmark continuously. + +GKECluster: + location: "us-central1" + nodeCount: 10 + nodeType: "n1-standard-4" + addons: "HorizontalPodAutoscaling,HttpLoadBalancing" diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml new file mode 100644 index 0000000000..e20954fc30 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml @@ -0,0 +1,214 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: activator +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "5" + autoscaling.knative.dev/maxScale: "5" + # Always hook the activator in. + autoscaling.knative.dev/targetBurstCapacity: "-1" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: activator-with-cc +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "5" + autoscaling.knative.dev/maxScale: "5" + # Always hook the activator in. + autoscaling.knative.dev/targetBurstCapacity: "-1" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 100 +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: activator-with-cc-10 +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "15" + autoscaling.knative.dev/maxScale: "15" + # Always hook the activator in. + autoscaling.knative.dev/targetBurstCapacity: "-1" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 10 +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: activator-with-cc-1 +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "150" + autoscaling.knative.dev/maxScale: "150" + # Always hook the activator in. 
+ autoscaling.knative.dev/targetBurstCapacity: "-1" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 1 +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: queue-proxy +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "5" + autoscaling.knative.dev/maxScale: "5" + # Only hook the activator in when scaled to zero. + autoscaling.knative.dev/targetBurstCapacity: "0" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: queue-proxy-with-cc +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "5" + autoscaling.knative.dev/maxScale: "5" + # Only hook the activator in when scaled to zero. + autoscaling.knative.dev/targetBurstCapacity: "0" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 100 +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: queue-proxy-with-cc-10 +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "15" + autoscaling.knative.dev/maxScale: "15" + # Only hook the activator in when scaled to zero. + autoscaling.knative.dev/targetBurstCapacity: "0" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 10 +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: queue-proxy-with-cc-1 +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/minScale: "150" + autoscaling.knative.dev/maxScale: "150" + # Only hook the activator in when scaled to zero. 
+ autoscaling.knative.dev/targetBurstCapacity: "0" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale + containerConcurrency: 1 +--- +apiVersion: v1 +kind: Service +metadata: + name: deployment +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app: blah +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: deployment +spec: + replicas: 5 + selector: + matchLabels: + app: blah + template: + metadata: + labels: + app: blah + spec: + containers: + - name: user-container + image: knative.dev/serving/test/test_images/autoscale + ports: + - name: blah + containerPort: 8080 +--- +apiVersion: v1 +kind: Service +metadata: + name: istio +spec: + externalName: istio-ingressgateway.istio-system.svc.cluster.local + sessionAffinity: None + type: ExternalName +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: dataplane-probe +spec: + gateways: + - knative-ingress-gateway.knative-serving.svc.cluster.local + hosts: + - istio.default + - istio.default.svc + - istio.default.svc.cluster.local + http: + - match: + - authority: + regex: ^istio\.default(?::\d{1,5})?$ + - authority: + regex: ^istio\.default\.svc(?::\d{1,5})?$ + - authority: + regex: ^istio\.default\.svc\.cluster\.local(?::\d{1,5})?$ + route: + - destination: + host: deployment.default.svc.cluster.local + port: + number: 80 + weight: 100 diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml new file mode 100644 index 0000000000..7248009741 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml @@ -0,0 +1,466 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prober +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prober +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.internal.knative.dev"] + resources: ["serverlessservices"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prober +subjects: + - kind: ServiceAccount + name: prober + namespace: default +roleRef: + kind: ClusterRole + name: prober + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-deployment +spec: + schedule: "0,30 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=deployment", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + 
backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-istio +spec: + # Run every thirty minutes, offset from other jobs. + schedule: "3,33 * * * *" + # Run every 30 minutes, offset from other jobs. + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=istio", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-queue +spec: + # Run every thirty minutes, offset from other jobs. 
+ schedule: "6,36 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=queue", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-activator +spec: + # Run every thirty minutes, offset from other jobs. + schedule: "9,39 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=activator", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-activator-with-cc +spec: + # Run every thirty minutes, offset from other jobs. 
+ schedule: "12,42 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=activator-with-cc", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-queue-with-cc +spec: + # Run every thirty minutes, offset from other jobs. + schedule: "15,45 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=queue-with-cc", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-queue-with-cc-10 +spec: + # Run every thirty minutes, offset from other jobs. 
+ schedule: "18,48 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=queue-with-cc-10", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-queue-with-cc-1 +spec: + # Run every thirty minutes, offset from other jobs. + schedule: "21,51 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=queue-with-cc-1", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-activator-with-cc-10 +spec: + # Run every thirty minutes, offset from other jobs. 
+ schedule: "24,54 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=activator-with-cc-10", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: dataplane-probe-activator-with-cc-1 +spec: + # Run every thirty minutes, offset from other jobs. + schedule: "27,57 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: dataplane-probe + image: knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous + args: ["-target=activator-with-cc-1", "--duration=3m"] + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/HEAD b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/HEAD new file 
mode 120000 index 0000000000..a41d326440 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/HEAD @@ -0,0 +1 @@ +../../../../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/dev.config new file mode 120000 index 0000000000..ee953f22c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/dev.config @@ -0,0 +1 @@ +../../dev.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/prod.config new file mode 120000 index 0000000000..c8b39a6de1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/prod.config @@ -0,0 +1 @@ +../../prod.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/refs b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/refs new file mode 120000 index 0000000000..37881f203f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/kodata/refs @@ -0,0 +1 @@ +../../../../../../.git/refs/ \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/main.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/main.go new file mode 100644 index 0000000000..feaf7a4f10 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/main.go @@ -0,0 +1,138 @@ 
+/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "log" + "time" + + vegeta "github.com/tsenart/vegeta/lib" + + "knative.dev/pkg/signals" + "knative.dev/pkg/system" + "knative.dev/pkg/test/mako" + "knative.dev/serving/test/performance" + "knative.dev/serving/test/performance/metrics" +) + +var ( + target = flag.String("target", "", "The target to attack.") + duration = flag.Duration("duration", 5*time.Minute, "The duration of the probe") +) + +func main() { + flag.Parse() + + // We want this for properly handling Kubernetes container lifecycle events. + ctx := signals.NewContext() + + // We cron quite often, so make sure that we don't severely overrun to + // limit how noisy a neighbor we can be. + ctx, cancel := context.WithTimeout(ctx, *duration+time.Minute) + defer cancel() + + // Use the benchmark key created + mc, err := mako.Setup(ctx) + if err != nil { + log.Fatalf("Failed to setup mako: %v", err) + } + q, qclose, ctx := mc.Quickstore, mc.ShutDownFunc, mc.Context + // Use a fresh context here so that our RPC to terminate the sidecar + // isn't subject to our timeout (or we won't shut it down when we time out) + defer qclose(context.Background()) + + // Wrap fatalf in a helper or our sidecar will live forever. + fatalf := func(f string, args ...interface{}) { + qclose(context.Background()) + log.Fatalf(f, args...) 
+ } + + // Validate flags after setting up "fatalf" or our sidecar will run forever. + if *target == "" { + fatalf("Missing flag: -target") + } + + // Based on the "target" flag, load up our target benchmark. + // We only run one variation per run to avoid the runs being noisy neighbors, + // which in early iterations of the benchmark resulted in latency bleeding + // across the different workload types. + t, ok := targets[*target] + if !ok { + fatalf("Unrecognized target: %s", *target) + } + + // Make sure the target is ready before sending the large amount of requests. + if err := performance.ProbeTargetTillReady(t.target.URL, *duration); err != nil { + fatalf("Failed to get target ready for attacking: %v", err) + } + + // Set up the threshold analyzers for the selected benchmark. This will + // cause Mako/Quickstore to analyze the results we are storing and flag + // things that are outside of expected bounds. + q.Input.ThresholdInputs = append(q.Input.ThresholdInputs, t.analyzers...) + + // Send 1000 QPS (1 per ms) for the given duration with a 30s request timeout. + rate := vegeta.Rate{Freq: 1, Per: time.Millisecond} + targeter := vegeta.NewStaticTargeter(t.target) + attacker := vegeta.NewAttacker(vegeta.Timeout(30 * time.Second)) + + // Create a new aggregateResult to accumulate the results. + ar := metrics.NewAggregateResult(int(duration.Seconds())) + + // Start the attack! + results := attacker.Attack(targeter, rate, *duration, "load-test") + deploymentStatus := metrics.FetchDeploymentStatus(ctx, system.Namespace(), "activator", time.Second) +LOOP: + for { + select { + case <-ctx.Done(): + // If we timeout or the pod gets shutdown via SIGTERM then start to + // clean thing up. + break LOOP + + case ds := <-deploymentStatus: + // Report number of ready activators. 
+ q.AddSamplePoint(mako.XTime(ds.Time), map[string]float64{ + "ap": float64(ds.ReadyReplicas), + }) + case res, ok := <-results: + if !ok { + // Once we have read all of the request results, break out of + // our loop. + break LOOP + } + // Handle the result for this request + metrics.HandleResult(q, *res, t.stat, ar) + } + } + + // Walk over our accumulated per-second error rates and report them as + // sample points. The key is seconds since the Unix epoch, and the value + // is the number of errors observed in that second. + for ts, count := range ar.ErrorRates { + q.AddSamplePoint(mako.XTime(time.Unix(ts, 0)), map[string]float64{ + t.estat: float64(count), + }) + } + + // Commit data to Mako and handle the result. + if err := mc.StoreAndHandleResult(); err != nil { + fatalf("Failed to store and handle benchmarking result: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/sla.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/sla.go new file mode 100644 index 0000000000..c4716e8d6a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/continuous/sla.go @@ -0,0 +1,186 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "net/http" + "time" + + "github.com/golang/protobuf/proto" + tpb "github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto" + mpb "github.com/google/mako/spec/proto/mako_go_proto" + vegeta "github.com/tsenart/vegeta/lib" + "knative.dev/pkg/test/mako" +) + +// This function constructs an analyzer that validates the p95 aggregate value of the given metric. +func new95PercentileLatency(name, valueKey string, min, max time.Duration) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String(name), + Configs: []*tpb.ThresholdConfig{{ + Min: bound(min), + Max: bound(max), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_PERCENTILE.Enum(), + PercentileMilliRank: proto.Int32(95000), + ValueKey: proto.String(valueKey), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10), + } +} + +// This analyzer validates that the p95 latency talking to pods through a Kubernetes +// Service falls in the +5ms range. This does not have Knative or Istio components +// on the dataplane, and so it is intended as a canary to flag environmental +// problems that might be causing contemporaneous Knative or Istio runs to fall out of SLA. +func newKubernetes95PercentileLatency(valueKey string) *tpb.ThresholdAnalyzerInput { + return new95PercentileLatency("Kubernetes baseline", valueKey, 100*time.Millisecond, 105*time.Millisecond) +} + +// This analyzer validates that the p95 latency talking to pods through Istio +// falls in the +8ms range. This does not actually have Knative components +// on the dataplane, and so it is intended as a canary to flag environmental +// problems that might be causing contemporaneous Knative runs to fall out of SLA. 
+func newIstio95PercentileLatency(valueKey string) *tpb.ThresholdAnalyzerInput { + return new95PercentileLatency("Istio baseline", valueKey, 100*time.Millisecond, 108*time.Millisecond) +} + +// This analyzer validates that the p95 latency hitting a Knative Service +// going through JUST the queue-proxy falls in the +10ms range. +func newQueue95PercentileLatency(valueKey string) *tpb.ThresholdAnalyzerInput { + return new95PercentileLatency("Queue p95 latency", valueKey, 100*time.Millisecond, 110*time.Millisecond) +} + +// This analyzer validates that the p95 latency hitting a Knative Service +// going through BOTH the activator and queue-proxy falls in the +10ms range. +func newActivator95PercentileLatency(valueKey string) *tpb.ThresholdAnalyzerInput { + return new95PercentileLatency("Activator p95 latency", valueKey, 100*time.Millisecond, 110*time.Millisecond) +} + +var ( + // Map the above to our benchmark targets. + targets = map[string]struct { + target vegeta.Target + stat string + estat string + analyzers []*tpb.ThresholdAnalyzerInput + }{ + "deployment": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://deployment.default.svc.cluster.local?sleep=100", + }, + stat: "kd", + estat: "ke", + analyzers: []*tpb.ThresholdAnalyzerInput{newKubernetes95PercentileLatency("kd")}, + }, + "istio": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://istio.default.svc.cluster.local?sleep=100", + }, + stat: "id", + estat: "ie", + analyzers: []*tpb.ThresholdAnalyzerInput{newIstio95PercentileLatency("id")}, + }, + "queue": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://queue-proxy.default.svc.cluster.local?sleep=100", + }, + stat: "qp", + estat: "qe", + analyzers: []*tpb.ThresholdAnalyzerInput{newQueue95PercentileLatency("qp")}, + }, + "queue-with-cc": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://queue-proxy-with-cc.default.svc.cluster.local?sleep=100", + }, + stat: "qc", + estat: "re", + // We use the 
same threshold analyzer, since we want Breaker to exert minimal latency impact. + analyzers: []*tpb.ThresholdAnalyzerInput{newQueue95PercentileLatency("qc")}, + }, + "queue-with-cc-10": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://queue-proxy-with-cc-10.default.svc.cluster.local?sleep=100", + }, + stat: "qct", + estat: "ret", + // TODO(vagababov): determine values here. + analyzers: []*tpb.ThresholdAnalyzerInput{}, + }, + "queue-with-cc-1": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://queue-proxy-with-cc-1.default.svc.cluster.local?sleep=100", + }, + stat: "qc1", + estat: "re1", + // TODO(vagababov): determine values here. + analyzers: []*tpb.ThresholdAnalyzerInput{}, + }, + "activator": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://activator.default.svc.cluster.local?sleep=100", + }, + stat: "a", + estat: "ae", + analyzers: []*tpb.ThresholdAnalyzerInput{newActivator95PercentileLatency("a")}, + }, + "activator-with-cc": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://activator-with-cc.default.svc.cluster.local?sleep=100", + }, + stat: "ac", + estat: "be", + // We use the same threshold analyzer, since we want Throttler/Breaker to exert minimal latency impact. + analyzers: []*tpb.ThresholdAnalyzerInput{newActivator95PercentileLatency("ac")}, + }, + "activator-with-cc-10": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://activator-with-cc-10.default.svc.cluster.local?sleep=100", + }, + stat: "act", + estat: "bet", + // TODO(vagababov): determine values here. + analyzers: []*tpb.ThresholdAnalyzerInput{}, + }, + "activator-with-cc-1": { + target: vegeta.Target{ + Method: http.MethodGet, + URL: "http://activator-with-cc-1.default.svc.cluster.local?sleep=100", + }, + stat: "ac1", + estat: "be1", + // TODO(vagababov): determine values here. 
+ analyzers: []*tpb.ThresholdAnalyzerInput{}, + }, + } +) + +// bound is a helper for making the inline SLOs more readable by expressing +// them as durations. +func bound(d time.Duration) *float64 { + return proto.Float64(d.Seconds()) +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/dev.config new file mode 100644 index 0000000000..234e7aa999 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/dev.config @@ -0,0 +1,116 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/dataplane-probe/continuous/dev.config +# Updating this benchmark +# mako update_benchmark \ +# test/performance/benchmarks/dataplane-probe/continuous/dev.config +project_name: "Knative" +benchmark_name: "Development - Serving dataplane probe" +description: "Measure dataplane component latency and reliability." +benchmark_key: '6316266134437888' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Anyone can add their IAM robot here to publish to this benchmark. +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" +# This is kleung's robot: +owner_list: "mako-upload@kleung-knative.iam.gserviceaccount.com" +# This is vagababov's robot: +owner_list: "mako-upload@dm-vagababov.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). 
+metric_info_list: { + value_key: "kd" + label: "kube-deployment" +} +metric_info_list: { + value_key: "id" + label: "istio-deployment" +} +metric_info_list: { + value_key: "qp" + label: "queue-proxy" +} +metric_info_list: { + value_key: "a" + label: "activator" +} +metric_info_list: { + value_key: "qc" + label: "queue-proxy-with-cc-100" +} +metric_info_list: { + value_key: "qc1" + label: "queue-proxy-with-cc-1" +} +metric_info_list: { + value_key: "qct" + label: "queue-proxy-with-cc-10" +} +metric_info_list: { + value_key: "ac" + label: "activator-with-cc-100" +} +metric_info_list: { + value_key: "act" + label: "activator-with-cc-10" +} +metric_info_list: { + value_key: "ac1" + label: "activator-with-cc-1" +} + +metric_info_list: { + value_key: "ke" + label: "kube-errors" +} +metric_info_list: { + value_key: "ie" + label: "istio-errors" +} +metric_info_list: { + value_key: "qe" + label: "queue-errors" +} +metric_info_list: { + value_key: "ae" + label: "activator-errors" +} +metric_info_list: { + value_key: "re" + label: "queue-errors-with-cc-100" +} +metric_info_list: { + value_key: "ret" + label: "queue-errors-with-cc-10" +} +metric_info_list: { + value_key: "re1" + label: "queue-errors-with-cc-1" +} +metric_info_list: { + value_key: "be" + label: "activator-errors-with-cc-100" +} +metric_info_list: { + value_key: "bet" + label: "activator-errors-with-cc-10" +} +metric_info_list: { + value_key: "be1" + label: "activator-errors-with-cc-1" +} +metric_info_list: { + value_key: "ap" + label: "activator-pod-count" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/prod.config new file mode 100644 index 0000000000..839fa30c08 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/dataplane-probe/prod.config @@ -0,0 +1,112 @@ +# Creating this benchmark: +# mako create_benchmark \ +# 
test/performance/benchmarks/dataplane-probe/continuous/prod.config +# Updating this benchmark +# mako update_benchmark \ +# test/performance/benchmarks/dataplane-probe/continuous/prod.config +project_name: "Knative" +benchmark_name: "Serving dataplane probe" +description: "Measure dataplane component latency and reliability." +benchmark_key: '5142965274017792' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Only this robot should publish data to Mako for this key! +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). +metric_info_list: { + value_key: "kd" + label: "kube-deployment" +} +metric_info_list: { + value_key: "id" + label: "istio-deployment" +} +metric_info_list: { + value_key: "qp" + label: "queue-proxy" +} +metric_info_list: { + value_key: "a" + label: "activator" +} +metric_info_list: { + value_key: "qc" + label: "queue-proxy-with-cc-100" +} +metric_info_list: { + value_key: "qc1" + label: "queue-proxy-with-cc-1" +} +metric_info_list: { + value_key: "qct" + label: "queue-proxy-with-cc-10" +} +metric_info_list: { + value_key: "ac" + label: "activator-with-cc-100" +} +metric_info_list: { + value_key: "act" + label: "activator-with-cc-10" +} +metric_info_list: { + value_key: "ac1" + label: "activator-with-cc-1" +} + +metric_info_list: { + value_key: "ke" + label: "kube-errors" +} +metric_info_list: { + value_key: "ie" + label: "istio-errors" +} +metric_info_list: { + value_key: "qe" + label: "queue-errors" +} +metric_info_list: { + value_key: "ae" + label: "activator-errors" +} +metric_info_list: { + value_key: "re" + label: "queue-errors-with-cc-100" +} +metric_info_list: { + value_key: "ret" + label: 
"queue-errors-with-cc-10" +} +metric_info_list: { + value_key: "re1" + label: "queue-errors-with-cc-1" +} +metric_info_list: { + value_key: "be" + label: "activator-errors-with-cc-100" +} +metric_info_list: { + value_key: "bet" + label: "activator-errors-with-cc-10" +} +metric_info_list: { + value_key: "be1" + label: "activator-errors-with-cc-1" +} +metric_info_list: { + value_key: "ap" + label: "activator-pod-count" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/cluster.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/cluster.yaml new file mode 100644 index 0000000000..6f63aee35d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/cluster.yaml @@ -0,0 +1,21 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration file for the cluster that runs this benchmark continuously. 
+ +GKECluster: + location: "us-west1" + nodeCount: 4 + nodeType: "n1-standard-4" + addons: "HorizontalPodAutoscaling,HttpLoadBalancing" diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/benchmark.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/benchmark.yaml new file mode 100644 index 0000000000..10d1884bec --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/benchmark.yaml @@ -0,0 +1,96 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prober +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: service-creator +rules: + - apiGroups: [""] + resources: ["pods", "nodes"] + verbs: ["create", "update", "get", "list", "watch", "delete", "deletecollection"] + - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev"] + resources: ["*"] + verbs: ["create", "update", "get", "list", "watch", "deletecollection"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: service-creator +subjects: + - kind: ServiceAccount + name: prober + namespace: default +roleRef: + kind: ClusterRole + name: service-creator + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: deployment-probe +spec: + # Run 15 minutes past the hour for 35 minutes. + schedule: "15 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: prober + containers: + - name: probe + image: knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous + args: + - "-template=basic" + - "-duration=35m" + - "-frequency=5s" + resources: + requests: + cpu: 100m + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/HEAD 
b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/HEAD new file mode 120000 index 0000000000..a41d326440 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/HEAD @@ -0,0 +1 @@ +../../../../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/basic-template.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/basic-template.yaml new file mode 100644 index 0000000000..dbe7f7742b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/basic-template.yaml @@ -0,0 +1,32 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + generateName: basic- + namespace: default +spec: + template: + spec: + containers: + - image: gcr.io/knative-samples/autoscale-go:0.1 + # Limit resources so that we can pack more on-cluster. 
+ resources: + requests: + cpu: 10m + memory: 50Mi + limits: + cpu: 30m + memory: 50Mi diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/dev.config new file mode 120000 index 0000000000..ee953f22c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/dev.config @@ -0,0 +1 @@ +../../dev.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/prod.config new file mode 120000 index 0000000000..c8b39a6de1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/prod.config @@ -0,0 +1 @@ +../../prod.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/refs b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/refs new file mode 120000 index 0000000000..37881f203f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/kodata/refs @@ -0,0 +1 @@ +../../../../../../.git/refs/ \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/main.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/main.go new file mode 100644 index 0000000000..4dc2074cac --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/main.go @@ -0,0 +1,295 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "io/ioutil" + "log" + "os" + "path/filepath" + "time" + + "github.com/ghodss/yaml" + "github.com/google/mako/go/quickstore" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/ptr" + "knative.dev/pkg/signals" + + "knative.dev/pkg/test/mako" + asv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + servingclient "knative.dev/serving/pkg/client/injection/client" +) + +var ( + template = flag.String("template", "", "The service template to load from kodata/") + duration = flag.Duration("duration", 25*time.Minute, "The duration of the benchmark to run.") + frequency = flag.Duration("frequency", 5*time.Second, "The frequency at which to create services.") +) + +func readTemplate() (*v1beta1.Service, error) { + path := filepath.Join(os.Getenv("KO_DATA_PATH"), *template+"-template.yaml") + b, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + svc := &v1beta1.Service{} + if err := yaml.Unmarshal([]byte(b), svc); err != nil { + return nil, err + } + + svc.OwnerReferences = []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "Pod", + Name: os.Getenv("POD_NAME"), + UID: types.UID(os.Getenv("POD_UID")), + 
Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }} + + return svc, nil +} + +func handle(q *quickstore.Quickstore, svc kmeta.Accessor, status duckv1.Status, + seen *sets.String, metric string) { + if seen.Has(svc.GetName()) { + return + } + cc := status.GetCondition(apis.ConditionReady) + if cc == nil || cc.Status == corev1.ConditionUnknown { + return + } + seen.Insert(svc.GetName()) + created := svc.GetCreationTimestamp().Time + ready := cc.LastTransitionTime.Inner.Time + elapsed := ready.Sub(created) + + if cc.Status == corev1.ConditionTrue { + q.AddSamplePoint(mako.XTime(created), map[string]float64{ + metric: elapsed.Seconds(), + }) + log.Printf("Ready: %s", svc.GetName()) + } else if cc.Status == corev1.ConditionFalse { + q.AddError(mako.XTime(created), cc.Message) + log.Printf("Not Ready: %s; %s: %s", svc.GetName(), cc.Reason, cc.Message) + } +} + +func main() { + flag.Parse() + + // We want this for properly handling Kubernetes container lifecycle events. + ctx := signals.NewContext() + + tmpl, err := readTemplate() + if err != nil { + log.Fatalf("Unable to read template %s: %v", *template, err) + } + + // We cron every 30 minutes, so make sure that we don't severely overrun to + // limit how noisy a neighbor we can be. + ctx, cancel := context.WithTimeout(ctx, *duration) + defer cancel() + + // Tag this run with the various flag values. + tags := []string{ + "template=" + *template, + "duration=" + duration.String(), + "frequency=" + frequency.String(), + } + mc, err := mako.Setup(ctx, tags...) 
+ if err != nil { + log.Fatalf("Failed to setup mako: %v", err) + } + q, qclose, ctx := mc.Quickstore, mc.ShutDownFunc, mc.Context + // Use a fresh context here so that our RPC to terminate the sidecar + // isn't subject to our timeout (or we won't shut it down when we time out) + defer qclose(context.Background()) + + sc := servingclient.Get(ctx) + cleanup := func() error { + return sc.ServingV1beta1().Services(tmpl.Namespace).DeleteCollection( + &metav1.DeleteOptions{}, metav1.ListOptions{}) + } + defer cleanup() + + // Wrap fatalf in a helper or our sidecar will live forever. + fatalf := func(f string, args ...interface{}) { + qclose(context.Background()) + cleanup() + log.Fatalf(f, args...) + } + + // Set up the threshold analyzers for the selected benchmark. This will + // cause Mako/Quickstore to analyze the results we are storing and flag + // things that are outside of expected bounds. + q.Input.ThresholdInputs = append(q.Input.ThresholdInputs, + newDeploy95PercentileLatency(tags...), + newReadyDeploymentCount(tags...), + ) + + if err := cleanup(); err != nil { + fatalf("Error cleaning up services: %v", err) + } + + lo := metav1.ListOptions{TimeoutSeconds: ptr.Int64(int64(duration.Seconds()))} + + // TODO(mattmoor): We could maybe use a duckv1.KResource to eliminate this boilerplate. 
+ + serviceWI, err := sc.ServingV1beta1().Services(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch services: %v", err) + } + defer serviceWI.Stop() + serviceSeen := sets.String{} + + configurationWI, err := sc.ServingV1beta1().Configurations(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch configurations: %v", err) + } + defer configurationWI.Stop() + configurationSeen := sets.String{} + + routeWI, err := sc.ServingV1beta1().Routes(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch routes: %v", err) + } + defer routeWI.Stop() + routeSeen := sets.String{} + + revisionWI, err := sc.ServingV1beta1().Revisions(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch revisions: %v", err) + } + defer revisionWI.Stop() + revisionSeen := sets.String{} + + ingressWI, err := sc.NetworkingV1alpha1().Ingresses(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch ingresss: %v", err) + } + defer ingressWI.Stop() + ingressSeen := sets.String{} + + sksWI, err := sc.NetworkingV1alpha1().ServerlessServices(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch skss: %v", err) + } + defer sksWI.Stop() + sksSeen := sets.String{} + + paWI, err := sc.AutoscalingV1alpha1().PodAutoscalers(tmpl.Namespace).Watch(lo) + if err != nil { + fatalf("Unable to watch pas: %v", err) + } + defer paWI.Stop() + paSeen := sets.String{} + + tick := time.NewTicker(*frequency) + func() { + for { + select { + case <-ctx.Done(): + // If we timeout or the pod gets shutdown via SIGTERM then start to + // clean thing up. 
+ return + + case ts := <-tick.C: + svc, err := sc.ServingV1beta1().Services(tmpl.Namespace).Create(tmpl) + if err != nil { + q.AddError(mako.XTime(ts), err.Error()) + log.Printf("Error creating service: %v", err) + break + } + log.Printf("Created: %s", svc.Name) + + case event := <-serviceWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + svc := event.Object.(*v1beta1.Service) + handle(q, svc, svc.Status.Status, &serviceSeen, "dl") + + case event := <-configurationWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + cfg := event.Object.(*v1beta1.Configuration) + handle(q, cfg, cfg.Status.Status, &configurationSeen, "cl") + + case event := <-routeWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + rt := event.Object.(*v1beta1.Route) + handle(q, rt, rt.Status.Status, &routeSeen, "rl") + + case event := <-revisionWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + rev := event.Object.(*v1beta1.Revision) + handle(q, rev, rev.Status.Status, &revisionSeen, "rvl") + + case event := <-ingressWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + ing := event.Object.(*netv1alpha1.Ingress) + handle(q, ing, ing.Status.Status, &ingressSeen, "il") + + case event := <-sksWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + ing := event.Object.(*netv1alpha1.ServerlessService) + handle(q, ing, ing.Status.Status, &sksSeen, "sksl") + + case event := <-paWI.ResultChan(): + if event.Type != watch.Modified { + // Skip events other than modifications + break + } + pa := event.Object.(*asv1alpha1.PodAutoscaler) + handle(q, pa, pa.Status.Status, &paSeen, "pal") + } + } + }() + + // Commit this benchmark run to Mako! 
+ out, err := q.Store() + if err != nil { + fatalf("q.Store error: %v: %v", out, err) + } + log.Printf("Done! Run: %s\n", out.GetRunChartLink()) +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/sla.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/sla.go new file mode 100644 index 0000000000..d27f5b6b39 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/continuous/sla.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "time" + + "github.com/golang/protobuf/proto" + tpb "github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto" + mpb "github.com/google/mako/spec/proto/mako_go_proto" + + "knative.dev/pkg/test/mako" +) + +// This analyzer validates that the p95 latency deploying a new service takes up +// to 25 seconds. 
+func newDeploy95PercentileLatency(tags ...string) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String("Deploy p95 latency"), + Configs: []*tpb.ThresholdConfig{{ + Min: bound(0 * time.Second), + Max: bound(25 * time.Second), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_PERCENTILE.Enum(), + PercentileMilliRank: proto.Int32(95000), + ValueKey: proto.String("dl"), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10, tags...), + } +} + +// This analyzer validates that the number of services deployed to "Ready=True". +// Configured to run for 35m with a frequency of 5s, the theoretical limit is 420 +// if deployments take 0s. Factoring in deployment latency, we will miss a +// handful of the trailing deployments, so we relax this to 410. +func newReadyDeploymentCount(tags ...string) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String("Ready deployment count"), + Configs: []*tpb.ThresholdConfig{{ + Min: proto.Float64(410), + Max: proto.Float64(420), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_COUNT.Enum(), + ValueKey: proto.String("dl"), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10, tags...), + } +} + +// bound is a helper for making the inline SLOs more readable by expressing +// them as durations. 
+func bound(d time.Duration) *float64 { + return proto.Float64(d.Seconds()) +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/dev.config new file mode 100644 index 0000000000..b48889f351 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/dev.config @@ -0,0 +1,59 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/deployment-probe/continuous/dev.config +# Updating this benchmark +# mako update_benchmark \ +# test/performance/benchmarks/deployment-probe/continuous/dev.config +project_name: "Knative" +benchmark_name: "Development - Serving deployment probe" +description: "Measure deployment latency." +benchmark_key: '5915474038620160' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Anyone can add their IAM robot here to publish to this benchmark. +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" +# This is kleung's robot: +owner_list: "mako-upload@kleung-knative.iam.gserviceaccount.com" +# This is vagababov's robot: +owner_list: "mako-upload@dm-vagababov.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). 
+metric_info_list: { + value_key: "dl" + label: "deployment-latency" +} +metric_info_list: { + value_key: "cl" + label: "configuration-latency" +} +metric_info_list: { + value_key: "rl" + label: "route-latency" +} +metric_info_list: { + value_key: "rvl" + label: "revision-latency" +} +metric_info_list: { + value_key: "il" + label: "ingress-latency" +} +metric_info_list: { + value_key: "sksl" + label: "sks-latency" +} +metric_info_list: { + value_key: "pal" + label: "podautoscaler-latency" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/prod.config new file mode 100644 index 0000000000..7f4bf2c474 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/deployment-probe/prod.config @@ -0,0 +1,55 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/deployment-probe/continuous/prod.config +# Updating this benchmark +# mako update_benchmark \ +# test/performance/benchmarks/deployment-probe/continuous/prod.config +project_name: "Knative" +benchmark_name: "Serving deployment probe" +description: "Measure deployment latency." +benchmark_key: '5143375149793280' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Only this robot should publish data to Mako for this key! +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). 
+metric_info_list: { + value_key: "dl" + label: "deployment-latency" +} +metric_info_list: { + value_key: "cl" + label: "configuration-latency" +} +metric_info_list: { + value_key: "rl" + label: "route-latency" +} +metric_info_list: { + value_key: "rvl" + label: "revision-latency" +} +metric_info_list: { + value_key: "il" + label: "ingress-latency" +} +metric_info_list: { + value_key: "sksl" + label: "sks-latency" +} +metric_info_list: { + value_key: "pal" + label: "podautoscaler-latency" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/cluster.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/cluster.yaml new file mode 100644 index 0000000000..a26f1d3c23 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/cluster.yaml @@ -0,0 +1,21 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration file for the cluster that runs this benchmark continuously. 
+ +GKECluster: + location: "us-central1" + nodeCount: 5 + nodeType: "n1-standard-4" + addons: "HorizontalPodAutoscaling,HttpLoadBalancing" diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/HEAD b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/HEAD new file mode 120000 index 0000000000..a41d326440 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/HEAD @@ -0,0 +1 @@ +../../../../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/dev.config new file mode 120000 index 0000000000..ee953f22c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/dev.config @@ -0,0 +1 @@ +../../dev.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/prod.config new file mode 120000 index 0000000000..c8b39a6de1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/prod.config @@ -0,0 +1 @@ +../../prod.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/refs b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/refs new file mode 120000 index 0000000000..37881f203f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/kodata/refs @@ -0,0 +1 @@ +../../../../../../.git/refs/ \ No newline at end of file diff --git 
a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test-setup.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test-setup.yaml new file mode 100644 index 0000000000..a5a657e2db --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test-setup.yaml @@ -0,0 +1,55 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: load-test-always +spec: + template: + metadata: + annotations: + # Always hook the activator in. + autoscaling.knative.dev/targetBurstCapacity: "-1" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: load-test-zero +spec: + template: + metadata: + annotations: + # Only hook the activator in at zero + autoscaling.knative.dev/targetBurstCapacity: "0" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: load-test-200 +spec: + template: + metadata: + annotations: + # Hook the activator in until we reach a higher capacity. 
+ autoscaling.knative.dev/targetBurstCapacity: "200" + spec: + containers: + - image: knative.dev/serving/test/test_images/autoscale diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test.yaml new file mode 100644 index 0000000000..d774f08e49 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/load-test.yaml @@ -0,0 +1,178 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: loader +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: load-testing +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.internal.knative.dev"] + resources: ["serverlessservices"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: load-testing-loader +subjects: + - kind: ServiceAccount + name: loader + namespace: default +roleRef: + kind: ClusterRole + name: load-testing + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: load-test-zero +spec: + schedule: "0,30 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: loader + containers: + - name: load-test + image: knative.dev/serving/test/performance/benchmarks/load-test/continuous + args: + - "-flavor=zero" + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: load-test-always +spec: + schedule: "10,40 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: loader + containers: + - name: load-test + image: knative.dev/serving/test/performance/benchmarks/load-test/continuous + args: + - "-flavor=always" + resources: + requests: + cpu: 
1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: load-test-200 +spec: + schedule: "20,50 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: loader + containers: + - name: load-test + image: knative.dev/serving/test/performance/benchmarks/load-test/continuous + args: + - "-flavor=200" + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/main.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/main.go new file mode 100644 index 0000000000..6574fa2e72 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/main.go @@ -0,0 +1,172 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net/http" + "time" + + "github.com/google/mako/go/quickstore" + vegeta "github.com/tsenart/vegeta/lib" + "k8s.io/apimachinery/pkg/labels" + + "knative.dev/pkg/signals" + "knative.dev/pkg/test/mako" + pkgpacers "knative.dev/pkg/test/vegeta/pacers" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/test/performance" + "knative.dev/serving/test/performance/metrics" +) + +const namespace = "default" + +var ( + flavor = flag.String("flavor", "", "The flavor of the benchmark to run.") + selector labels.Selector +) + +func processResults(ctx context.Context, q *quickstore.Quickstore, results <-chan *vegeta.Result) { + // Create a new aggregateResult to accumulate the results. + ar := metrics.NewAggregateResult(0) + + // When the benchmark completes, iterate over the accumulated rates + // and add them as sample points. 
+ defer func() { + for t, req := range ar.RequestRates { + q.AddSamplePoint(mako.XTime(time.Unix(t, 0)), map[string]float64{ + "rs": float64(req), + }) + } + for t, err := range ar.ErrorRates { + q.AddSamplePoint(mako.XTime(time.Unix(t, 0)), map[string]float64{ + "es": float64(err), + }) + } + }() + + ctx, cancel := context.WithCancel(ctx) + deploymentStatus := metrics.FetchDeploymentsStatus(ctx, namespace, selector, time.Second) + sksMode := metrics.FetchSKSMode(ctx, namespace, selector, time.Second) + defer cancel() + + for { + select { + case res, ok := <-results: + // If there are no more results, then we're done! + if !ok { + return + } + // Handle the result for this request + metrics.HandleResult(q, *res, "l", ar) + case ds := <-deploymentStatus: + // Add a sample point for the deployment status + q.AddSamplePoint(mako.XTime(ds.Time), map[string]float64{ + "dp": float64(ds.DesiredReplicas), + "ap": float64(ds.ReadyReplicas), + }) + case sksm := <-sksMode: + // Add a sample point for the serverless service mode + mode := float64(0) + if sksm.Mode == netv1alpha1.SKSOperationModeProxy { + mode = 1.0 + } + q.AddSamplePoint(mako.XTime(sksm.Time), map[string]float64{ + "sks": mode, + }) + } + } +} + +func main() { + flag.Parse() + + if *flavor == "" { + log.Fatalf("-flavor is a required flag.") + } + selector = labels.SelectorFromSet(labels.Set{ + serving.ServiceLabelKey: "load-test-" + *flavor, + }) + + // We want this for properly handling Kubernetes container lifecycle events. + ctx := signals.NewContext() + + // We cron every 10 minutes, so give ourselves 8 minutes to complete. + ctx, cancel := context.WithTimeout(ctx, 8*time.Minute) + defer cancel() + + // Use the benchmark key created. 
+ tbcTag := "tbc=" + *flavor + mc, err := mako.Setup(ctx, tbcTag) + if err != nil { + log.Fatalf("failed to setup mako: %v", err) + } + q, qclose, ctx := mc.Quickstore, mc.ShutDownFunc, mc.Context + // Use a fresh context here so that our RPC to terminate the sidecar + // isn't subject to our timeout (or we won't shut it down when we time out) + defer qclose(context.Background()) + + // Wrap fatalf in a helper or our sidecar will live forever. + fatalf := func(f string, args ...interface{}) { + qclose(context.Background()) + log.Fatalf(f, args...) + } + + q.Input.ThresholdInputs = append(q.Input.ThresholdInputs, + newLoadTest95PercentileLatency(tbcTag), + newLoadTestMaximumLatency(tbcTag), + newLoadTestMaximumErrorRate(tbcTag)) + + log.Print("Starting the load test.") + // Ramp up load from 1k to 3k in 2 minute steps. + const duration = 2 * time.Minute + url := fmt.Sprintf("http://load-test-%s.default.svc.cluster.local?sleep=100", *flavor) + targeter := vegeta.NewStaticTargeter(vegeta.Target{ + Method: http.MethodGet, + URL: url, + }) + + // Make sure the target is ready before sending the large amount of requests. 
+ if err := performance.ProbeTargetTillReady(url, duration); err != nil { + fatalf("Failed to get target ready for attacking: %v", err) + } + // Wait for scale back to 0 + if err := performance.WaitForScaleToZero(ctx, namespace, selector, 2*time.Minute); err != nil { + fatalf("Failed to wait for scale-to-0: %v", err) + } + + pacers := make([]vegeta.Pacer, 3) + durations := make([]time.Duration, 3) + for i := 1; i < 4; i++ { + pacers[i-1] = vegeta.Rate{Freq: i, Per: time.Millisecond} + durations[i-1] = duration + } + pacer, err := pkgpacers.NewCombined(pacers, durations) + if err != nil { + fatalf("Error creating the pacer: %v", err) + } + results := vegeta.NewAttacker().Attack(targeter, pacer, 3*duration, "load-test") + processResults(ctx, q, results) + + if err := mc.StoreAndHandleResult(); err != nil { + fatalf("Failed to store and handle benchmarking result: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/sla.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/sla.go new file mode 100644 index 0000000000..0a244cdf6c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/continuous/sla.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "time" + + "github.com/golang/protobuf/proto" + tpb "github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto" + mpb "github.com/google/mako/spec/proto/mako_go_proto" + "knative.dev/pkg/test/mako" +) + +// This analyzer validates that the p95 latency over the 0->3k stepped burst +// falls in the +15ms range. This includes a mix of cold-starts and steady +// state (once the autoscaling decisions have leveled off). +func newLoadTest95PercentileLatency(tags ...string) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String("95p latency"), + Configs: []*tpb.ThresholdConfig{{ + Min: bound(100 * time.Millisecond), + Max: bound(115 * time.Millisecond), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_PERCENTILE.Enum(), + PercentileMilliRank: proto.Int32(95000), + ValueKey: proto.String("l"), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10, tags...), + } +} + +// This analyzer validates that the maximum request latency observed over the 0->3k +// stepped burst is no more than +10 seconds. This is not strictly a cold-start +// metric, but it is a superset that includes steady state latency and the latency +// of non-cold-start overload requests. +func newLoadTestMaximumLatency(tags ...string) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String("Maximum latency"), + Configs: []*tpb.ThresholdConfig{{ + Min: bound(100 * time.Millisecond), + Max: bound(100*time.Millisecond + 10*time.Second), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_MAX.Enum(), + ValueKey: proto.String("l"), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10, tags...), + } +} + +// This analyzer validates that the mean error rate observed over the 0->3k +// stepped burst is 0. 
+func newLoadTestMaximumErrorRate(tags ...string) *tpb.ThresholdAnalyzerInput { + return &tpb.ThresholdAnalyzerInput{ + Name: proto.String("Mean error rate"), + Configs: []*tpb.ThresholdConfig{{ + Max: proto.Float64(0), + DataFilter: &mpb.DataFilter{ + DataType: mpb.DataFilter_METRIC_AGGREGATE_MEAN.Enum(), + ValueKey: proto.String("es"), + }, + }}, + CrossRunConfig: mako.NewCrossRunConfig(10, tags...), + } +} + +// bound is a helper for making the inline SLOs more readable by expressing +// them as durations. +func bound(d time.Duration) *float64 { + return proto.Float64(d.Seconds()) +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/dev.config new file mode 100644 index 0000000000..0d46f23411 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/dev.config @@ -0,0 +1,59 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/load-test/continuous/dev.config +# Updating this benchmark: +# mako update_benchmark \ +# test/performance/benchmarks/load-test/continuous/dev.config +project_name: "Knative" +benchmark_name: "Development - Serving load testing" +description: "Load test 0->1k->2k->3k against a ksvc (with several TBC values)." +benchmark_key: '6297841731371008' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Anyone can add their IAM robot here to publish to this benchmark. 
+owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" +# This is kleung's robot: +owner_list: "mako-upload@kleung-knative.iam.gserviceaccount.com" +# This is vagababov's robot: +owner_list: "mako-upload@dm-vagababov.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). +metric_info_list: { + value_key: "l" + label: "latency" +} + +# Used to track errors/sec and requests/sec alongside latency +metric_info_list: { + value_key: "es" + label: "errs-sec" +} +metric_info_list: { + value_key: "rs" + label: "requests-sec" +} + +# Used to track desired and actual pod counts alongside latency +metric_info_list: { + value_key: "dp" + label: "desired-pods" +} +metric_info_list: { + value_key: "ap" + label: "available-pods" +} +metric_info_list: { + value_key: "sks" + label: "sks-proxy" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/prod.config new file mode 100644 index 0000000000..91fc19f668 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/load-test/prod.config @@ -0,0 +1,55 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/load-test/continuous/prod.config +# Updating this benchmark: +# mako update_benchmark \ +# test/performance/benchmarks/load-test/continuous/prod.config +project_name: "Knative" +benchmark_name: "Serving load testing" +description: "Load test 0->1k->2k->3k against a ksvc (with several TBC values)." +benchmark_key: '5352009922248704' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Only this robot should publish data to Mako for this key! 
+owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). +metric_info_list: { + value_key: "l" + label: "latency" +} + +# Used to track errors/sec and requests/sec alongside latency +metric_info_list: { + value_key: "es" + label: "errs-sec" +} +metric_info_list: { + value_key: "rs" + label: "requests-sec" +} + +# Used to track desired and actual pod counts alongside latency +metric_info_list: { + value_key: "dp" + label: "desired-pods" +} +metric_info_list: { + value_key: "ap" + label: "available-pods" +} +metric_info_list: { + value_key: "sks" + label: "sks-proxy" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/cluster.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/cluster.yaml new file mode 100644 index 0000000000..60da788048 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/cluster.yaml @@ -0,0 +1,21 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration file for the cluster that runs this benchmark continuously. 
+ +GKECluster: + location: "us-west1" + nodeCount: 3 + nodeType: "n1-standard-4" + addons: "HorizontalPodAutoscaling,HttpLoadBalancing" diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/HEAD b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/HEAD new file mode 120000 index 0000000000..a41d326440 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/HEAD @@ -0,0 +1 @@ +../../../../../../.git/HEAD \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/dev.config new file mode 120000 index 0000000000..ee953f22c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/dev.config @@ -0,0 +1 @@ +../../dev.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/prod.config new file mode 120000 index 0000000000..c8b39a6de1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/prod.config @@ -0,0 +1 @@ +../../prod.config \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/refs b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/refs new file mode 120000 index 0000000000..37881f203f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/kodata/refs @@ -0,0 +1 @@ +../../../../../../.git/refs/ \ No newline at end of file diff --git 
a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/main.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/main.go new file mode 100644 index 0000000000..25d21510a1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/main.go @@ -0,0 +1,284 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "strconv" + "sync" + "testing" + "time" + + v1 "k8s.io/api/apps/v1" + + "github.com/google/mako/go/quickstore" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/watch" + "knative.dev/pkg/test/mako" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/test/performance" + + "golang.org/x/sync/errgroup" + + "knative.dev/pkg/injection/sharedmain" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/autoscaling" + ktest "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + parallelCount = flag.Int("parallel", 0, "The count of ksvcs we want to run scale-from-zero in parallel") +) + +const ( + testNamespace = "default" + serviceName = "perftest-scalefromzero" + helloWorldExpectedOutput = "Hello World!" 
+ helloWorldImage = "helloworld" + waitToServe = 2 * time.Minute +) + +func clientsFromConfig() (*test.Clients, error) { + cfg, err := sharedmain.GetConfig("", "") + if err != nil { + return nil, fmt.Errorf("error building kubeconfig: %v", err) + } + return test.NewClientsFromConfig(cfg, testNamespace) +} + +func createServices(clients *test.Clients, count int) ([]*v1a1test.ResourceObjects, func(), error) { + testNames := make([]*test.ResourceNames, count) + + // Initialize our service names. + for i := 0; i < count; i++ { + testNames[i] = &test.ResourceNames{ + Service: test.AppendRandomString(fmt.Sprintf("%s-%02d", serviceName, i)), + // The crd.go helpers will convert to the actual image path. + Image: helloWorldImage, + } + } + + cleanupNames := func() { + for i := 0; i < count; i++ { + test.TearDown(clients, *testNames[i]) + } + } + + objs := make([]*v1a1test.ResourceObjects, count) + begin := time.Now() + sos := []ktest.ServiceOption{ + // We set a small resource alloc so that we can pack more pods into the cluster. 
+ ktest.WithResourceRequirements(corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20Mi"), + }, + }), + ktest.WithConfigAnnotations(map[string]string{ + autoscaling.WindowAnnotationKey: "7s", + }), + } + g := errgroup.Group{} + for i := 0; i < count; i++ { + ndx := i + g.Go(func() error { + var err error + if objs[ndx], _, err = v1a1test.CreateRunLatestServiceReady(&testing.T{}, clients, testNames[ndx], false, sos...); err != nil { + return fmt.Errorf("%02d: failed to create Ready service: %v", ndx, err) + } + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, nil, err + } + log.Printf("Created all the services in %v", time.Since(begin)) + return objs, cleanupNames, nil +} + +func waitForScaleToZero(ctx context.Context, objs []*v1a1test.ResourceObjects) error { + g := errgroup.Group{} + for i := 0; i < len(objs); i++ { + idx := i + ro := objs[i] + g.Go(func() error { + log.Printf("%02d: waiting for deployment to scale to zero", idx) + selector := labels.SelectorFromSet(labels.Set{ + serving.ServiceLabelKey: ro.Service.Name, + }) + + if err := performance.WaitForScaleToZero(ctx, testNamespace, selector, 2*time.Minute); err != nil { + m := fmt.Sprintf("%02d: failed waiting for deployment to scale to zero: %v", idx, err) + log.Println(m) + return errors.New(m) + } + return nil + }) + } + return g.Wait() +} + +func parallelScaleFromZero(ctx context.Context, clients *test.Clients, objs []*v1a1test.ResourceObjects, q *quickstore.Quickstore) { + count := len(objs) + // Get the key for saving latency and error metrics in the benchmark. 
+ lk := "l" + strconv.Itoa(count) + dlk := "dl" + strconv.Itoa(count) + ek := "e" + strconv.Itoa(count) + var wg sync.WaitGroup + wg.Add(count) + for i := 0; i < count; i++ { + ndx := i + go func() { + defer wg.Done() + sdur, ddur, err := runScaleFromZero(ctx, clients, ndx, objs[ndx]) + if err == nil { + q.AddSamplePoint(mako.XTime(time.Now()), map[string]float64{ + lk: sdur.Seconds(), + }) + q.AddSamplePoint(mako.XTime(time.Now()), map[string]float64{ + dlk: ddur.Seconds(), + }) + } else { + // Add 1 to the error metric whenever there is an error. + q.AddSamplePoint(mako.XTime(time.Now()), map[string]float64{ + ek: 1, + }) + // By reporting errors like this, the error strings show up on + // the details page for each Mako run. + q.AddError(mako.XTime(time.Now()), err.Error()) + } + }() + } + wg.Wait() +} + +func runScaleFromZero(ctx context.Context, clients *test.Clients, idx int, ro *v1a1test.ResourceObjects) ( + time.Duration, time.Duration, error) { + selector := labels.SelectorFromSet(labels.Set{ + serving.ServiceLabelKey: ro.Service.Name, + }) + + watcher, err := clients.KubeClient.Kube.AppsV1().Deployments(testNamespace).Watch( + metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + m := fmt.Sprintf("%02d: unable to watch the deployment for the service: %v", idx, err) + log.Println(m) + return 0, 0, errors.New(m) + } + defer watcher.Stop() + + ddch := watcher.ResultChan() + sdch := make(chan struct{}) + errch := make(chan error) + + go func() { + log.Printf("%02d: waiting for endpoint to serve request", idx) + url := ro.Route.Status.URL.URL() + _, err := pkgTest.WaitForEndpointStateWithTimeout( + clients.KubeClient, + log.Printf, + url, + pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.MatchesBody(helloWorldExpectedOutput)), + "HelloWorldServesText", + test.ServingFlags.ResolvableDomain, waitToServe, + ) + if err != nil { + m := fmt.Sprintf("%02d: the endpoint for Route %q at %q didn't serve the expected text %q: %v", idx, ro.Route.Name, 
url, helloWorldExpectedOutput, err) + log.Println(m) + errch <- errors.New(m) + return + } + + sdch <- struct{}{} + }() + + start := time.Now() + // Get the duration that takes to change deployment spec. + var dd time.Duration + for { + select { + case event := <-ddch: + if event.Type == watch.Modified { + dm := event.Object.(*v1.Deployment) + if *dm.Spec.Replicas != 0 && dd == 0 { + dd = time.Since(start) + } + } + case <-sdch: + return time.Since(start), dd, nil + case err := <-errch: + return 0, 0, err + } + } +} + +func testScaleFromZero(clients *test.Clients, count int) { + parallelTag := fmt.Sprintf("parallel=%d", count) + mc, err := mako.Setup(context.Background(), parallelTag) + if err != nil { + log.Fatalf("failed to setup mako: %v", err) + } + q, qclose, ctx := mc.Quickstore, mc.ShutDownFunc, mc.Context + defer qclose(ctx) + + // Create the services once. + objs, cleanup, err := createServices(clients, count) + // Wrap fatalf in a helper or our sidecar will live forever, also wrap cleanup. + fatalf := func(f string, args ...interface{}) { + cleanup() + qclose(ctx) + log.Fatalf(f, args...) + } + if err != nil { + fatalf("Failed to create services: %v", err) + } + defer cleanup() + + // Wait all services scaling to zero. 
+ if err := waitForScaleToZero(ctx, objs); err != nil { + fatalf("Failed to wait for all services to scale to zero: %v", err) + } + + parallelScaleFromZero(ctx, clients, objs, q) + if err := mc.StoreAndHandleResult(); err != nil { + fatalf("Failed to store and handle benchmarking result: %v", err) + } +} + +func main() { + flag.Parse() + clients, err := clientsFromConfig() + if err != nil { + log.Fatalf("Failed to setup clients: %v", err) + } + + testScaleFromZero(clients, *parallelCount) +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/scale-from-zero.yaml b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/scale-from-zero.yaml new file mode 100644 index 0000000000..d75f6c7cf5 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/scale-from-zero.yaml @@ -0,0 +1,172 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scale-from-zero +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: scale-from-zero +subjects: + - kind: ServiceAccount + name: scale-from-zero + namespace: default +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: scale-from-zero-1 +spec: + schedule: "0,20,40 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: scale-from-zero + containers: + - name: scale-from-zero + image: knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous + args: + - "-parallel=1" + env: + - name: KO_DOCKER_REPO + value: gcr.io/knative-performance + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: scale-from-zero-5 +spec: + schedule: "6,26,46 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: scale-from-zero + containers: + - name: scale-from-zero + image: knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous + args: + - "-parallel=5" + env: + - name: KO_DOCKER_REPO + value: gcr.io/knative-performance + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: 
gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: scale-from-zero-25 +spec: + schedule: "12,32,52 * * * *" + jobTemplate: + spec: + parallelism: 1 + template: + spec: + serviceAccountName: scale-from-zero + containers: + - name: scale-from-zero + image: knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous + args: + - "-parallel=25" + env: + - name: KO_DOCKER_REPO + value: gcr.io/knative-performance + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: config-mako + mountPath: /etc/config-mako + - name: mako-secrets + mountPath: /var/secret + - name: mako + image: gcr.io/knative-performance/mako-microservice:latest + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/secret/robot.json + volumeMounts: + - name: mako-secrets + mountPath: /var/secret + volumes: + - name: mako-secrets + secret: + secretName: mako-secrets + - name: config-mako + configMap: + name: config-mako + restartPolicy: Never + backoffLimit: 0 diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/sla.go b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/sla.go new file mode 100644 index 0000000000..99f161fd42 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/continuous/sla.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// TODO: Determine the SLAs. diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/dev.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/dev.config new file mode 100644 index 0000000000..24817eda65 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/dev.config @@ -0,0 +1,69 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/scale-from-zero/continuous/dev.config +# Updating this benchmark: +# mako update_benchmark \ +# test/performance/benchmarks/scale-from-zero/continuous/dev.config +project_name: "Knative" +benchmark_name: "Development - Serving scale from zero" +description: "Scale from zero test against ksvcs in parallel." +benchmark_key: '5024954898710528' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Anyone can add their IAM robot here to publish to this benchmark. +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" +# This is kleung's robot: +owner_list: "mako-upload@kleung-knative.iam.gserviceaccount.com" +# This is vagababov's robot: +owner_list: "mako-upload@dm-vagababov.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). 
+metric_info_list: { + value_key: "l1" + label: "latency1" +} +metric_info_list: { + value_key: "dl1" + label: "deployment-latency1" +} +metric_info_list: { + value_key: "e1" + label: "error1" +} + +metric_info_list: { + value_key: "l5" + label: "latency5" +} +metric_info_list: { + value_key: "dl5" + label: "deployment-latency5" +} +metric_info_list: { + value_key: "e5" + label: "error5" +} + +metric_info_list: { + value_key: "l25" + label: "latency25" +} +metric_info_list: { + value_key: "dl25" + label: "deployment-latency25" +} +metric_info_list: { + value_key: "e25" + label: "error25" +} diff --git a/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/prod.config b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/prod.config new file mode 100644 index 0000000000..4b6ded00d6 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/benchmarks/scale-from-zero/prod.config @@ -0,0 +1,65 @@ +# Creating this benchmark: +# mako create_benchmark \ +# test/performance/benchmarks/scale-from-zero/continuous/prod.config +# Updating this benchmark: +# mako update_benchmark \ +# test/performance/benchmarks/scale-from-zero/continuous/prod.config +project_name: "Knative" +benchmark_name: "Serving scale from zero" +description: "Scale from zero test against ksvcs in parallel." +benchmark_key: '5607420646653952' + +# Human owners for manual benchmark adjustments. +owner_list: "vagababov@google.com" +owner_list: "chizhg@google.com" +owner_list: "yanweiguo@google.com" + +# Only this robot should publish data to Mako for this key! +owner_list: "mako-job@knative-performance.iam.gserviceaccount.com" + +# Define the name and type for x-axis of run charts +input_value_info: { + value_key: "t" + label: "time" + type: TIMESTAMP +} + +# Note: value_key is stored repeatedly and should be very short (ideally one or two characters). 
+metric_info_list: { + value_key: "l1" + label: "latency1" +} +metric_info_list: { + value_key: "dl1" + label: "deployment-latency1" +} +metric_info_list: { + value_key: "e1" + label: "error1" +} + +metric_info_list: { + value_key: "l5" + label: "latency5" +} +metric_info_list: { + value_key: "dl5" + label: "deployment-latency5" +} +metric_info_list: { + value_key: "e5" + label: "error5" +} + +metric_info_list: { + value_key: "l25" + label: "latency25" +} +metric_info_list: { + value_key: "dl25" + label: "deployment-latency25" +} +metric_info_list: { + value_key: "e25" + label: "error25" +} diff --git a/test/vendor/knative.dev/serving/test/performance/config/README.md b/test/vendor/knative.dev/serving/test/performance/config/README.md new file mode 100644 index 0000000000..9cd861a01a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/config/README.md @@ -0,0 +1,65 @@ +### Vegeta-based load generator + +This directory contains a simple `vegeta`-based load generator, which can be run +with: + +```shell +ko apply -f test/performance/config +``` + +By default, it is configured to load test the +[autoscale-go](https://github.com/knative/docs/tree/master/docs/serving/samples/autoscale-go) +sample, which must already be deployed. You can change the target by altering +the `ConfigMap` to point to a different endpoint. 
+ +### Examining output + +Once the load generation pods terminate, their outputs can be examined with: + +```shell +for x in $(kubectl get pods -l app=load-test -oname); do + kubectl logs $x | python -mjson.tool +done +``` + +This will produce a series of pretty-printed JSON blocks like: + +```json +{ + "bytes_in": { + "mean": 38.15242083333333, + "total": 9156581 + }, + "bytes_out": { + "mean": 0, + "total": 0 + }, + "duration": 240001544213, + "earliest": "2019-06-29T22:49:57.272758595Z", + "end": "2019-06-29T22:53:57.399043387Z", + "errors": [ + "503 Service Unavailable", + "502 Bad Gateway", + "Get http://autoscale-go.default.svc.cluster.local?sleep=100: net/http: request canceled (Client.Timeout exceeded while awaiting headers)" + ], + "latencies": { + "50th": 102296894, + "95th": 29927947157, + "99th": 30000272067, + "max": 30186427377, + "mean": 2483484840, + "total": 596036361667202 + }, + "latest": "2019-06-29T22:53:57.274302808Z", + "rate": 999.9935658205657, + "requests": 240000, + "status_codes": { + "0": 12302, + "200": 185803, + "502": 7, + "503": 41888 + }, + "success": 0.7741791666666666, + "wait": 124740579 +} +``` diff --git a/test/vendor/knative.dev/serving/test/performance/config/config-mako.yaml b/test/vendor/knative.dev/serving/test/performance/config/config-mako.yaml new file mode 100644 index 0000000000..91b25069c6 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/config/config-mako.yaml @@ -0,0 +1,47 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-mako + +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + + # The Mako environment in which we are running. + # Only our performance automation should run in "prod", but + # there should be a "dev" environment with a fairly broad + # write ACL. Users can also develop against custom configurations + # by adding `foo.config` under their benchmark's kodata directory. + environment: dev + + # Additional tags to tag the runs. These tags are added + # to the list that the binary itself publishes (Kubernetes version, etc). + # It is a comma separated list of tags. + additionalTags: "key=value,absolute" diff --git a/test/vendor/knative.dev/serving/test/performance/config/job.yaml b/test/vendor/knative.dev/serving/test/performance/config/job.yaml new file mode 100644 index 0000000000..e81c3a877e --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/config/job.yaml @@ -0,0 +1,52 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: vegeta-payload +data: + payload: | + GET http://autoscale-go.default.svc.cluster.local?sleep=100 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: load-test + namespace: default +spec: + parallelism: 1 + template: + metadata: + labels: + app: load-test + spec: + containers: + - name: vegeta + image: knative.dev/serving/vendor/github.com/tsenart/vegeta + command: ["/bin/bash", "-c"] + args: + - "/ko-app/vegeta -cpus=1 attack -duration=4m -rate=1000/1s -targets=/var/vegeta/payload | /ko-app/vegeta report -type=json" + resources: + requests: + cpu: 1000m + memory: 3Gi + volumeMounts: + - name: vegeta-payload + mountPath: /var/vegeta + volumes: + - name: vegeta-payload + configMap: + name: vegeta-payload + restartPolicy: Never diff --git a/test/vendor/knative.dev/serving/test/performance/dev.md b/test/vendor/knative.dev/serving/test/performance/dev.md new file mode 100644 index 0000000000..1461596240 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/dev.md @@ -0,0 +1,85 @@ +# How to start with Mako + +This document describes how to start running writing benchmarks with mako on +GKE. + +## Preconditions + +- Assume cluster exists with istio lean and serving installed. +- ko is installed +- gcloud is installed + +## Steps + +Take `dataplane-probe` benchmark for example: + +1. Apply + [mako config](https://github.com/knative/serving/blob/master/test/performance/config/config-mako.yaml) + + ```shell + kubectl apply -f test/performance/config/config-mako.yaml + ``` + +1. 
Create an IAM service account: + + ```shell + gcloud iam service-accounts create mako-upload + ``` + +1. Add the IAM service account + [here](https://github.com/knative/serving/blob/d73bb8378cab8bb0c1825aa9802bea9ea2e6cb26/test/performance/benchmarks/dataplane-probe/continuous/dev.config#L20) + (A current owner must apply this before things will work and the SA must be + whitelisted) then run: + + ```shell + mako update_benchmark test/performance/benchmarks/dataplane-probe/dev.config + ``` + +1. Create a JSON key for it. + + ```shell + gcloud iam service-accounts keys create robot.json \ + --iam-account=mako-upload@${PROJECT_ID}.iam.gserviceaccount.com + ``` + +1. Create a secret with it: + + ```shell + kubectl create secret generic mako-secrets --from-file=./robot.json + ``` + +1. Patch istio: + + ```shell + kubectl patch hpa -n istio-system istio-ingressgateway \ + --patch '{"spec": {"minReplicas": 10, "maxReplicas": 10}}' + kubectl patch deploy -n istio-system cluster-local-gateway \ + --patch '{"spec": {"replicas": 10}}' + ``` + +1. Patch knative: + + ```shell + kubectl patch hpa -n knative-serving activator --patch '{"spec": {"minReplicas": 10}}' + ``` + +1. Apply `setup` for benchmark: + + ```shell + ko apply -f test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe-setup.yaml + ``` + +1. Wait for above to stabilize + +1. Attach your desired tags to the runs by editing the config map, see the + `_example` stanza for how. + + ```shell + kubectl edit cm config-mako + ``` + +1. 
Apply the benchmark cron: + + ```gcloud + ko apply -f test/performance/benchmarks/dataplane-probe/continuous/dataplane-probe.yaml + ``` diff --git a/test/vendor/knative.dev/serving/test/performance/latency_test.go b/test/vendor/knative.dev/serving/test/performance/latency_test.go new file mode 100644 index 0000000000..fbe88c5e63 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/latency_test.go @@ -0,0 +1,127 @@ +// +build performance + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// latency_test.go brings up a helloworld app and gets the latency metric + +package performance + +import ( + "fmt" + "net/http" + "net/url" + "testing" + "time" + + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + "knative.dev/test-infra/shared/junit" + perf "knative.dev/test-infra/shared/performance" + "knative.dev/test-infra/shared/testgrid" + + vegeta "github.com/tsenart/vegeta/lib" +) + +const ( + sleepTime = 1 * time.Minute + // sleepReqTimeout should be > sleepTime. 
Else, the request will time out before receiving the response + sleepReqTimeout = 2 * time.Minute + hwReqtimeout = 30 * time.Second + baseQPS = 10 +) + +func timeToServe(t *testing.T, img, query string, reqTimeout time.Duration) { + t.Helper() + tName := t.Name() + perfClients, err := Setup(t) + if err != nil { + t.Fatalf("Cannot initialize performance client: %v", err) + } + + clients := perfClients.E2EClients + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: img, + } + + defer TearDown(perfClients, names, t.Logf) + test.CleanupOnInterrupt(func() { TearDown(perfClients, names, t.Logf) }) + + t.Log("Creating a new Service") + objs, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false /* https TODO(taragu) turn this on after helloworld test running with https */) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + + routeURL := objs.Route.Status.URL.URL() + if _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + routeURL, + v1a1test.RetryingRouteInconsistency(pkgTest.IsStatusOK), + "WaitForSuccessfulResponse", + test.ServingFlags.ResolvableDomain); err != nil { + t.Fatalf("Error probing %s: %v", routeURL, err) + } + + endpoint, err := spoof.ResolveEndpoint(clients.KubeClient.Kube, routeURL.Hostname(), test.ServingFlags.ResolvableDomain, + pkgTest.Flags.IngressEndpoint) + if err != nil { + t.Fatalf("Cannot resolve service endpoint: %v", err) + } + + u, _ := url.Parse(routeURL.String()) + u.Host = endpoint + pacer := vegeta.ConstantPacer{Freq: baseQPS, Per: time.Second} + targeter := vegeta.NewStaticTargeter(vegeta.Target{ + Method: http.MethodGet, + Header: resolvedHeaders(routeURL.Hostname(), test.ServingFlags.ResolvableDomain), + URL: u.String() + "?" 
+ query, + }) + attacker := vegeta.NewAttacker() + + var metrics vegeta.Metrics + for res := range attacker.Attack(targeter, pacer, duration, tName) { + metrics.Add(res) + } + metrics.Close() + + var tc []junit.TestCase + // Add latency metrics + tc = append(tc, perf.CreatePerfTestCase(float32(metrics.Latencies.P50.Seconds()*1000), "p50(ms)", tName)) + tc = append(tc, perf.CreatePerfTestCase(float32(metrics.Latencies.Quantile(0.90).Seconds()*1000), "p90(ms)", tName)) + tc = append(tc, perf.CreatePerfTestCase(float32(metrics.Latencies.P99.Seconds()*1000), "p99(ms)", tName)) + + if err = testgrid.CreateXMLOutput(tc, tName); err != nil { + t.Fatalf("Cannot create output xml: %v", err) + } +} + +// Performs perf test on the hello world app +func TestTimeToServeLatency(t *testing.T) { + timeToServe(t, "helloworld", "", hwReqtimeout) +} + +// Performs perf testing on a long running app. +// It uses the timeout app that sleeps for the specified amount of time. +func TestTimeToServeLatencyLongRunning(t *testing.T) { + q := fmt.Sprintf("timeout=%d", sleepTime.Milliseconds()) + timeToServe(t, "timeout", q, sleepReqTimeout) +} diff --git a/test/vendor/knative.dev/serving/test/performance/metrics/request.go b/test/vendor/knative.dev/serving/test/performance/metrics/request.go new file mode 100644 index 0000000000..ef432cbeb9 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/metrics/request.go @@ -0,0 +1,67 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/google/mako/go/quickstore" + vegeta "github.com/tsenart/vegeta/lib" + + "knative.dev/pkg/test/mako" +) + +// AggregateResult is the aggregated result of requests for better visualization. +type AggregateResult struct { + // ErrorRates is a map that saves the number of errors for each timestamp (in secs) + ErrorRates map[int64]int64 + // RequestRates is a map that saves the number of requests for each timestamp (in secs) + RequestRates map[int64]int64 +} + +// NewAggregateResult returns the pointer of a new AggregateResult object. +func NewAggregateResult(initialSize int) *AggregateResult { + return &AggregateResult{ + ErrorRates: make(map[int64]int64, initialSize), + RequestRates: make(map[int64]int64, initialSize), + } +} + +// HandleResult will handle the attack result by: +// 1. Adding its latency as a sample point if no error, or adding it as an error if there is +// 2. Updating the aggregate results +func HandleResult(q *quickstore.Quickstore, res vegeta.Result, latencyKey string, ar *AggregateResult) { + // Handle the result by reporting an error or a latency sample point. + var isAnError int64 + if res.Error != "" { + // By reporting errors like this the error strings show up on + // the details page for each Mako run. + q.AddError(mako.XTime(res.Timestamp), res.Error) + isAnError = 1 + } else { + // Add a sample points for the target benchmark's latency stat + // with the latency of the request this result is for. + q.AddSamplePoint(mako.XTime(res.Timestamp), map[string]float64{ + latencyKey: res.Latency.Seconds(), + }) + isAnError = 0 + } + + // Update our error and request rates. + // We handle errors this way to force zero values into every time for + // which we have data, even if there is no error. 
+ ar.ErrorRates[res.Timestamp.Unix()] += isAnError + ar.RequestRates[res.Timestamp.Unix()]++ +} diff --git a/test/vendor/knative.dev/serving/test/performance/metrics/runtime.go b/test/vendor/knative.dev/serving/test/performance/metrics/runtime.go new file mode 100644 index 0000000000..774f8d369c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/metrics/runtime.go @@ -0,0 +1,144 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "log" + "time" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/labels" + + deploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" + netv1alpha1 "knative.dev/serving/pkg/apis/networking/v1alpha1" + sksinformer "knative.dev/serving/pkg/client/injection/informers/networking/v1alpha1/serverlessservice" + + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +// DeploymentStatus is a struct that wraps the status of a deployment. 
+type DeploymentStatus struct { + DesiredReplicas int32 + ReadyReplicas int32 + // Time is the time when the status is fetched + Time time.Time +} + +// FetchDeploymentsStatus creates a channel that can return the up-to-date DeploymentStatus periodically, +// selected via a label selector (can be more than one deployment). +func FetchDeploymentsStatus( + ctx context.Context, namespace string, selector labels.Selector, + duration time.Duration, +) <-chan DeploymentStatus { + dl := deploymentinformer.Get(ctx).Lister() + return fetchStatusInternal(ctx, duration, func() ([]*appsv1.Deployment, error) { + return dl.Deployments(namespace).List(selector) + }) +} + +// FetchDeploymentStatus creates a channel that can return the up-to-date DeploymentStatus periodically, +// selected via deployment name (at most one deployment). +func FetchDeploymentStatus( + ctx context.Context, namespace, name string, duration time.Duration, +) <-chan DeploymentStatus { + dl := deploymentinformer.Get(ctx).Lister() + return fetchStatusInternal(ctx, duration, func() ([]*appsv1.Deployment, error) { + d, err := dl.Deployments(namespace).Get(name) + if err != nil { + return []*appsv1.Deployment{}, err + } + return []*appsv1.Deployment{d}, nil + }) +} + +func fetchStatusInternal(ctx context.Context, duration time.Duration, + f func() ([]*appsv1.Deployment, error)) <-chan DeploymentStatus { + ch := make(chan DeploymentStatus) + startTick(duration, ctx.Done(), func(t time.Time) error { + // Overlay the desired and ready pod counts. + deployments, err := f() + if err != nil { + log.Printf("Error getting deployment(s): %v", err) + return err + } + + for _, d := range deployments { + ds := DeploymentStatus{ + DesiredReplicas: *d.Spec.Replicas, + ReadyReplicas: d.Status.ReadyReplicas, + Time: t, + } + ch <- ds + } + return nil + }) + return ch +} + +// ServerlessServiceStatus is a struct that wraps the status of a serverless service. 
+type ServerlessServiceStatus struct { + Mode netv1alpha1.ServerlessServiceOperationMode + // Time is the time when the status is fetched + Time time.Time +} + +// FetchSKSMode creates a channel that can return the up-to-date ServerlessServiceOperationMode periodically. +func FetchSKSMode( + ctx context.Context, namespace string, selector labels.Selector, + duration time.Duration, +) <-chan ServerlessServiceStatus { + sksl := sksinformer.Get(ctx).Lister() + ch := make(chan ServerlessServiceStatus) + startTick(duration, ctx.Done(), func(t time.Time) error { + // Overlay the SKS "mode". + skses, err := sksl.ServerlessServices(namespace).List(selector) + if err != nil { + log.Printf("Error listing serverless services: %v", err) + return err + } + for _, sks := range skses { + skss := ServerlessServiceStatus{ + Mode: sks.Spec.Mode, + Time: t, + } + ch <- skss + } + return nil + }) + + return ch +} + +func startTick(duration time.Duration, stop <-chan struct{}, action func(t time.Time) error) { + ticker := time.NewTicker(duration) + go func() { + defer ticker.Stop() + for { + select { + case t := <-ticker.C: + if err := action(t); err != nil { + return + } + case <-stop: + return + } + } + }() +} diff --git a/test/vendor/knative.dev/serving/test/performance/observed_concurency_test.go b/test/vendor/knative.dev/serving/test/performance/observed_concurency_test.go new file mode 100644 index 0000000000..62f919ba36 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/observed_concurency_test.go @@ -0,0 +1,226 @@ +// +build performance + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package performance + +import ( + "fmt" + "net/http" + "sort" + "strconv" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "golang.org/x/sync/errgroup" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/spoof" + v1a1opts "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" + "knative.dev/test-infra/shared/junit" + perf "knative.dev/test-infra/shared/performance" + "knative.dev/test-infra/shared/testgrid" +) + +// generateTraffic loads the given endpoint with the given concurrency for the given duration. +// All responses are forwarded to a channel, if given. +func generateTraffic(t *testing.T, client *spoof.SpoofingClient, url string, concurrency int, duration time.Duration, resChannel chan *spoof.Response) error { + var group errgroup.Group + // Notify the consumer about the end of the data stream. 
+ defer close(resChannel) + + for i := 0; i < concurrency; i++ { + group.Go(func() error { + done := time.After(duration) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("error creating http request: %w", err) + } + for { + select { + case <-done: + return nil + default: + res, err := client.Do(req) + if err != nil { + t.Logf("Error sending request: %v", err) + } + resChannel <- res + } + } + }) + } + + if err := group.Wait(); err != nil { + return fmt.Errorf("error making requests for scale up: %w", err) + } + return nil +} + +// event represents the start or end of a request +type event struct { + concurrencyModifier int + timestamp time.Time +} + +// parseResponse parses a string of the form TimeInNano,TimeInNano into the respective +// start and end event +func parseResponse(body string) (*event, *event, error) { + body = strings.TrimSpace(body) + parts := strings.Split(body, ",") + + if len(parts) < 2 { + return nil, nil, fmt.Errorf("not enough parts in body, got %q", body) + } + + start, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse start timestamp, body %q: %w", body, err) + } + + end, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse end timestamp, body %q: %w", body, err) + } + + startEvent := &event{1, time.Unix(0, int64(start))} + endEvent := &event{-1, time.Unix(0, int64(end))} + + return startEvent, endEvent, nil +} + +// timeToScale calculates the time it took to scale to a given scale, starting from a given +// time. Returns an error if that scale was never reached. 
+func timeToScale(events []*event, desiredScale int) (time.Duration, error) { + var currentConcurrency int + start := events[0].timestamp + for _, event := range events { + currentConcurrency += event.concurrencyModifier + if currentConcurrency == desiredScale { + return event.timestamp.Sub(start), nil + } + } + + return 0, fmt.Errorf("desired scale of %d was never reached", desiredScale) +} + +func TestObservedConcurrency(t *testing.T) { + var tc []junit.TestCase + tests := []int{5, 10, 15} //going beyond 15 currently causes "overload" responses + for _, clients := range tests { + t.Run(fmt.Sprintf("scale-%02d", clients), func(t *testing.T) { + tc = append(tc, testConcurrencyN(t, clients)...) + }) + } + if err := testgrid.CreateXMLOutput(tc, t.Name()); err != nil { + t.Fatalf("Cannot create output xml: %v", err) + } +} + +func testConcurrencyN(t *testing.T, concurrency int) []junit.TestCase { + perfClients, err := Setup(t) + if err != nil { + t.Fatalf("Cannot initialize performance client: %v", err) + } + + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: "observed-concurrency", + } + clients := perfClients.E2EClients + + defer TearDown(perfClients, names, t.Logf) + test.CleanupOnInterrupt(func() { TearDown(perfClients, names, t.Logf) }) + + t.Log("Creating a new Service") + objs, _, err := v1a1test.CreateRunLatestServiceReady(t, clients, &names, + false, /* https TODO(taragu) turn this on after helloworld test running with https */ + v1a1opts.WithResourceRequirements(corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20Mi"), + }, + }), + v1a1opts.WithContainerConcurrency(1)) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + + domain := objs.Route.Status.URL.Host + url := fmt.Sprintf("http://%s/?timeout=1000", domain) + client, err := pkgTest.NewSpoofingClient(clients.KubeClient, t.Logf, domain, 
test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatalf("Error creating spoofing client: %v", err) + } + + // This just helps with preallocation. + const presumedSize = 1000 + + eg := errgroup.Group{} + responseChannel := make(chan *spoof.Response, presumedSize) + events := make([]*event, 0, presumedSize) + failedRequests := 0 + + t.Logf("Running %d concurrent requests for %v", concurrency, duration) + eg.Go(func() error { + return generateTraffic(t, client, url, concurrency, duration, responseChannel) + }) + eg.Go(func() error { + for response := range responseChannel { + if response == nil { + failedRequests++ + continue + } + start, end, err := parseResponse(string(response.Body)) + if err != nil { + t.Logf("Failed to parse the body: %v", err) + failedRequests++ + continue + } + events = append(events, start, end) + } + // Sort all events by their timestamp. + sort.Slice(events, func(i, j int) bool { + return events[i].timestamp.Before(events[j].timestamp) + }) + return nil + }) + + if err := eg.Wait(); err != nil { + t.Fatalf("Failed to generate traffic and process responses: %v", err) + } + t.Logf("Generated %d requests with %d failed", len(events)+failedRequests, failedRequests) + + var tc []junit.TestCase + for i := 2; i <= concurrency; i++ { + toConcurrency, err := timeToScale(events, i) + if err != nil { + t.Logf("Never scaled to %d", i) + } else { + t.Logf("Took %v to scale to %d", toConcurrency, i) + tc = append(tc, perf.CreatePerfTestCase(float32(toConcurrency.Milliseconds()), fmt.Sprintf("to%d(ms)", i), t.Name())) + } + } + tc = append(tc, perf.CreatePerfTestCase(float32(failedRequests), "failed requests", t.Name())) + + return tc +} diff --git a/test/vendor/knative.dev/serving/test/performance/performance-tests.sh b/test/vendor/knative.dev/serving/test/performance/performance-tests.sh new file mode 100755 index 0000000000..a577c03e24 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/performance-tests.sh @@ -0,0 +1,104 @@ 
+#!/bin/bash

+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# performance-tests.sh is added to manage all clusters that run the performance
+# benchmarks in serving repo, it is ONLY intended to be run by Prow, users
+# should NOT run it manually.
+
+# Setup env vars to override the default settings
+export BENCHMARK_ROOT_PATH="$GOPATH/src/knative.dev/serving/test/performance/benchmarks"
+
+source vendor/knative.dev/test-infra/scripts/performance-tests.sh
+
+function update_knative() {
+  local istio_version="istio-1.4-latest"
+  # Mako needs to escape '.' in tags. Use '_' instead.
+  local istio_version_escaped=${istio_version//./_}
+
+  pushd .
+  cd ${GOPATH}/src/knative.dev
+  echo ">> Update istio"
+  # Some istio pods occasionally get overloaded and die, delete all deployments
+  # and services from istio before reinstalling it, to get them freshly recreated
+  kubectl delete deployments --all -n istio-system
+  kubectl delete services --all -n istio-system
+  kubectl apply -f serving/third_party/$istio_version/istio-crds.yaml || abort "Failed to apply istio-crds"
+  kubectl apply -f serving/third_party/$istio_version/istio-ci-no-mesh.yaml || abort "Failed to apply istio-ci-no-mesh"
+
+  # Overprovision the Istio gateways and pilot.
+ kubectl patch hpa -n istio-system istio-ingressgateway \ + --patch '{"spec": {"minReplicas": 10, "maxReplicas": 10}}' + kubectl patch deploy -n istio-system cluster-local-gateway \ + --patch '{"spec": {"replicas": 10}}' + + echo ">> Updating serving" + # Retry installation for at most three times as there can sometime be a race condition when applying serving CRDs + local n=0 + until [ $n -ge 3 ] + do + ko apply -f serving/config/ && break + n=$[$n+1] + done + if [ $n == 3 ]; then + abort "Failed to patch serving" + fi + popd + + # Update the activator hpa minReplicas to 10 + kubectl patch hpa -n knative-serving activator \ + --patch '{"spec": {"minReplicas": 10}}' + # Update the scale-to-zero grace period to 10s + kubectl patch configmap/config-autoscaler \ + -n knative-serving \ + --type merge \ + -p '{"data":{"scale-to-zero-grace-period":"10s"}}' + + echo ">> Setting up 'prod' config-mako" + cat <> Deleting all the yamls for benchmark $1" + ko delete -f ${BENCHMARK_ROOT_PATH}/$1/continuous --ignore-not-found=true + echo ">> Deleting all Knative serving services" + kubectl delete ksvc --all + + echo ">> Applying all the yamls for benchmark $1" + ko apply -f ${BENCHMARK_ROOT_PATH}/$1/continuous || abort "failed to apply benchmarks yaml $1" +} + +main $@ diff --git a/test/vendor/knative.dev/serving/test/performance/performance.go b/test/vendor/knative.dev/serving/test/performance/performance.go new file mode 100644 index 0000000000..235be8907a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/performance.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package performance + +import ( + "context" + "fmt" + "log" + "net/http" + "testing" + "time" + + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + deploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/prometheus" + "knative.dev/serving/test" + + "knative.dev/pkg/test/spoof" + + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +const ( + monitoringNS = "knative-monitoring" + // Property name used by testgrid. + perfLatency = "perf_latency" + duration = 1 * time.Minute +) + +// Enable monitoring components +const ( + EnablePrometheus = iota +) + +// Client is the client used in the performance tests. 
+type Client struct { + E2EClients *test.Clients + PromClient *prometheus.PromProxy +} + +// Setup creates all the clients that we need to interact with in our tests +func Setup(t *testing.T, monitoring ...int) (*Client, error) { + pkgTest.SetupLoggingFlags() + clients, err := test.NewClients(pkgTest.Flags.Kubeconfig, pkgTest.Flags.Cluster, test.ServingNamespace) + if err != nil { + return nil, err + } + + var p *prometheus.PromProxy + for _, m := range monitoring { + switch m { + case EnablePrometheus: + t.Log("Creating prometheus proxy client") + p = &prometheus.PromProxy{Namespace: monitoringNS} + p.Setup(clients.KubeClient.Kube, t.Logf) + default: + t.Log("No monitoring components enabled") + } + } + + return &Client{E2EClients: clients, PromClient: p}, nil +} + +// TearDown cleans up resources used +func TearDown(client *Client, names test.ResourceNames, logf logging.FormatLogger) { + test.TearDown(client.E2EClients, names) + + // Teardown prometheus client + if client.PromClient != nil { + client.PromClient.Teardown(logf) + } +} + +// ProbeTargetTillReady will probe the target once per second for the given duration, until it's ready or error happens +func ProbeTargetTillReady(target string, duration time.Duration) error { + // Make sure the target is ready before sending the large amount of requests. 
+ spoofingClient := spoof.SpoofingClient{ + Client: &http.Client{}, + RequestInterval: 1 * time.Second, + RequestTimeout: duration, + Logf: func(fmt string, args ...interface{}) { + log.Printf(fmt, args) + }, + } + req, err := http.NewRequest(http.MethodGet, target, nil) + if err != nil { + return fmt.Errorf("target %q is invalid, cannot probe: %w", target, err) + } + if _, err = spoofingClient.Poll(req, func(resp *spoof.Response) (done bool, err error) { + return true, nil + }); err != nil { + return fmt.Errorf("failed to get target %q ready: %w", target, err) + } + return nil +} + +// WaitForScaleToZero will wait for the deployments in the indexer to scale to 0 +func WaitForScaleToZero(ctx context.Context, namespace string, selector labels.Selector, duration time.Duration) error { + dl := deploymentinformer.Get(ctx).Lister() + return wait.PollImmediate(1*time.Second, duration, func() (bool, error) { + ds, err := dl.Deployments(namespace).List(selector) + if err != nil { + return true, err + } + scaledToZero := true + for _, d := range ds { + if d.Status.ReadyReplicas != 0 { + scaledToZero = false + break + } + } + return scaledToZero, nil + }) +} + +// resolvedHeaders returns headers for the request. +func resolvedHeaders(domain string, resolvableDomain bool) map[string][]string { + headers := make(map[string][]string) + if !resolvableDomain { + headers["Host"] = []string{domain} + } + return headers +} diff --git a/test/vendor/knative.dev/serving/test/performance/profiling.md b/test/vendor/knative.dev/serving/test/performance/profiling.md new file mode 100644 index 0000000000..79915198d3 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/profiling.md @@ -0,0 +1,83 @@ +# Profiling Knative Serving + +Knative Serving allows for collecting runtime profiling data expected by the +pprof visualization tool. 
Profiling data is available for the autoscaler, +activator, controller, webhook and for the queue-proxy container which is +injected into the user application pod. When enabled Knative serves profiling +data on the default port `8008` through a web server. + +## Steps to get profiling data + +Edit the `config-observability` ConfigMap and add `profiling.enable = "true"`: + +```shell +kubectl edit configmap config-observability -n knative-serving +``` + +Use port-forwarding to get access to Knative Serving pods. For example, +activator: + +```shell +ACTIVATOR_POD=$(kubectl -n knative-serving get pods -l app=activator -o custom-columns=:metadata.name --no-headers) +kubectl port-forward -n knative-serving $ACTIVATOR_POD 8008:8008 +``` + +View all available profiles at http://localhost:8008/debug/pprof/ through a web +browser or request specific profiling data using one of the commands below: + +### Heap profile + +```shell +go tool pprof http://localhost:8008/debug/pprof/heap +``` + +### 30-second CPU profile + +```shell +go tool pprof http://localhost:8008/debug/pprof/profile?seconds=30 +``` + +### Go routine blocking profile + +```shell +go tool pprof http://localhost:8008/debug/pprof/block +``` + +### 5-second execution trace + +```shell +wget http://localhost:8008/debug/pprof/trace\?seconds\=5 && go tool trace trace\?seconds\=5 +``` + +### All memory allocations + +```shell +go tool pprof http://localhost:8008/debug/pprof/allocs +``` + +### Holders of contended mutexes + +```shell +go tool pprof http://localhost:8008/debug/pprof/mutex +``` + +### Stack traces of all current goroutines + +```shell +go tool pprof http://localhost:8008/debug/pprof/goroutine +``` + +### Stack traces that led to the creation of new OS threads + +```shell +go tool pprof http://localhost:8008/debug/pprof/threadcreate +``` + +### Command line arguments for the current program + +```shell +curl http://localhost:8008/debug/pprof/cmdline --output - +``` + +More information on profiling Go 
applications in this +[blog](https://blog.golang.org/profiling-go-programs) diff --git a/test/vendor/knative.dev/serving/test/performance/scale_test.go b/test/vendor/knative.dev/serving/test/performance/scale_test.go new file mode 100644 index 0000000000..f126c0cfa4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/performance/scale_test.go @@ -0,0 +1,164 @@ +// +build performance + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// scale_test.go brings up a number of services tracking the time to various waypoints. + +package performance + +import ( + "fmt" + "sort" + "sync" + "testing" + "time" + + "knative.dev/test-infra/shared/junit" + perf "knative.dev/test-infra/shared/performance" + "knative.dev/test-infra/shared/testgrid" + + "knative.dev/serving/test/e2e" +) + +type metrics struct { + min time.Duration + max time.Duration + totalDuration time.Duration + num int64 +} + +func (m *metrics) Add(start time.Time) { + // Compute the duration since the provided start time. 
+ d := time.Since(start) + + if d < m.min || m.min == 0 { + m.min = d + } + if d > m.max { + m.max = d + } + m.totalDuration += d + m.num++ +} + +func (m metrics) Min() float32 { + return float32(m.min.Seconds()) +} + +func (m metrics) Max() float32 { + return float32(m.max.Seconds()) +} + +func (m metrics) Avg() float32 { + if m.num == 0 { + return -1.0 + } + return float32(m.totalDuration.Seconds()) / float32(m.num) +} + +func (m metrics) Num() int64 { + return m.num +} + +type latencies struct { + // Guards access to latencies. + m sync.RWMutex + metrics map[string]metrics +} + +var _ e2e.Latencies = (*latencies)(nil) + +func (l *latencies) Add(name string, start time.Time) { + l.m.Lock() + defer l.m.Unlock() + + m := l.metrics[name] + m.Add(start) + l.metrics[name] = m +} + +func (l *latencies) Min(name string) float32 { + l.m.RLock() + defer l.m.RUnlock() + return l.metrics[name].Min() +} + +func (l *latencies) Max(name string) float32 { + l.m.RLock() + defer l.m.RUnlock() + return l.metrics[name].Max() +} + +func (l *latencies) Avg(name string) float32 { + l.m.RLock() + defer l.m.RUnlock() + return l.metrics[name].Avg() +} + +func (l *latencies) Num(name string) int64 { + l.m.RLock() + defer l.m.RUnlock() + return l.metrics[name].Num() +} + +func (l *latencies) Results(t *testing.T) []junit.TestCase { + l.m.RLock() + defer l.m.RUnlock() + + order := make([]string, 0, len(l.metrics)) + for k := range l.metrics { + order = append(order, k) + } + sort.Strings(order) + + // Add latency metrics + tc := make([]junit.TestCase, 0, 3*len(order)) + for _, key := range order { + tc = append(tc, + perf.CreatePerfTestCase(l.Min(key), fmt.Sprintf("%s.min", key), t.Name()), + perf.CreatePerfTestCase(l.Max(key), fmt.Sprintf("%s.max", key), t.Name()), + perf.CreatePerfTestCase(l.Avg(key), fmt.Sprintf("%s.avg", key), t.Name()), + perf.CreatePerfTestCase(float32(l.Num(key)), fmt.Sprintf("%s.num", key), t.Name())) + } + return tc +} + +func TestScaleToN(t *testing.T) { + // Run 
each of these variations. + tests := []int{10, 50, 100} + + // Accumulate the results from each row in our table (recorded below). + var results []junit.TestCase + + for _, size := range tests { + t.Run(fmt.Sprintf("scale-%02d", size), func(t *testing.T) { + // Record the observed latencies. + l := &latencies{ + metrics: make(map[string]metrics), + } + defer func() { + results = append(results, l.Results(t)...) + }() + + e2e.ScaleToWithin(t, size, 30*time.Minute, l) + }) + } + + if err := testgrid.CreateXMLOutput(results, t.Name()); err != nil { + t.Errorf("Cannot create output xml: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/presubmit-tests.sh b/test/vendor/knative.dev/serving/test/presubmit-tests.sh new file mode 100755 index 0000000000..3597100748 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/presubmit-tests.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs the presubmit tests; it is started by prow for each PR. +# For convenience, it can also be executed manually. +# Running the script without parameters, or with the --all-tests +# flag, causes all tests to be executed, in the right order. +# Use the flags --build-tests, --unit-tests and --integration-tests +# to run a specific set of tests. + +# Markdown linting failures don't show up properly in Gubernator resulting +# in a net-negative contributor experience. 
+export DISABLE_MD_LINTING=1 + +source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/presubmit-tests.sh + +# We use the default build, unit and integration test runners. + +main "$@" diff --git a/test/vendor/knative.dev/serving/test/prober.go b/test/vendor/knative.dev/serving/test/prober.go new file mode 100644 index 0000000000..11850c4adc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/prober.go @@ -0,0 +1,293 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// route.go provides methods to perform actions on the route resource. + +package test + +import ( + "fmt" + "net/http" + "net/url" + "sync" + + "golang.org/x/sync/errgroup" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" +) + +// Prober is the interface for a prober, which checks the result of the probes when stopped. +type Prober interface { + // SLI returns the "service level indicator" for the prober, which is the observed + // success rate of the probes. This will panic if the prober has not been stopped. + SLI() (total int64, failures int64) + + // Stop terminates the prober, returning any observed errors. + // Implementations may choose to put additional requirements on + // the prober, which may cause this to block (e.g. a minimum number + // of probes to achieve a population suitable for SLI measurement). 
+ Stop() error +} + +type prober struct { + // These shouldn't change after creation + logf logging.FormatLogger + url *url.URL + minimumProbes int64 + + // m guards access to these fields + m sync.RWMutex + requests int64 + failures int64 + stopped bool + + // This channel is used to send errors encountered probing the domain. + errCh chan error + // This channel is simply closed when minimumProbes has been satisfied. + minDoneCh chan struct{} +} + +// prober implements Prober +var _ Prober = (*prober)(nil) + +// SLI implements Prober +func (p *prober) SLI() (int64, int64) { + p.m.RLock() + defer p.m.RUnlock() + + return p.requests, p.failures +} + +// Stop implements Prober +func (p *prober) Stop() error { + // When we're done stop sending requests. + defer func() { + p.m.Lock() + defer p.m.Unlock() + p.stopped = true + }() + + // Check for any immediately available errors + select { + case err := <-p.errCh: + return err + default: + // Don't block if there are no errors immediately available. + } + + // If there aren't any immediately available errors, then + // wait for either an error or the minimum number of probes + // to be satisfied. + select { + case err := <-p.errCh: + return err + case <-p.minDoneCh: + return nil + } +} + +func (p *prober) handleResponse(response *spoof.Response) (bool, error) { + p.m.Lock() + defer p.m.Unlock() + + if p.stopped { + return p.stopped, nil + } + + p.logRequestNoLock() + if response.StatusCode != http.StatusOK { + p.logf("%q status = %d, want: %d", p.url, response.StatusCode, http.StatusOK) + p.logf("response: %s", response) + p.failures++ + } + + // Returning (false, nil) causes SpoofingClient.Poll to retry. + return false, nil +} + +func (p *prober) handleErrorRetry(err error) (bool, error) { + p.m.Lock() + defer p.m.Unlock() + + p.logRequestNoLock() + p.failures++ + + // Returning true causes SpoofingClient.Poll to retry. 
+ return true, fmt.Errorf("retry on all errors: %v", err) +} + +// logRequestNoLock should always be called after obtaining p.m.Lock(), +// thus it doesn't try to get the lock here again. +func (p *prober) logRequestNoLock() { + p.requests++ + if p.requests == p.minimumProbes { + close(p.minDoneCh) + } +} + +// ProberManager is the interface for spawning probers, and checking their results. +type ProberManager interface { + // The ProberManager should expose a way to collectively reason about spawned + // probes as a sort of aggregating Prober. + Prober + + // Spawn creates a new Prober + Spawn(url *url.URL) Prober + + // Foreach iterates over the probers spawned by this ProberManager. + Foreach(func(url *url.URL, p Prober)) +} + +type manager struct { + // Should not change after creation + logf logging.FormatLogger + clients *Clients + minProbes int64 + + m sync.RWMutex + probes map[*url.URL]Prober +} + +var _ ProberManager = (*manager)(nil) + +// Spawn implements ProberManager +func (m *manager) Spawn(url *url.URL) Prober { + m.m.Lock() + defer m.m.Unlock() + + if p, ok := m.probes[url]; ok { + return p + } + + m.logf("Starting Route prober for %s.", url) + p := &prober{ + logf: m.logf, + url: url, + minimumProbes: m.minProbes, + errCh: make(chan error, 1), + minDoneCh: make(chan struct{}), + } + m.probes[url] = p + go func() { + client, err := pkgTest.NewSpoofingClient(m.clients.KubeClient, m.logf, url.Hostname(), ServingFlags.ResolvableDomain) + if err != nil { + m.logf("NewSpoofingClient() = %v", err) + p.errCh <- err + return + } + + // RequestTimeout is set to 0 to make the polling infinite. + client.RequestTimeout = 0 + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + m.logf("NewRequest() = %v", err) + p.errCh <- err + return + } + + // We keep polling the domain and accumulate success rates + // to ultimately establish the SLI and compare to the SLO. 
+ _, err = client.Poll(req, p.handleResponse, p.handleErrorRetry) + if err != nil { + // SLO violations are not reflected as errors. They are + // captured and calculated internally. + m.logf("Poll() = %v", err) + p.errCh <- err + return + } + }() + return p +} + +// Stop implements ProberManager +func (m *manager) Stop() error { + m.m.Lock() + defer m.m.Unlock() + + m.logf("Stopping all probers") + + errgrp := &errgroup.Group{} + for _, prober := range m.probes { + errgrp.Go(prober.Stop) + } + return errgrp.Wait() +} + +// SLI implements Prober +func (m *manager) SLI() (total int64, failures int64) { + m.m.RLock() + defer m.m.RUnlock() + for _, prober := range m.probes { + pt, pf := prober.SLI() + total += pt + failures += pf + } + return +} + +// Foreach implements ProberManager +func (m *manager) Foreach(f func(url *url.URL, p Prober)) { + m.m.RLock() + defer m.m.RUnlock() + + for url, prober := range m.probes { + f(url, prober) + } +} + +// NewProberManager creates a new manager for probes. +func NewProberManager(logf logging.FormatLogger, clients *Clients, minProbes int64) ProberManager { + return &manager{ + logf: logf, + clients: clients, + minProbes: minProbes, + probes: make(map[*url.URL]Prober), + } +} + +// RunRouteProber starts a single Prober of the given domain. +func RunRouteProber(logf logging.FormatLogger, clients *Clients, url *url.URL) Prober { + // Default to 10 probes + pm := NewProberManager(logf, clients, 10) + pm.Spawn(url) + return pm +} + +// AssertProberDefault is a helper for stopping the Prober and checking its SLI +// against the default SLO, which requires perfect responses. +// This takes `testing.T` so that it may be used in `defer`. 
+func AssertProberDefault(t pkgTest.T, p Prober) { + t.Helper() + if err := p.Stop(); err != nil { + t.Error("Stop()", "error", err.Error()) + } + // Default to 100% correct (typically used in conjunction with the low probe count above) + if err := CheckSLO(1.0, t.Name(), p); err != nil { + t.Error("CheckSLO()", "error", err.Error()) + } +} + +// CheckSLO compares the SLI of the given prober against the SLO, erroring if too low. +func CheckSLO(slo float64, name string, p Prober) error { + total, failures := p.SLI() + + successRate := float64(total-failures) / float64(total) + if successRate < slo { + return fmt.Errorf("SLI for %q = %f, wanted >= %f", name, successRate, slo) + } + return nil +} diff --git a/test/vendor/knative.dev/serving/test/scale/scale_test.go b/test/vendor/knative.dev/serving/test/scale/scale_test.go new file mode 100644 index 0000000000..b58d972261 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/scale/scale_test.go @@ -0,0 +1,65 @@ +// +build e2e + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "testing" + "time" + + . 
"knative.dev/serving/test/e2e" +) + +type nopLatencies struct { + t *testing.T +} + +var _ Latencies = (*nopLatencies)(nil) + +func (nl *nopLatencies) Add(metric string, start time.Time) { + duration := time.Since(start) + + nl.t.Logf("%q took %v", metric, duration) +} + +const ( + // Limit for scale in -short mode + shortModeMaxScale = 10 + // Timeout for each worker task + workerTimeout = 5 * time.Minute +) + +// While redundant, we run two versions of this by default: +// 1. TestScaleToN/size-10: a developer smoke test that's useful when changing this to assess whether +// things have gone horribly wrong. This should take about 12-20 seconds total. +// 2. TestScaleToN/scale-100: a more proper execution of the test, which verifies a slightly more +// interesting burst of deployments, but low enough to complete in a reasonable window. +func TestScaleToN(t *testing.T) { + // Run each of these variations. + tests := []int{10, 100} + + for _, size := range tests { + t.Run(fmt.Sprintf("scale-%d", size), func(t *testing.T) { + if testing.Short() && size > shortModeMaxScale { + t.Skip("Skipping test in short mode") + } + ScaleToWithin(t, size, workerTimeout, &nopLatencies{t}) + }) + } +} diff --git a/test/vendor/knative.dev/serving/test/system.go b/test/vendor/knative.dev/serving/test/system.go new file mode 100644 index 0000000000..0cb32d8ce1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/system.go @@ -0,0 +1,27 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+	"os"
+
+	"knative.dev/pkg/system"
+)
+
+func init() {
+	os.Setenv(system.NamespaceEnvKey, "knative-serving")
+}
diff --git a/test/vendor/knative.dev/serving/test/test_images/README.md b/test/vendor/knative.dev/serving/test/test_images/README.md
new file mode 100644
index 0000000000..ec7edf939a
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/README.md
@@ -0,0 +1,7 @@
+# Test images
+
+The subdirectories contain the test images used in the conformance and e2e
+tests.
+
+For details about building and adding new images, see the
+[section about test images](/test/README.md#test-images).
diff --git a/test/vendor/knative.dev/serving/test/test_images/autoscale/README.md b/test/vendor/knative.dev/serving/test/test_images/autoscale/README.md
new file mode 100644
index 0000000000..d064bbebb4
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/autoscale/README.md
@@ -0,0 +1,31 @@
+# Autoscale test image
+
+This directory contains the test image used in the autoscale e2e tests.
+
+The image contains a simple Go webserver, `autoscale.go`, that will listen on
+port `8080` and expose a service at `/`.
+
+The service applies different modes of resource consumption based on query
+parameters.
+
+- `sleep=X` -- number of milliseconds to sleep (e.g.
+  `curl http://${URL}/?sleep=200`), alternatively duration can be specified as
+  `time.Duration`, e.g. `sleep=13s`.
+- `sleep-stddev=X` -- valid only if `sleep` is provided, sleeps for a random
+  period of time according to a normal distribution centered around `sleep` with
+  stddev equal to this value.
+- `bloat=X` -- creates a byte array size of `X*1024*1024` and assigns 1 to each
+  array value (to ensure heap allocation).
+- `prime=X` -- computes the largest prime less than `X`. Does not support
+  `X > 40000000`.
+
+## Trying out
+
+To run the image as a Service outside of the test suite:
+
+`ko apply -f service.yaml`
+
+## Building
+
+For details about building and adding new images, see the
+[section about test images](/test/README.md#test-images).
diff --git a/test/vendor/knative.dev/serving/test/test_images/autoscale/autoscale.go b/test/vendor/knative.dev/serving/test/test_images/autoscale/autoscale.go
new file mode 100644
index 0000000000..5c92cb8f63
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/autoscale/autoscale.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package main + +import ( + "fmt" + "math" + "math/rand" + "net/http" + "strconv" + "sync" + "time" + + "knative.dev/serving/test" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// Algorithm from https://stackoverflow.com/a/21854246 + +// Only primes less than or equal to N will be generated +func primes(N int) []int { + var x, y, n int + nsqrt := math.Sqrt(float64(N)) + + isPrime := make([]bool, N) + + for x = 1; float64(x) <= nsqrt; x++ { + for y = 1; float64(y) <= nsqrt; y++ { + n = 4*(x*x) + y*y + if n <= N && (n%12 == 1 || n%12 == 5) { + isPrime[n] = !isPrime[n] + } + n = 3*(x*x) + y*y + if n <= N && n%12 == 7 { + isPrime[n] = !isPrime[n] + } + n = 3*(x*x) - y*y + if x > y && n <= N && n%12 == 11 { + isPrime[n] = !isPrime[n] + } + } + } + + for n = 5; float64(n) <= nsqrt; n++ { + if isPrime[n] { + for y = n * n; y < N; y += n * n { + isPrime[y] = false + } + } + } + + isPrime[2] = true + isPrime[3] = true + + primes := make([]int, 0, 1270606) + for x = 0; x < len(isPrime)-1; x++ { + if isPrime[x] { + primes = append(primes, x) + } + } + + // primes is now a slice that contains all primes numbers up to N + return primes +} + +func bloat(mb int) string { + b := make([]byte, mb*1024*1024) + for i := 0; i < len(b); i++ { + b[i] = 1 + } + return fmt.Sprintf("Allocated %v Mb of memory.\n", mb) +} + +func prime(max int) string { + p := primes(max) + if len(p) == 0 { + return fmt.Sprintf("There are no primes smaller than %d.\n", max) + } + return fmt.Sprintf("The largest prime less than %d is %d.\n", max, p[len(p)-1]) +} + +func sleep(d time.Duration) string { + start := time.Now() + time.Sleep(d) + return fmt.Sprintf("Slept for %v.\n", time.Since(start)) +} + +func randSleep(randSleepTimeMean time.Duration, randSleepTimeStdDev int) string { + start := time.Now() + randRes := time.Duration(rand.NormFloat64()*float64(randSleepTimeStdDev))*time.Millisecond + randSleepTimeMean + time.Sleep(randRes) + return fmt.Sprintf("Randomly slept for %v.\n", 
time.Since(start)) +} + +func parseDurationParam(r *http.Request, param string) (time.Duration, bool, error) { + value := r.URL.Query().Get(param) + if value == "" { + return 0, false, nil + } + d, err := time.ParseDuration(value) + if err != nil { + return 0, false, err + } + return d, true, nil +} + +func parseIntParam(r *http.Request, param string) (int, bool, error) { + value := r.URL.Query().Get(param) + if value == "" { + return 0, false, nil + } + i, err := strconv.Atoi(value) + if err != nil { + return 0, false, err + } + return i, true, nil +} + +func handler(w http.ResponseWriter, r *http.Request) { + // Validate inputs. + var ms time.Duration + msv, hasMs, err := parseIntParam(r, "sleep") + if err != nil { + // If it is a numeric error and it's parsing error, then + // try to parse it as a duration + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrSyntax { + ms, hasMs, err = parseDurationParam(r, "sleep") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } else { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } else { + ms = time.Duration(msv) * time.Millisecond + } + if ms < 0 { + http.Error(w, "Negative query params are not supported", http.StatusBadRequest) + return + } + mssd, hasMssd, err := parseIntParam(r, "sleep-stddev") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if mssd < 0 { + http.Error(w, "Negative query params are not supported", http.StatusBadRequest) + return + } + max, hasMax, err := parseIntParam(r, "prime") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if max < 0 { + http.Error(w, "Negative query params are not supported", http.StatusBadRequest) + return + } + mb, hasMb, err := parseIntParam(r, "bloat") + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if mb < 0 { + http.Error(w, "Negative durations are not supported", http.StatusBadRequest) + 
return
+	}
+	// Consume time, cpu and memory in parallel.
+	var wg sync.WaitGroup
+	defer wg.Wait()
+	if hasMs && !hasMssd && ms > 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			fmt.Fprint(w, sleep(ms))
+		}()
+	}
+	if hasMs && hasMssd && ms > 0 && mssd > 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			fmt.Fprint(w, randSleep(ms, mssd))
+		}()
+	}
+	if hasMax && max > 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			fmt.Fprint(w, prime(max))
+		}()
+	}
+	if hasMb && mb > 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			fmt.Fprint(w, bloat(mb))
+		}()
+	}
+}
+
+func main() {
+	test.ListenAndServeGracefully(":8080", handler)
+}
diff --git a/test/vendor/knative.dev/serving/test/test_images/autoscale/service.yaml b/test/vendor/knative.dev/serving/test/test_images/autoscale/service.yaml
new file mode 100644
index 0000000000..69b3231efe
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/autoscale/service.yaml
@@ -0,0 +1,10 @@
+apiVersion: serving.knative.dev/v1
+kind: Service
+metadata:
+  name: autoscale-test-image
+  namespace: default
+spec:
+  template:
+    spec:
+      containers:
+      - image: knative.dev/serving/test/test_images/autoscale
diff --git a/test/vendor/knative.dev/serving/test/test_images/failing/README.md b/test/vendor/knative.dev/serving/test/test_images/failing/README.md
new file mode 100644
index 0000000000..3425d68fef
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/failing/README.md
@@ -0,0 +1,17 @@
+# Failing test image
+
+This directory contains the test image used to simulate a crashing image.
+
+The image runs for 10 seconds and then exits. It is useful for testing readiness
+probes.
+
+## Trying out
+
+To run the image as a Service outside of the test suite:
+
+`ko apply -f service.yaml`
+
+## Building
+
+For details about building and adding new images, see the
+[section about test images](/test/README.md#test-images).
diff --git a/test/vendor/knative.dev/serving/test/test_images/failing/failing.go b/test/vendor/knative.dev/serving/test/test_images/failing/failing.go new file mode 100644 index 0000000000..6d35ef6c5c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/failing/failing.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "log" + "os" + "time" +) + +func main() { + log.Println("Started...") + + // Sleep for 10 seconds to force a race condition, where this + // container becomes ready if no readinessProbe is set. 
+	time.Sleep(10 * time.Second)
+
+	log.Println("Crashed...")
+	os.Exit(5)
+}
diff --git a/test/vendor/knative.dev/serving/test/test_images/failing/service.yaml b/test/vendor/knative.dev/serving/test/test_images/failing/service.yaml
new file mode 100644
index 0000000000..d45db66738
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/failing/service.yaml
@@ -0,0 +1,10 @@
+apiVersion: serving.knative.dev/v1
+kind: Service
+metadata:
+  name: failing-test-image
+  namespace: default
+spec:
+  template:
+    spec:
+      containers:
+      - image: knative.dev/serving/test/test_images/failing
diff --git a/test/vendor/knative.dev/serving/test/test_images/flaky/README.md b/test/vendor/knative.dev/serving/test/test_images/flaky/README.md
new file mode 100644
index 0000000000..736e47aac3
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/flaky/README.md
@@ -0,0 +1,16 @@
+# Flaky test image
+
+The image contains a simple Go webserver, `main.go`, that will only succeed
+every Nth request. The value of N is specified in the PERIOD environment
+variable.
+
+## Trying out
+
+To run the image as a Service outside of the test suite:
+
+`ko apply -f service.yaml`
+
+## Building
+
+For details about building and adding new images, see the
+[section about test images](/test/README.md#test-images).
diff --git a/test/vendor/knative.dev/serving/test/test_images/flaky/main.go b/test/vendor/knative.dev/serving/test/test_images/flaky/main.go
new file mode 100644
index 0000000000..d3816bc7a6
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/test_images/flaky/main.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "log" + "net/http" + "os" + "strconv" + "sync/atomic" + + "knative.dev/serving/pkg/network" + "knative.dev/serving/test" +) + +var ( + period uint64 + count uint64 +) + +func handler(w http.ResponseWriter, r *http.Request) { + // Always succeed probes. + if network.IsKubeletProbe(r) { + w.WriteHeader(http.StatusOK) + return + } + + // Increment the request count per non-probe request. + val := atomic.AddUint64(&count, 1) + + if val%period > 0 { + w.WriteHeader(http.StatusInternalServerError) + } + w.Write([]byte(fmt.Sprintf("count = %d", val))) +} + +func main() { + p, err := strconv.Atoi(os.Getenv("PERIOD")) + if err != nil { + log.Fatal("Must specify PERIOD environment variable.") + } else if p < 1 { + log.Fatalf("Period must be positive, got: %d", p) + } + period = uint64(p) + h := network.NewProbeHandler(http.HandlerFunc(handler)) + test.ListenAndServeGracefully(":"+os.Getenv("PORT"), h.ServeHTTP) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/flaky/service.yaml b/test/vendor/knative.dev/serving/test/test_images/flaky/service.yaml new file mode 100644 index 0000000000..4bc94a6a09 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/flaky/service.yaml @@ -0,0 +1,14 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: flaky-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/flaky + env: + # Succeed every Nth request. 
+ - name: PERIOD + value: "5" diff --git a/test/vendor/knative.dev/serving/test/test_images/grpc-ping/main.go b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/main.go new file mode 100644 index 0000000000..25b90cb0dd --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/main.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "io" + "log" + "net/http" + "os" + + "google.golang.org/grpc" + + "knative.dev/pkg/network" + servingnetwork "knative.dev/serving/pkg/network" + ping "knative.dev/serving/test/test_images/grpc-ping/proto" +) + +func pong(req *ping.Request) *ping.Response { + return &ping.Response{Msg: req.Msg + os.Getenv("SUFFIX")} +} + +type server struct{} + +func (s *server) Ping(ctx context.Context, req *ping.Request) (*ping.Response, error) { + log.Printf("Received ping: %v", req.Msg) + + resp := pong(req) + + log.Printf("Sending pong: %v", resp.Msg) + return resp, nil +} + +func (s *server) PingStream(stream ping.PingService_PingStreamServer) error { + log.Printf("Starting stream") + for { + req, err := stream.Recv() + if err == io.EOF { + log.Printf("Ending stream") + return nil + } + if err != nil { + log.Printf("Failed to receive ping: %v", err) + return err + } + + log.Printf("Received ping: %v", req.Msg) + + resp := pong(req) + + log.Printf("Sending pong: %v", resp.Msg) + err = stream.Send(resp) + if err != nil { + log.Printf("Failed to 
send pong: %v", err) + return err + } + } +} + +func httpWrapper(g *grpc.Server) http.Handler { + return servingnetwork.NewProbeHandler( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && r.Header.Get("Content-Type") == "application/grpc" { + g.ServeHTTP(w, r) + } + }), + ) +} + +func main() { + log.Printf("Starting server on %s", os.Getenv("PORT")) + + g := grpc.NewServer() + s := network.NewServer(":"+os.Getenv("PORT"), httpWrapper(g)) + + ping.RegisterPingServiceServer(g, &server{}) + + log.Fatal(s.ListenAndServe()) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.pb.go b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.pb.go new file mode 100644 index 0000000000..49615ae72f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.pb.go @@ -0,0 +1,211 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: ping.proto + +package ping + +import ( + fmt "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Request struct { + Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Request) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +type Response struct { + Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Response) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func init() { + proto.RegisterType((*Request)(nil), "ping.Request") + proto.RegisterType((*Response)(nil), "ping.Response") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for PingService service + +type PingServiceClient interface { + Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) + PingStream(ctx context.Context, opts ...grpc.CallOption) (PingService_PingStreamClient, error) +} + +type pingServiceClient struct { + cc *grpc.ClientConn +} + +func NewPingServiceClient(cc *grpc.ClientConn) PingServiceClient { + return &pingServiceClient{cc} +} + +func (c *pingServiceClient) Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) { + out := new(Response) + err := grpc.Invoke(ctx, "/ping.PingService/Ping", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *pingServiceClient) PingStream(ctx context.Context, opts ...grpc.CallOption) (PingService_PingStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_PingService_serviceDesc.Streams[0], c.cc, "/ping.PingService/PingStream", opts...) + if err != nil { + return nil, err + } + x := &pingServicePingStreamClient{stream} + return x, nil +} + +type PingService_PingStreamClient interface { + Send(*Request) error + Recv() (*Response, error) + grpc.ClientStream +} + +type pingServicePingStreamClient struct { + grpc.ClientStream +} + +func (x *pingServicePingStreamClient) Send(m *Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *pingServicePingStreamClient) Recv() (*Response, error) { + m := new(Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for PingService service + +type PingServiceServer interface { + Ping(context.Context, *Request) (*Response, error) + PingStream(PingService_PingStreamServer) error +} + +func RegisterPingServiceServer(s *grpc.Server, srv PingServiceServer) { + s.RegisterService(&_PingService_serviceDesc, srv) +} + +func _PingService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PingServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ping.PingService/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PingServiceServer).Ping(ctx, req.(*Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _PingService_PingStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PingServiceServer).PingStream(&pingServicePingStreamServer{stream}) +} + +type 
PingService_PingStreamServer interface { + Send(*Response) error + Recv() (*Request, error) + grpc.ServerStream +} + +type pingServicePingStreamServer struct { + grpc.ServerStream +} + +func (x *pingServicePingStreamServer) Send(m *Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *pingServicePingStreamServer) Recv() (*Request, error) { + m := new(Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _PingService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ping.PingService", + HandlerType: (*PingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _PingService_Ping_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PingStream", + Handler: _PingService_PingStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "ping.proto", +} + +func init() { proto.RegisterFile("ping.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 139 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x01, 0xb1, 0x95, 0xa4, 0xb9, 0xd8, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x04, 0xb8, 0x98, 0x73, 0x8b, 0xd3, 0x25, 0x18, 0x15, 0x18, + 0x35, 0x38, 0x83, 0x40, 0x4c, 0x25, 0x19, 0x2e, 0x8e, 0xa0, 0xd4, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, + 0x54, 0x4c, 0x59, 0xa3, 0x4c, 0x2e, 0xee, 0x80, 0xcc, 0xbc, 0xf4, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, + 0xe4, 0x54, 0x21, 0x75, 0x2e, 0x16, 0x10, 0x57, 0x88, 0x57, 0x0f, 0x6c, 0x09, 0xd4, 0x54, 0x29, + 0x3e, 0x18, 0x17, 0x62, 0x8e, 0x12, 0x83, 0x90, 0x21, 0x17, 0x17, 0x58, 0x5f, 0x49, 0x51, 0x6a, + 0x62, 0x2e, 0x41, 0xe5, 0x1a, 0x8c, 0x06, 0x8c, 0x49, 0x6c, 0x60, 0x27, 0x1b, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x85, 0x87, 0x57, 0xf8, 0xc0, 0x00, 0x00, 0x00, +} diff --git a/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.proto 
b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.proto new file mode 100644 index 0000000000..88cd7f7fcc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/proto/ping.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package ping; + +service PingService { + rpc Ping(Request) returns (Response) {} + rpc PingStream(stream Request) returns (stream Response) {} +} + +message Request { + string msg = 1; +} + +message Response { + string msg = 1; +} + diff --git a/test/vendor/knative.dev/serving/test/test_images/grpc-ping/service.yaml b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/service.yaml new file mode 100644 index 0000000000..4eb07e3113 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/grpc-ping/service.yaml @@ -0,0 +1,13 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: grpc-ping-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/grpc-ping + ports: + - name: h2c + containerPort: 8080 diff --git a/test/vendor/knative.dev/serving/test/test_images/hellovolume/hellovolume.go b/test/vendor/knative.dev/serving/test/test_images/hellovolume/hellovolume.go new file mode 100644 index 0000000000..b6a0d038bb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/hellovolume/hellovolume.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "path/filepath" + "strings" + + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + base := filepath.Dir(test.HelloVolumePath) + p := filepath.Join(base, r.URL.Path) + if p == base { + p = test.HelloVolumePath + } + if !strings.HasPrefix(p, base) { + http.Error(w, "there is no escape", http.StatusBadRequest) + return + } + content, err := ioutil.ReadFile(p) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + log.Printf("Hello volume received a request: %s", string(content)) + fmt.Fprintln(w, string(content)) +} + +func main() { + flag.Parse() + log.Print("Hello volume app started.") + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/hellovolume/service.yaml b/test/vendor/knative.dev/serving/test/test_images/hellovolume/service.yaml new file mode 100644 index 0000000000..0f1c3615d4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/hellovolume/service.yaml @@ -0,0 +1,51 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: hello-volume + namespace: default +spec: + template: + spec: + containers: + - # This is the Go import path for the binary to containerize + # and substitute here. 
+ image: knative.dev/serving/test/test_images/hellovolume + volumeMounts: + - name: foo + mountPath: /hello/ + volumes: + - name: foo + projected: + sources: + - secret: + name: bar + - configMap: + name: baz +--- +apiVersion: v1 +kind: Secret +metadata: + name: bar +stringData: + world: Not the droids you are looking for. +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: baz +data: + universe: The droids you are looking for. diff --git a/test/vendor/knative.dev/serving/test/test_images/helloworld/README.md b/test/vendor/knative.dev/serving/test/test_images/helloworld/README.md new file mode 100644 index 0000000000..785f32aa01 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/helloworld/README.md @@ -0,0 +1,23 @@ +# Helloworld test image + +This directory contains the test image used in the helloworld e2e test. + +The image contains a simple Go webserver, `helloworld.go`, that will, by +default, listen on port `8080` and expose a service at `/`. + +When called, the server emits a "hello world" message. + +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +To run this image as just a Route and Configuration: + +`ko apply -f helloworld.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). diff --git a/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.go b/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.go new file mode 100644 index 0000000000..9ebf248781 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + log.Print("Hello world received a request.") + fmt.Fprintln(w, "Hello World! How about some tasty noodles?") +} + +func main() { + flag.Parse() + log.Print("Hello world app started.") + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.yaml b/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.yaml new file mode 100644 index 0000000000..f1f5c02230 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/helloworld/helloworld.yaml @@ -0,0 +1,40 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: serving.knative.dev/v1 +kind: Configuration +metadata: + name: configuration-example + namespace: default +spec: + template: + spec: + containers: + - # This is the Go import path for the binary to containerize + # and substitute here. 
+ image: knative.dev/serving/test/test_images/helloworld + readinessProbe: + httpGet: + path: / + initialDelaySeconds: 3 +--- +apiVersion: serving.knative.dev/v1 +kind: Route +metadata: + name: route-example + namespace: default +spec: + traffic: + - configurationName: configuration-example + percent: 100 diff --git a/test/vendor/knative.dev/serving/test/test_images/helloworld/service.yaml b/test/vendor/knative.dev/serving/test/test_images/helloworld/service.yaml new file mode 100644 index 0000000000..a8bf414065 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/helloworld/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/helloworld diff --git a/test/vendor/knative.dev/serving/test/test_images/httpproxy/README.md b/test/vendor/knative.dev/serving/test/test_images/httpproxy/README.md new file mode 100644 index 0000000000..74e6e3f39d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/httpproxy/README.md @@ -0,0 +1,23 @@ +# HTTPproxy test image + +This directory contains the test image used in the e2e test to verify +service-to-service call within cluster. + +The image contains a simple Go webserver, `httproxy.go`, that will, by default, +listen on port `8080` and expose a service at `/`. + +When called, the proxy server redirects request to the target server. + +To use this image, users need to first set the host of the target server that +the proxy redirects request to by setting environment variable `TARGET_HOST`. + +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). 
diff --git a/test/vendor/knative.dev/serving/test/test_images/httpproxy/httpproxy.go b/test/vendor/knative.dev/serving/test/test_images/httpproxy/httpproxy.go new file mode 100644 index 0000000000..467eb2ebbc --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/httpproxy/httpproxy.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "log" + "os" + + "net/http" + "net/http/httputil" + "net/url" + + "knative.dev/serving/pkg/network" + "knative.dev/serving/test" +) + +const ( + targetHostEnv = "TARGET_HOST" + gatewayHostEnv = "GATEWAY_HOST" + portEnv = "PORT" // Allow port to be customized / randomly assigned by tests + + defaultPort = "8080" +) + +var ( + httpProxy *httputil.ReverseProxy +) + +func handler(w http.ResponseWriter, r *http.Request) { + log.Print("HTTP proxy received a request.") + // Reverse proxy does not automatically reset the Host header. + // We need to manually reset it. 
+ r.Host = getTargetHostEnv() + httpProxy.ServeHTTP(w, r) +} + +func getPort() string { + value := os.Getenv(portEnv) + if value == "" { + return defaultPort + } + return value +} + +func getTargetHostEnv() string { + value := os.Getenv(targetHostEnv) + if value == "" { + log.Fatalf("No env %v provided.", targetHostEnv) + } + return value +} + +func initialHTTPProxy(proxyURL string) *httputil.ReverseProxy { + target, err := url.Parse(proxyURL) + if err != nil { + log.Fatalf("Failed to parse url %v", proxyURL) + } + proxy := httputil.NewSingleHostReverseProxy(target) + proxy.ErrorHandler = func(w http.ResponseWriter, req *http.Request, err error) { + log.Printf("error reverse proxying request: %v", err) + http.Error(w, err.Error(), http.StatusBadGateway) + } + return proxy +} + +func main() { + flag.Parse() + log.Print("HTTP Proxy app started.") + + targetHost := getTargetHostEnv() + port := getPort() + + // Gateway is an optional value. It is used only when resolvable domain is not set + // for external access test, as xip.io is flaky. + // ref: https://github.com/knative/serving/issues/5389 + gateway := os.Getenv(gatewayHostEnv) + if gateway != "" { + targetHost = gateway + } + targetURL := fmt.Sprintf("http://%s", targetHost) + log.Print("target is " + targetURL) + httpProxy = initialHTTPProxy(targetURL) + + address := fmt.Sprintf(":%s", port) + log.Printf("Listening on address: %s", address) + // Handle forwarding requests which uses "K-Network-Hash" header. 
+ probeHandler := network.NewProbeHandler(http.HandlerFunc(handler)).ServeHTTP + test.ListenAndServeGracefully(address, probeHandler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/httpproxy/service.yaml b/test/vendor/knative.dev/serving/test/test_images/httpproxy/service.yaml new file mode 100644 index 0000000000..3efd272d9d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/httpproxy/service.yaml @@ -0,0 +1,25 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: helloworld-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/helloworld +--- +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: httpproxy-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/httpproxy + env: + - name: TARGET_HOST + value: "helloworld-test-image.default.svc.cluster.local" + diff --git a/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/README.md b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/README.md new file mode 100644 index 0000000000..5188822c61 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/README.md @@ -0,0 +1,20 @@ +# ObservedConcurrency test image + +This directory contains the test image used in the observed concurrency +performance test. + +The image contains a simple Go webserver, `observed_concurrency.go`, that sets a +single concurrency model for the service. + +Each request will return its serverside start and end-time in nanoseconds. + +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). 
diff --git a/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/observed_concurrency.go b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/observed_concurrency.go new file mode 100644 index 0000000000..f39facd1b2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/observed_concurrency.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "log" + "net/http" + "strconv" + "time" + + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + timeout, _ := strconv.Atoi(r.URL.Query().Get("timeout")) + start := time.Now().UnixNano() + time.Sleep(time.Duration(timeout) * time.Millisecond) + end := time.Now().UnixNano() + fmt.Fprintf(w, "%d,%d\n", start, end) +} + +func main() { + log.Print("Benchmark container for 'observed_concurrency' started.") + log.Print("Requests against '/?timeout={TIME_IN_MILLISECONDS}' will sleep for the given time.") + log.Print("Each request will return its serverside start and end-time in nanoseconds.") + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/service.yaml b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/service.yaml new file mode 100644 index 0000000000..6a280d552d --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/test_images/observed-concurrency/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: observed-concurrency-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/observed-concurrency diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/README.md b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/README.md new file mode 100644 index 0000000000..99a8be3384 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/README.md @@ -0,0 +1,19 @@ +# Conformance test image (v1) + +This directory contains a test image used in the conformance tests. + +The images contain a webserver that will by default listens on port `8080` and +expose a service at `/`. + +When called, the server emits the message "What a spaceport!". + +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/main.go b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/main.go new file mode 100644 index 0000000000..1f0d82311d --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/main.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "net/http" + + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "What a spaceport!") +} + +func main() { + flag.Parse() + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/service.yaml b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/service.yaml new file mode 100644 index 0000000000..533453f781 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv1/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: pizzaplanetv1-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/pizzaplanetv1 diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/README.md b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/README.md new file mode 100644 index 0000000000..bd8235f6eb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/README.md @@ -0,0 +1,20 @@ +# Conformance test image (v2) + +This directory contains a test image used in the conformance tests. + +The images contain a webserver that will by default listens on port `8080` and +expose a service at `/`. + +When called, the server emits the message "Re-energize yourself with a slice of +pepperoni!". + +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). 
diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/main.go b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/main.go new file mode 100644 index 0000000000..d63048d00f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/main.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "net/http" + + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Re-energize yourself with a slice of pepperoni!") +} + +func main() { + flag.Parse() + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/service.yaml b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/service.yaml new file mode 100644 index 0000000000..365bf2565b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/pizzaplanetv2/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: pizzaplanetv2-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/pizzaplanetv2 diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/args.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/args.go new file mode 100644 index 0000000000..195e175ef4 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/args.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "os" +) + +func args() []string { + return os.Args +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/cgroup.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/cgroup.go new file mode 100644 index 0000000000..d0b3d6994e --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/cgroup.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "io/ioutil" + "os" + "strconv" + "strings" + + "knative.dev/serving/test/types" +) + +// cgroupPaths is the set of cgroups probed and returned to the +// client as Cgroups. 
+var cgroupPaths = []string{ + "/sys/fs/cgroup/memory/memory.limit_in_bytes", + "/sys/fs/cgroup/cpu/cpu.cfs_period_us", + "/sys/fs/cgroup/cpu/cpu.cfs_quota_us", + "/sys/fs/cgroup/cpu/cpu.shares"} + +var ( + yes = true + no = false +) + +func cgroups(paths ...string) []*types.Cgroup { + var cgroups []*types.Cgroup + for _, path := range paths { + if _, err := os.Stat(path); os.IsNotExist(err) { + cgroups = append(cgroups, &types.Cgroup{Name: path, Error: err.Error()}) + continue + } + + bc, err := ioutil.ReadFile(path) + if err != nil { + cgroups = append(cgroups, &types.Cgroup{Name: path, Error: err.Error()}) + continue + } + cs := strings.Trim(string(bc), "\n") + ic, err := strconv.Atoi(cs) + if err != nil { + cgroups = append(cgroups, &types.Cgroup{Name: path, Error: err.Error()}) + continue + } + + // Try to write to the Cgroup. We expect this to fail as a cheap + // method for read-only validation + newValue := []byte{'9'} + err = ioutil.WriteFile(path, newValue, 0644) + if err != nil { + cgroups = append(cgroups, &types.Cgroup{Name: path, Value: &ic, ReadOnly: &yes}) + } else { + cgroups = append(cgroups, &types.Cgroup{Name: path, Value: &ic, ReadOnly: &no}) + } + } + return cgroups +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/env.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/env.go new file mode 100644 index 0000000000..c51af2e17c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/env.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "os" + "strings" +) + +func env() map[string]string { + envMap := map[string]string{} + for _, e := range os.Environ() { + pair := strings.SplitN(e, "=", 2) + envMap[pair[0]] = pair[1] + } + return envMap +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file.go new file mode 100644 index 0000000000..4113a56274 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "os" + "strings" + + "knative.dev/serving/test/types" +) + +func fileInfo(paths ...string) map[string]types.FileInfo { + files := map[string]types.FileInfo{} + for _, path := range paths { + file, err := os.Stat(path) + if err != nil { + files[path] = types.FileInfo{Error: err.Error()} + continue + } + size := file.Size() + dir := file.IsDir() + source, _ := os.Readlink(path) + + // If we apply the UNIX permissions mask via 'Perm' the leading + // character will always be "-" because all the mode bits are dropped. 
+ perm := strings.TrimPrefix(file.Mode().Perm().String(), "-") + + files[path] = types.FileInfo{ + Size: &size, + Perm: perm, + ModTime: file.ModTime(), + SourceFile: source, + IsDir: &dir} + } + return files +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file_access_attempt.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file_access_attempt.go new file mode 100644 index 0000000000..92ac3a20e7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/file_access_attempt.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "io/ioutil" + "os" + + "knative.dev/serving/test/types" +) + +type permissionBits uint32 + +const ( + otherRead permissionBits = 1 << (2 - iota) + otherWrite +) + +func (p permissionBits) hasPermission(mode permissionBits) bool { + return p&mode == mode +} + +func fileAccessAttempt(filePaths ...string) map[string]types.FileAccessInfo { + files := map[string]types.FileAccessInfo{} + for _, path := range filePaths { + accessInfo := types.FileAccessInfo{} + + if err := checkReadable(path); err != nil { + accessInfo.ReadErr = err.Error() + } + + if err := checkWritable(path); err != nil { + accessInfo.WriteErr = err.Error() + } + + files[path] = accessInfo + } + return files +} + +// checkReadable function checks whether path file or directory is readable +func checkReadable(path string) error { + file, err := os.Stat(path) // It should only return error only if file does not exist + if err != nil { + return err + } + + // We aren't expected to be able to read, so just exit + perm := permissionBits(file.Mode().Perm()) + if !perm.hasPermission(otherRead) { + return nil + } + + if file.IsDir() { + _, err := ioutil.ReadDir(path) + return err + } + + readFile, err := os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + return err + } + return readFile.Close() +} + +// checkWritable function checks whether path file or directory is writable +func checkWritable(path string) error { + file, err := os.Stat(path) // It should only return error only if file does not exist + if err != nil { + return err + } + + // We aren't expected to be able to write, so just exits + perm := permissionBits(file.Mode().Perm()) + if !perm.hasPermission(otherWrite) { + return nil + } + + if file.IsDir() { + writeFile, err := ioutil.TempFile(path, "random") + if writeFile != nil { + os.Remove(writeFile.Name()) + } + return err + } + + writeFile, err := os.OpenFile(path, os.O_APPEND, 0) + if writeFile != nil { + defer writeFile.Close() + } + return err +} diff 
--git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/handler.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/handler.go new file mode 100644 index 0000000000..53d58d90e7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/handler.go @@ -0,0 +1,72 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "encoding/json" + "log" + "net/http" + "net/http/httputil" + "strings" + + "knative.dev/serving/pkg/network" +) + +// InitHandlers initializes all handlers. +func InitHandlers(mux *http.ServeMux) { + h := network.NewProbeHandler(withHeaders(withRequestLog(runtimeHandler))) + mux.HandleFunc("/", h.ServeHTTP) + mux.HandleFunc("/healthz", withRequestLog(withKubeletProbeHeaderCheck)) +} + +// withRequestLog logs each request before handling it. 
+func withRequestLog(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + reqDump, err := httputil.DumpRequest(r, true) + if err != nil { + log.Println(err) + } else { + log.Println(string(reqDump)) + } + + next.ServeHTTP(w, r) + } +} + +// withKubeletProbeHeaderCheck checks each health request has Kubelet probe header +func withKubeletProbeHeaderCheck(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.Header.Get("User-Agent"), network.KubeProbeUAPrefix) { + w.WriteHeader(http.StatusBadRequest) + } else { + w.WriteHeader(http.StatusOK) + } +} + +// setHeaders injects headers on the responses. +func withHeaders(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") + w.Header().Set("Pragma", "no-cache") + w.Header().Set("Expires", "0") + next.ServeHTTP(w, r) + } +} + +// WriteObject write content to response. +func writeJSON(w http.ResponseWriter, o interface{}) { + w.WriteHeader(http.StatusOK) + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(o) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/mount.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/mount.go new file mode 100644 index 0000000000..110b5d4cff --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/mount.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "bufio" + "os" + "strings" + + "knative.dev/serving/test/types" +) + +func mounts() []*types.Mount { + file, err := os.Open("/proc/mounts") + if err != nil { + return []*types.Mount{{Error: err.Error()}} + } + defer file.Close() + + sc := bufio.NewScanner(file) + var mounts []*types.Mount + for sc.Scan() { + ml := sc.Text() + // Each line should be: + // Device Path Type Options Unused Unused + // sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 + ms := strings.Split(ml, " ") + if len(ms) < 6 { + return []*types.Mount{{Error: "unknown /proc/mounts format"}} + } + mounts = append(mounts, &types.Mount{ + Device: ms[0], + Path: ms[1], + Type: ms[2], + Options: strings.Split(ms[3], ",")}) + } + + if err := sc.Err(); err != nil { + // Don't return partial list of mounts on error + return []*types.Mount{{Error: err.Error()}} + } + + return mounts +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/proc.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/proc.go new file mode 100644 index 0000000000..606a6ee3f7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/proc.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "io" + "os" + + "knative.dev/serving/test/types" +) + +// stdin attempts to read bytes from the stdin file descriptor and returns the result. +func stdin() *types.Stdin { + _, err := os.Stdin.Read(make([]byte, 1)) + if err == io.EOF { + return &types.Stdin{EOF: &yes} + } + if err != nil { + return &types.Stdin{Error: err.Error()} + } + + return &types.Stdin{EOF: &no} +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/request.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/request.go new file mode 100644 index 0000000000..a4691def31 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/request.go @@ -0,0 +1,33 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "net/http" + "time" + + "knative.dev/serving/test/types" +) + +func requestInfo(r *http.Request) *types.RequestInfo { + return &types.RequestInfo{ + Ts: time.Now(), + URI: r.RequestURI, + Host: r.Host, + Method: r.Method, + Headers: r.Header, + ProtoMajor: r.ProtoMajor, + ProtoMinor: r.ProtoMinor, + } +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/runtime.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/runtime.go new file mode 100644 index 0000000000..21d29acf5b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/runtime.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "log" + "net/http" + + "knative.dev/serving/test/types" +) + +var fileAccessExclusions = []string{ + "/dev/tty", + "/dev/console", +} + +func runtimeHandler(w http.ResponseWriter, r *http.Request) { + log.Println("Retrieving Runtime Information") + w.Header().Set("Content-Type", "application/json") + + filePaths := make([]string, len(types.MustFiles)+len(types.ShouldFiles)) + i := 0 + for key := range types.MustFiles { + filePaths[i] = key + i++ + } + for key := range types.ShouldFiles { + filePaths[i] = key + i++ + } + + k := &types.RuntimeInfo{ + Request: requestInfo(r), + Host: &types.HostInfo{EnvVars: env(), + Files: fileInfo(filePaths...), + FileAccess: fileAccessAttempt(excludeFilePaths(filePaths, fileAccessExclusions)...), + Cgroups: cgroups(cgroupPaths...), + Mounts: mounts(), + Stdin: stdin(), + User: userInfo(), + Args: args(), + }, + } + + writeJSON(w, k) +} + +func excludeFilePaths(filePaths []string, excludedPaths []string) []string { + var paths []string + for _, path := range filePaths { + excluded := false + for _, excludedPath := range excludedPaths { + if path == excludedPath { + excluded = true + break + } + } + + if !excluded { + paths = append(paths, path) + } + } + return paths +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/user.go b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/user.go new file mode 100644 index 0000000000..96e79b4796 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/handlers/user.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "os" + + "knative.dev/serving/test/types" +) + +func userInfo() *types.UserInfo { + cwd, err := os.Getwd() + cwdInfo := &types.Cwd{ + Directory: cwd, + } + if err != nil { + cwdInfo.Error = err.Error() + } + + return &types.UserInfo{ + UID: os.Getuid(), + EUID: os.Geteuid(), + GID: os.Getgid(), + EGID: os.Getegid(), + Cwd: cwdInfo, + } +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/main.go b/test/vendor/knative.dev/serving/test/test_images/runtime/main.go new file mode 100644 index 0000000000..d231e82a73 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/main.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "log" + "net/http" + "os" + + "knative.dev/serving/test" + "knative.dev/serving/test/test_images/runtime/handlers" +) + +func main() { + // We expect PORT to be defined in a Knative environment + // and don't want to mask this failure in a test image. 
+ port, isSet := os.LookupEnv("PORT") + if !isSet { + log.Fatal("Environment variable PORT is not set.") + } + + // This is an option for exec readiness probe test. + flag.Parse() + args := flag.Args() + if len(args) > 0 && args[0] == "probe" { + url := "http://localhost:" + port + if _, err := http.Get(url); err != nil { + log.Fatalf("Failed to probe %v", err) + } + return + } + + mux := http.NewServeMux() + handlers.InitHandlers(mux) + + log.Printf("Server starting on port %s", port) + test.ListenAndServeGracefullyWithHandler(":"+port, mux) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/runtime/service.yaml b/test/vendor/knative.dev/serving/test/test_images/runtime/service.yaml new file mode 100644 index 0000000000..48fa15374f --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/runtime/service.yaml @@ -0,0 +1,12 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: runtime-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/runtime + securityContext: + runAsUser: 65532 diff --git a/test/vendor/knative.dev/serving/test/test_images/singlethreaded/README.md b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/README.md new file mode 100644 index 0000000000..f6404699f9 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/README.md @@ -0,0 +1,21 @@ +# Conformance test image (single threaded) + +This directory contains a test image used in the conformance tests. + +The images contain a webserver that will by default listens on port `8080` and +expose a service at `/`. + +When called, the server sleeps a short period and returns a 200 status if no +other request is running in the container concurrently. If it does detect +concurrent requests, it instead returns a 500 status. 
+ +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). diff --git a/test/vendor/knative.dev/serving/test/test_images/singlethreaded/main.go b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/main.go new file mode 100644 index 0000000000..34d1251b36 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/main.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// The singlethreaded program +package main + +import ( + "flag" + "fmt" + "net/http" + "sync/atomic" + "time" + + "knative.dev/serving/test" +) + +var ( + lockedFlag uint32 +) + +func handler(w http.ResponseWriter, r *http.Request) { + // Use an atomic int as a simple boolean flag we can check safely + if !atomic.CompareAndSwapUint32(&lockedFlag, 0, 1) { + // Return HTTP 500 if more than 1 request at a time gets in + w.WriteHeader(http.StatusInternalServerError) + return + } + defer atomic.StoreUint32(&lockedFlag, 0) + + time.Sleep(500 * time.Millisecond) + fmt.Fprintf(w, "One at a time") +} + +func main() { + flag.Parse() + + test.ListenAndServeGracefully(":8080", handler) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/singlethreaded/service.yaml b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/service.yaml new file mode 100644 index 0000000000..79151344ca --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/singlethreaded/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: singlethreaded-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/singlethreaded diff --git a/test/vendor/knative.dev/serving/test/test_images/timeout/README.md b/test/vendor/knative.dev/serving/test/test_images/timeout/README.md new file mode 100644 index 0000000000..441dd73eb8 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/timeout/README.md @@ -0,0 +1,19 @@ +# Timeout test image + +The image contains a simple Go webserver, `timeout.go`, that will, by default, +listen on port `8080` and expose a service at `/`. + +When called, the server sleeps for the amount of milliseconds passed in via the +query parameter `timeout` and responds with "Slept for X milliseconds` where X +is the amount of milliseconds passed in. 
+ +## Trying out + +To run the image as a Service outisde of the test suite: + +`ko apply -f service.yaml` + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). diff --git a/test/vendor/knative.dev/serving/test/test_images/timeout/service.yaml b/test/vendor/knative.dev/serving/test/test_images/timeout/service.yaml new file mode 100644 index 0000000000..d72596514a --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/timeout/service.yaml @@ -0,0 +1,10 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: timeout-test-image + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/timeout diff --git a/test/vendor/knative.dev/serving/test/test_images/timeout/timeout.go b/test/vendor/knative.dev/serving/test/test_images/timeout/timeout.go new file mode 100644 index 0000000000..7ccfb95c69 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/timeout/timeout.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "net/http" + "os" + "strconv" + "time" + + "knative.dev/serving/pkg/network" + "knative.dev/serving/test" +) + +func handler(w http.ResponseWriter, r *http.Request) { + // Sleep for a set amount of time before sending headers + if initialTimeout := r.URL.Query().Get("initialTimeout"); initialTimeout != "" { + parsed, _ := strconv.Atoi(initialTimeout) + time.Sleep(time.Duration(parsed) * time.Millisecond) + } + + w.WriteHeader(http.StatusOK) + + // Explicitly flush the already written data to trigger (or not) + // the time-to-first-byte timeout. + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + // Sleep for a set amount of time before sending response + timeout, _ := strconv.Atoi(r.URL.Query().Get("timeout")) + time.Sleep(time.Duration(timeout) * time.Millisecond) + + fmt.Fprintf(w, "Slept for %d milliseconds", timeout) +} + +func main() { + h := network.NewProbeHandler(http.HandlerFunc(handler)) + test.ListenAndServeGracefully(":"+os.Getenv("PORT"), h.ServeHTTP) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/wsserver/README.md b/test/vendor/knative.dev/serving/test/test_images/wsserver/README.md new file mode 100644 index 0000000000..897dc5edbb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/wsserver/README.md @@ -0,0 +1,11 @@ +# Echo WebSocket test image + +A simple WebSocket server adapted from +https://github.com/gorilla/WebSocket/blob/master/examples/echo/server.go . The +server simply echoes messages sent to it. We use this server in testing that all +our proxies on request path can handle WebSocket upgrades. + +## Building + +For details about building and adding new images, see the +[section about test images](/test/README.md#test-images). 
diff --git a/test/vendor/knative.dev/serving/test/test_images/wsserver/echo.go b/test/vendor/knative.dev/serving/test/test_images/wsserver/echo.go new file mode 100644 index 0000000000..6df981168e --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/wsserver/echo.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "flag" + "log" + "net/http" + "os" + + "github.com/gorilla/websocket" + "knative.dev/serving/pkg/network" + "knative.dev/serving/test" +) + +const suffixMessageEnv = "SUFFIX" + +// Gets the message suffix from envvar. Empty by default. +func messageSuffix() string { + value := os.Getenv(suffixMessageEnv) + if value == "" { + return "" + } + return value +} + +var upgrader = websocket.Upgrader{ + // Allow any origin, since we are spoofing requests anyway. + CheckOrigin: func(r *http.Request) bool { + return true + }, +} + +func handler(w http.ResponseWriter, r *http.Request) { + if network.IsKubeletProbe(r) { + w.WriteHeader(http.StatusOK) + return + } + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Error upgrading websocket:", err) + return + } + defer conn.Close() + log.Println("Connection upgraded to WebSocket. Entering receive loop.") + for { + messageType, message, err := conn.ReadMessage() + if err != nil { + // We close abnormally, because we're just closing the connection in the client, + // which is okay. 
There's no value delaying closure of the connection unnecessarily. + if websocket.IsCloseError(err, websocket.CloseAbnormalClosure) { + log.Println("Client disconnected.") + } else { + log.Println("Handler exiting on error:", err) + } + return + } + if suffix := messageSuffix(); suffix != "" { + respMes := string(message) + " " + suffix + message = []byte(respMes) + } + + log.Printf("Successfully received: %q", message) + if err = conn.WriteMessage(messageType, message); err != nil { + log.Println("Failed to write message:", err) + return + } + log.Printf("Successfully wrote: %q", message) + } +} + +func main() { + flag.Parse() + log.SetFlags(0) + h := network.NewProbeHandler(http.HandlerFunc(handler)) + test.ListenAndServeGracefully(":"+os.Getenv("PORT"), h.ServeHTTP) +} diff --git a/test/vendor/knative.dev/serving/test/test_images/wsserver/service.yaml b/test/vendor/knative.dev/serving/test/test_images/wsserver/service.yaml new file mode 100644 index 0000000000..9559e59864 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/test_images/wsserver/service.yaml @@ -0,0 +1,23 @@ +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: websocket-server + namespace: default +spec: + template: + spec: + containers: + - image: knative.dev/serving/test/test_images/wsserver diff --git a/test/vendor/knative.dev/serving/test/types/runtime.go b/test/vendor/knative.dev/serving/test/types/runtime.go new file mode 100644 index 0000000000..c8dd7c242e --- /dev/null +++ b/test/vendor/knative.dev/serving/test/types/runtime.go @@ -0,0 +1,228 @@ +/* +Copyright 2019 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "net/http" + "time" + + "knative.dev/pkg/ptr" +) + +// MustEnvVars defines environment variables that "MUST" be set. +// The value provided is an example value. +var MustEnvVars = map[string]string{ + "PORT": "8801", +} + +// ShouldEnvVars defines environment variables that "SHOULD" be set. +// To match these values with test service parameters, +// map values must represent corresponding test.ResourceNames fields +var ShouldEnvVars = map[string]string{ + "K_SERVICE": "Service", + "K_CONFIGURATION": "Config", + "K_REVISION": "Revision", +} + +// MustFiles specifies the file paths and expected permissions that MUST be set as specified in the runtime contract. +// See https://golang.org/pkg/os/#FileMode for "Mode" string meaning. '*' indicates no specification. 
+var MustFiles = map[string]FileInfo{ + "/dev/fd": { + IsDir: ptr.Bool(true), + SourceFile: "/proc/self/fd", + }, + "/dev/full": { + IsDir: ptr.Bool(false), + }, + "/dev/null": { + IsDir: ptr.Bool(false), + }, + "/dev/ptmx": { + IsDir: ptr.Bool(false), + }, + "/dev/random": { + IsDir: ptr.Bool(false), + }, + "/dev/stdin": { + IsDir: ptr.Bool(false), + SourceFile: "/proc/self/fd/0", + }, + "/dev/stdout": { + IsDir: ptr.Bool(false), + SourceFile: "/proc/self/fd/1", + }, + "/dev/stderr": { + IsDir: ptr.Bool(false), + SourceFile: "/proc/self/fd/2", + }, + "/dev/tty": { + IsDir: ptr.Bool(false), + }, + "/dev/urandom": { + IsDir: ptr.Bool(false), + }, + "/dev/zero": { + IsDir: ptr.Bool(false), + }, + "/proc/self/fd": { + IsDir: ptr.Bool(true), + }, + "/proc/self/fd/0": { + IsDir: ptr.Bool(false), + }, + "/proc/self/fd/1": { + IsDir: ptr.Bool(false), + }, + "/proc/self/fd/2": { + IsDir: ptr.Bool(false), + }, + "/tmp": { + IsDir: ptr.Bool(true), + Perm: "rwxrwxrwx", + }, + "/var/log": { + IsDir: ptr.Bool(true), + Perm: "rwxrwxrwx", + }, +} + +// ShouldFiles specifies the file paths and expected permissions that SHOULD be set as specified in the runtime contract. +// See https://golang.org/pkg/os/#FileMode for "Mode" string meaning. '*' indicates no specification. +var ShouldFiles = map[string]FileInfo{ + "/etc/resolv.conf": { + IsDir: ptr.Bool(false), + Perm: "rw*r**r**", + }, + "/dev/console": { // This file SHOULD NOT exist. + Error: "stat /dev/console: no such file or directory", + }, +} + +// RuntimeInfo encapsulates both the host and request information. +type RuntimeInfo struct { + // Request is information about the request. + Request *RequestInfo `json:"request"` + // Host is a set of host information. + Host *HostInfo `json:"host"` +} + +// RequestInfo encapsulates information about the request. +type RequestInfo struct { + // Ts is the timestamp of when the request came in from the system time. 
+ Ts time.Time `json:"ts"` + // URI is the request-target of the Request-Line. + URI string `json:"uri"` + // Host is the hostname on which the URL is sought. + Host string `json:"host"` + // Method is the method used for the request. + Method string `json:"method"` + // Headers is a Map of all headers set. + Headers http.Header `json:"headers"` + // ProtoMajor is the major version of the incoming protocol. + ProtoMajor int `json:"protoMajor"` + // ProtoMinor is the minor version of the incoming protocol. + ProtoMinor int `json:"protoMinor"` +} + +// HostInfo contains information about the host environment. +type HostInfo struct { + // Files is a map of file metadata. + Files map[string]FileInfo `json:"files"` + // EnvVars is a map of all environment variables set. + EnvVars map[string]string `json:"envs"` + // FileAccess is a map of file access information + FileAccess map[string]FileAccessInfo `json:"fileaccess"` + // Cgroups is a list of cgroup information. + Cgroups []*Cgroup `json:"cgroups"` + // Mounts is a list of mounted volume information, or error. + Mounts []*Mount `json:"mounts"` + Stdin *Stdin `json:"stdin"` + User *UserInfo `json:"user"` + Args []string `json:"args"` +} + +// Stdin contains information about the Stdin file descriptor for the container. +type Stdin struct { + // EOF is true if the first byte read from stdin results in EOF. + EOF *bool `json:"eof,omitempty"` + // Error is the String representation of an error probing sdtin. + Error string `json:"error,omitempty"` +} + +// UserInfo container information about the current user and group for the running process. +type UserInfo struct { + UID int `json:"uid"` + EUID int `json:"euid"` + GID int `json:"gid"` + EGID int `json:"egid"` + Cwd *Cwd `json:"cwd"` +} + +type Cwd struct { + Directory string `json:"directory"` + Error string `json:"error"` +} + +// FileInfo contains the metadata for a given file. 
+type FileInfo struct { + // Size is the length in bytes for regular files; system-dependent for others. + Size *int64 `json:"size,omitempty"` + // Perm are the unix permission bits. + Perm string `json:"mode,omitempty"` + // ModTime is the file last modified time. + ModTime time.Time `json:"modTime,omitempty"` + // SourceFile is populated if this file is a symlink. The SourceFile is the file where + // the symlink resolves. + SourceFile string `json:"sourceFile,omitempty"` + // IsDir is true if the file is a directory. + IsDir *bool `json:"isDir,omitempty"` + // Error is the String representation of the error returned obtaining the information. + Error string `json:"error,omitempty"` +} + +// FileAccessInfo contains the file access information +type FileAccessInfo struct { + // ReadErr is the String representation of an error received when attempting to read + // a file or directory + ReadErr string `json:"read_error,omitempty"` + // WriteErr is the String representation of an error received when attempting to write + // to a file or directory + WriteErr string `json:"write_error,omitempty"` +} + +// Cgroup contains the Cgroup value for a given setting. +type Cgroup struct { + // Name is the full path name of the cgroup. + Name string `json:"name"` + // Value is the integer files in the cgroup file. + Value *int `json:"value,omitempty"` + // ReadOnly is true if the cgroup was not writable. + ReadOnly *bool `json:"readOnly,omitempty"` + // Error is the String representation of the error returned obtaining the information. + Error string `json:"error,omitempty"` +} + +// Mount contains information about a given mount. +type Mount struct { + // Device is the device that is mounted + Device string `json:"device,omitempty"` + // Path is the location where the volume is mounted + Path string `json:"path,omitempty"` + // Type is the filesystem type (i.e. sysfs, proc, tmpfs, ext4, overlay, etc.) 
+ Type string `json:"type,omitempty"` + // Options is the mount options set (i.e. rw, nosuid, relatime, etc.) + Options []string `json:"options,omitempty"` + // Error is the String representation of the error returned obtaining the information. + Error string `json:"error,omitempty"` +} diff --git a/test/vendor/knative.dev/serving/test/upgrade/README.md b/test/vendor/knative.dev/serving/test/upgrade/README.md new file mode 100644 index 0000000000..ac18e443c5 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/README.md @@ -0,0 +1,64 @@ +# Upgrade Tests + +In order to get coverage for the upgrade process from an operator’s perspective, +we need an additional suite of tests that perform a complete knative upgrade. +Running these tests on every commit will ensure that we don’t introduce any +non-upgradeable changes, so every commit should be releasable. + +This is inspired by kubernetes +[upgrade testing](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-tests.md#version-skewed-and-upgrade-testing). + +These tests are a pretty big hammer in that they cover more than just version +changes, but it’s one of the only ways to make sure we don’t accidentally make +breaking changes for now. + +## Flow + +We’d like to validate that the upgrade doesn’t break any resources (they still +respond to requests) and doesn't break our installation (we can still update +resources). + +At a high level, we want to do this: + +1. Install the latest knative release. +1. Create some resources. +1. Install knative at HEAD. +1. Test those resources, verify that we didn’t break anything. + +To achieve that, we just have three separate build tags: + +1. Install the latest release from GitHub. +1. Run the `preupgrade` tests in this directory. +1. Install at HEAD (`ko apply -f config/`). +1. Run the `postupgrade` tests in this directory. +1. Install the latest release from GitHub. +1. Run the `postdowngrade` tests in this directory. 
+ +## Tests + +### Service test + +This was stolen from the conformance tests but stripped down to check fewer +things. + +#### preupgrade + +Create a RunLatest Service pointing to `image1`, ensure it responds correctly. + +#### postupgrade + +Ensure the Service still responds correctly after upgrading. Update it to point +to `image2`, ensure it responds correctly. + +#### postdowngrade + +Ensure the Service still responds correctly after downgrading. Update it to +point back to `image1`, ensure it responds correctly. + +### Probe test + +In order to verify that we don't have data-plane unavailability during our +control-plane outages (when we're upgrading the knative/serving installation), +we run a prober test that continually sends requests to a service during the +entire upgrade process. When the upgrade completes, we make sure that none of +those requests failed. diff --git a/test/vendor/knative.dev/serving/test/upgrade/probe_test.go b/test/vendor/knative.dev/serving/test/upgrade/probe_test.go new file mode 100644 index 0000000000..cb55c5c1d3 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/probe_test.go @@ -0,0 +1,71 @@ +// +build probe + +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package upgrade + +import ( + "io/ioutil" + "log" + "os" + "syscall" + "testing" + + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +const pipe = "/tmp/prober-signal" + +func TestProbe(t *testing.T) { + // We run the prober as a golang test because it fits in nicely with + // the rest of our integration tests, and AssertProberDefault needs + // a *testing.T. Unfortunately, "go test" intercepts signals, so we + // can't coordinate with the test by just sending e.g. SIGCONT, so we + // create a named pipe and wait for the upgrade script to write to it + // to signal that we should stop probing. + if err := syscall.Mkfifo(pipe, 0666); err != nil { + t.Fatalf("Failed to create pipe: %v", err) + } + defer os.Remove(pipe) + + clients := e2e.Setup(t) + names := test.ResourceNames{ + Service: "upgrade-probe", + Image: test.PizzaPlanet1, + } + defer test.TearDown(clients, names) + + objects, err := v1a1test.CreateRunLatestServiceLegacyReady(t, clients, &names) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + url := objects.Service.Status.URL.URL() + + // This polls until we get a 200 with the right body. + assertServiceResourcesUpdated(t, clients, names, url, test.PizzaPlanetText1) + + // Use log.Printf instead of t.Logf because we want to see failures + // inline with other logs instead of buffered until the end. + prober := test.RunRouteProber(log.Printf, clients, url) + defer test.AssertProberDefault(t, prober) + + // e2e-upgrade-test.sh will close this pipe to signal the upgrade is + // over, at which point we will finish the test and check the prober. 
+ _, _ = ioutil.ReadFile(pipe) +} diff --git a/test/vendor/knative.dev/serving/test/upgrade/service_postdowngrade_test.go b/test/vendor/knative.dev/serving/test/upgrade/service_postdowngrade_test.go new file mode 100644 index 0000000000..00aa524b67 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/service_postdowngrade_test.go @@ -0,0 +1,72 @@ +// +build postdowngrade + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package upgrade + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + _ "knative.dev/pkg/system/testing" + ptest "knative.dev/pkg/test" + + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestRunLatestServicePostDowngrade(t *testing.T) { + clients := e2e.Setup(t) + + var names test.ResourceNames + names.Service = serviceName + + t.Logf("Getting service %q", names.Service) + svc, err := clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Service: %v", err) + } + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + names.Revision = svc.Status.LatestCreatedRevisionName + + url := svc.Status.URL + + t.Log("Check that we can hit the old service and get the old response.") + assertServiceResourcesUpdated(t, clients, names, 
url.URL(), "Re-energize yourself with a slice of pepperoni!") + + t.Log("Updating the Service to use a different image") + newImage := ptest.ImagePath(test.PizzaPlanet1) + if _, err := v1a1test.PatchServiceImage(t, clients, svc, newImage); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, newImage, err) + } + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + revisionName, err := v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for image %s: %v", names.Service, test.PizzaPlanet1, err) + } + names.Revision = revisionName + + t.Log("When the Service reports as Ready, everything should be ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic to Revision %s: %v", names.Service, names.Revision, err) + } + assertServiceResourcesUpdated(t, clients, names, url.URL(), "What a spaceport!") +} diff --git a/test/vendor/knative.dev/serving/test/upgrade/service_postupgrade_test.go b/test/vendor/knative.dev/serving/test/upgrade/service_postupgrade_test.go new file mode 100644 index 0000000000..cc83f57873 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/service_postupgrade_test.go @@ -0,0 +1,104 @@ +// +build postupgrade + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package upgrade + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" + ptest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1test "knative.dev/serving/test/v1" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestRunLatestServicePostUpgrade(t *testing.T) { + t.Parallel() + updateService(serviceName, t) +} + +func TestRunLatestServicePostUpgradeFromScaleToZero(t *testing.T) { + t.Parallel() + updateService(scaleToZeroServiceName, t) +} + +// TestBYORevisionPostUpgrade attempts to update the RouteSpec of a Service using BYO Revision name. This +// test is meant to catch new defaults that break the immutability of BYO Revision name. +func TestBYORevisionPostUpgrade(t *testing.T) { + t.Parallel() + clients := e2e.Setup(t) + names := test.ResourceNames{ + Service: byoServiceName, + } + + if _, err := v1test.UpdateServiceRouteSpec(t, clients, names, v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: "example-tag", + RevisionName: byoRevName, + Percent: ptr.Int64(100), + }}, + }); err != nil { + t.Fatalf("Failed to update Service: %v", err) + } +} + +func updateService(serviceName string, t *testing.T) { + t.Helper() + clients := e2e.Setup(t) + names := test.ResourceNames{ + Service: serviceName, + } + + t.Logf("Getting service %q", names.Service) + svc, err := clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get Service: %v", err) + } + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + names.Revision = svc.Status.LatestCreatedRevisionName + + routeURL := svc.Status.URL.URL() + + t.Log("Check that we can hit the old 
service and get the old response.") + assertServiceResourcesUpdated(t, clients, names, routeURL, test.PizzaPlanetText1) + + t.Log("Updating the Service to use a different image") + newImage := ptest.ImagePath(test.PizzaPlanet2) + if _, err := v1a1test.PatchServiceImage(t, clients, svc, newImage); err != nil { + t.Fatalf("Patch update for Service %s with new image %s failed: %v", names.Service, newImage, err) + } + + t.Log("Since the Service was updated a new Revision will be created and the Service will be updated") + revisionName, err := v1a1test.WaitForServiceLatestRevision(clients, names) + if err != nil { + t.Fatalf("Service %s was not updated with the Revision for image %s: %v", names.Service, test.PizzaPlanet2, err) + } + names.Revision = revisionName + + t.Log("When the Service reports as Ready, everything should be ready.") + if err := v1a1test.WaitForServiceState(clients.ServingAlphaClient, names.Service, v1a1test.IsServiceReady, "ServiceIsReady"); err != nil { + t.Fatalf("The Service %s was not marked as Ready to serve traffic to Revision %s: %v", names.Service, names.Revision, err) + } + assertServiceResourcesUpdated(t, clients, names, routeURL, test.PizzaPlanetText2) +} diff --git a/test/vendor/knative.dev/serving/test/upgrade/service_preupgrade_test.go b/test/vendor/knative.dev/serving/test/upgrade/service_preupgrade_test.go new file mode 100644 index 0000000000..b8b6f8756c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/service_preupgrade_test.go @@ -0,0 +1,95 @@ +// +build preupgrade + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package upgrade + +import ( + "testing" + + "knative.dev/serving/pkg/apis/autoscaling" + v1 "knative.dev/serving/pkg/apis/serving/v1" + revisionresourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" + "knative.dev/serving/test/e2e" + v1test "knative.dev/serving/test/v1" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +func TestRunLatestServicePreUpgrade(t *testing.T) { + t.Parallel() + clients := e2e.Setup(t) + + var names test.ResourceNames + names.Service = serviceName + names.Image = test.PizzaPlanet1 + + resources, err := v1a1test.CreateRunLatestServiceLegacyReady(t, clients, &names, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.MinScaleAnnotationKey: "1", //make sure we don't scale to zero during the test + }), + ) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } + url := resources.Service.Status.URL.URL() + assertServiceResourcesUpdated(t, clients, names, url, test.PizzaPlanetText1) +} + +func TestRunLatestServicePreUpgradeAndScaleToZero(t *testing.T) { + t.Parallel() + clients := e2e.Setup(t) + + var names test.ResourceNames + names.Service = scaleToZeroServiceName + names.Image = test.PizzaPlanet1 + + resources, err := v1a1test.CreateRunLatestServiceLegacyReady(t, clients, &names, + rtesting.WithConfigAnnotations(map[string]string{ + autoscaling.WindowAnnotationKey: autoscaling.WindowMin.String(), //make sure we scale to zero quickly + }), + ) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + 
} + url := resources.Service.Status.URL.URL() + assertServiceResourcesUpdated(t, clients, names, url, test.PizzaPlanetText1) + + if err := e2e.WaitForScaleToZero(t, revisionresourcenames.Deployment(resources.Revision), clients); err != nil { + t.Fatalf("Could not scale to zero: %v", err) + } +} + +// TestBYORevisionUpgrade creates a Service that uses the BYO Revision name functionality. This test +// is meant to catch new defaults that break bring your own revision name immutability. +func TestBYORevisionPreUpgrade(t *testing.T) { + t.Parallel() + clients := e2e.Setup(t) + names := test.ResourceNames{ + Service: byoServiceName, + Image: test.PizzaPlanet1, + } + + _, err := v1test.CreateServiceReady(t, clients, &names, + func(svc *v1.Service) { + svc.Spec.ConfigurationSpec.Template.Name = byoRevName + }) + if err != nil { + t.Fatalf("Failed to create Service: %v", err) + } +} diff --git a/test/vendor/knative.dev/serving/test/upgrade/upgrade.go b/test/vendor/knative.dev/serving/test/upgrade/upgrade.go new file mode 100644 index 0000000000..e02f64b3c7 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upgrade/upgrade.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package upgrade + +import ( + "fmt" + "net/url" + + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. 
+ // https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/test" + v1a1test "knative.dev/serving/test/v1alpha1" +) + +const ( + // These service names need to be stable, since we use them across + // multiple "go test" invocations. + serviceName = "pizzaplanet-upgrade-service" + scaleToZeroServiceName = "scale-to-zero-upgrade-service" + byoServiceName = "byo-revision-name-upgrade-test" + byoRevName = byoServiceName + "-" + "rev1" +) + +// Shamelessly cribbed from conformance/service_test. +func assertServiceResourcesUpdated(t pkgTest.TLegacy, clients *test.Clients, names test.ResourceNames, url *url.URL, expectedText string) { + t.Helper() + // TODO(#1178): Remove "Wait" from all checks below this point. + _, err := pkgTest.WaitForEndpointState( + clients.KubeClient, + t.Logf, + url, + v1a1test.RetryingRouteInconsistency(pkgTest.MatchesAllOf(pkgTest.IsStatusOK, pkgTest.EventuallyMatchesBody(expectedText))), + "WaitForEndpointToServeText", + test.ServingFlags.ResolvableDomain) + if err != nil { + t.Fatal(fmt.Sprintf("The endpoint for Route %s at %s didn't serve the expected text %q: %v", names.Route, url, expectedText, err)) + } +} diff --git a/test/vendor/knative.dev/serving/test/upload-test-images.sh b/test/vendor/knative.dev/serving/test/upload-test-images.sh new file mode 100755 index 0000000000..d02c76f9d0 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/upload-test-images.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit + +function upload_test_images() { + echo ">> Publishing test images" + # Script needs to be executed from the root directory + # to pickup .ko.yaml + cd "$( dirname "$0")/.." + local image_dir="test/test_images" + local docker_tag=$1 + local tag_option="" + if [ -n "${docker_tag}" ]; then + tag_option="--tags $docker_tag,latest" + fi + + # ko resolve is being used for the side-effect of publishing images, + # so the resulting yaml produced is ignored. + ko resolve ${tag_option} -RBf "${image_dir}" > /dev/null +} + +: ${KO_DOCKER_REPO:?"You must set 'KO_DOCKER_REPO', see DEVELOPMENT.md"} + +upload_test_images $@ diff --git a/test/vendor/knative.dev/serving/test/util.go b/test/vendor/knative.dev/serving/test/util.go new file mode 100644 index 0000000000..71ecd86697 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/util.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Knative Authors + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "context" + "net/http" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + "knative.dev/pkg/apis/duck" + "knative.dev/pkg/signals" +) + +const ( + // PollInterval is how frequently e2e tests will poll for updates. + PollInterval = 1 * time.Second + // PollTimeout is how long e2e tests will wait for resource updates when polling. + PollTimeout = 10 * time.Minute + + // HelloVolumePath is the path to the test volume. + HelloVolumePath = "/hello/world" +) + +// util.go provides shared utilities methods across knative serving test + +// ListenAndServeGracefully calls into ListenAndServeGracefullyWithPattern +// by passing handler to handle requests for "/" +func ListenAndServeGracefully(addr string, handler func(w http.ResponseWriter, r *http.Request)) { + ListenAndServeGracefullyWithHandler(addr, http.HandlerFunc(handler)) +} + +// ListenAndServeGracefullyWithPattern creates an HTTP server, listens on the defined address +// and handles incoming requests with the given handler. +// It blocks until SIGTERM is received and the underlying server has shutdown gracefully. +func ListenAndServeGracefullyWithHandler(addr string, handler http.Handler) { + server := http.Server{Addr: addr, Handler: h2c.NewHandler(handler, &http2.Server{})} + go server.ListenAndServe() + + <-signals.SetupSignalHandler() + server.Shutdown(context.Background()) +} + +// TODO(dangerd): Remove this and use duck.CreateBytePatch after release-0.9 +// CreateBytePatch is a helper function that creates the same content as +// CreatePatch, but returns in []byte format instead of JSONPatch. 
+func CreateBytePatch(before, after interface{}) ([]byte, error) { + patch, err := duck.CreatePatch(before, after) + if err != nil { + return nil, err + } + return patch.MarshalJSON() +} diff --git a/test/vendor/knative.dev/serving/test/v1/configuration.go b/test/vendor/knative.dev/serving/test/v1/configuration.go new file mode 100644 index 0000000000..859edec3ce --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1/configuration.go @@ -0,0 +1,158 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/test/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + pkgTest "knative.dev/pkg/test" + rtesting "knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" +) + +// CreateConfiguration create a configuration resource in namespace with the name names.Config +// that uses the image specified by names.Image. +func CreateConfiguration(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ConfigOption) (*v1.Configuration, error) { + config := Configuration(names, fopt...) + LogResourceObject(t, ResourceObjects{Config: config}) + return clients.ServingClient.Configs.Create(config) +} + +// PatchConfig patches the existing configuration passed in with the applied mutations. 
+// Returns the latest configuration object +func PatchConfig(t pkgTest.T, clients *test.Clients, svc *v1.Configuration, fopt ...rtesting.ConfigOption) (*v1.Configuration, error) { + newSvc := svc.DeepCopy() + for _, opt := range fopt { + opt(newSvc) + } + LogResourceObject(t, ResourceObjects{Config: newSvc}) + patchBytes, err := test.CreateBytePatch(svc, newSvc) + if err != nil { + return nil, err + } + return clients.ServingClient.Configs.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// WaitForConfigLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Configuration. +// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName +// before returning the name of the revision. +func WaitForConfigLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) { + var revisionName string + err := WaitForConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil + }, "ConfigurationUpdatedWithRevision") + if err != nil { + return "", err + } + err = WaitForConfigurationState(clients.ServingClient, names.Config, func(c *v1.Configuration) (bool, error) { + return (c.Status.LatestReadyRevisionName == revisionName), nil + }, "ConfigurationReadyWithRevision") + + return revisionName, err +} + +// ConfigurationSpec returns the spec of a configuration to be used throughout different +// CRD helpers. 
+func ConfigurationSpec(imagePath string) *v1.ConfigurationSpec { + return &v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: imagePath, + }}, + }, + }, + }, + } +} + +// Configuration returns a Configuration object in namespace with the name names.Config +// that uses the image specified by names.Image +func Configuration(names test.ResourceNames, fopt ...rtesting.ConfigOption) *v1.Configuration { + config := &v1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Config, + }, + Spec: *ConfigurationSpec(pkgTest.ImagePath(names.Image)), + } + + for _, opt := range fopt { + opt(config) + } + + return config +} + +// WaitForConfigurationState polls the status of the Configuration called name +// from client every PollInterval until inState returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. +func WaitForConfigurationState(client *test.ServingClients, name string, inState func(c *v1.Configuration) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForConfigurationState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1.Configuration + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("configuration %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckConfigurationState verifies the status of the Configuration called name from client +// is in a particular state by calling `inState` and expecting `true`. 
+// This is the non-polling variety of WaitForConfigurationState +func CheckConfigurationState(client *test.ServingClients, name string, inState func(r *v1.Configuration) (bool, error)) error { + c, err := client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(c); err != nil { + return err + } else if !done { + return fmt.Errorf("configuration %q is not in desired state, got: %+v", name, c) + } + return nil +} + +// IsConfigurationReady will check the status conditions of the config and return true if the config is +// ready. This means it has at least created one revision and that has become ready. +func IsConfigurationReady(c *v1.Configuration) (bool, error) { + return c.Generation == c.Status.ObservedGeneration && c.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1/crd.go b/test/vendor/knative.dev/serving/test/v1/crd.go new file mode 100644 index 0000000000..908ab2daef --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1/crd.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/davecgh/go-spew/spew" + pkgTest "knative.dev/pkg/test" + v1 "knative.dev/serving/pkg/apis/serving/v1" +) + +// ResourceObjects holds types of the resource objects. 
+type ResourceObjects struct { + Route *v1.Route + Config *v1.Configuration + Service *v1.Service + Revision *v1.Revision +} + +// LogResourceObject logs the resource object with the resource name and value +func LogResourceObject(t pkgTest.T, value ResourceObjects) { + t.Log("", "resource", spew.Sprint(value)) +} diff --git a/test/vendor/knative.dev/serving/test/v1/revision.go b/test/vendor/knative.dev/serving/test/v1/revision.go new file mode 100644 index 0000000000..4a31c75ca2 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1/revision.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + "knative.dev/serving/pkg/apis/serving" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/test" +) + +// WaitForRevisionState polls the status of the Revision called name +// from client every `PollInterval` until `inState` returns `true` indicating it +// is done, returns an error or timeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. 
+func WaitForRevisionState(client *test.ServingClients, name string, inState func(r *v1.Revision) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRevision/%s/%s", name, desc)) + defer span.End() + + var lastState *v1.Revision + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("revision %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckRevisionState verifies the status of the Revision called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForRevisionState +func CheckRevisionState(client *test.ServingClients, name string, inState func(r *v1.Revision) (bool, error)) error { + r, err := client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(r); err != nil { + return err + } else if !done { + return fmt.Errorf("revision %q is not in desired state, got: %+v", name, r) + } + return nil +} + +// IsRevisionReady will check the status conditions of the revision and return true if the revision is +// ready to serve traffic. It will return false if the status indicates a state other than deploying +// or being ready. It will also return false if the type of the condition is unexpected. +func IsRevisionReady(r *v1.Revision) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} + +// IsRevisionPinned will check if the revision is pinned to a route. 
+func IsRevisionPinned(r *v1.Revision) (bool, error) { + _, pinned := r.Annotations[serving.RevisionLastPinnedAnnotationKey] + return pinned, nil +} + +// IsRevisionAtExpectedGeneration returns a function that will check if the annotations +// on the revision include an annotation for the generation and that the annotation is +// set to the expected value. +func IsRevisionAtExpectedGeneration(expectedGeneration string) func(r *v1.Revision) (bool, error) { + return func(r *v1.Revision) (bool, error) { + if a, ok := r.Labels[serving.ConfigurationGenerationLabelKey]; ok { + if a != expectedGeneration { + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but was %s instead", r.Name, expectedGeneration, a) + } + return true, nil + } + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but there was no label", r.Name, expectedGeneration) + } +} diff --git a/test/vendor/knative.dev/serving/test/v1/route.go b/test/vendor/knative.dev/serving/test/v1/route.go new file mode 100644 index 0000000000..353cbb3b40 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1/route.go @@ -0,0 +1,151 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + "net/http" + + "github.com/davecgh/go-spew/spew" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/ptr" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" + v1 "knative.dev/serving/pkg/apis/serving/v1" + + pkgTest "knative.dev/pkg/test" + rtesting "knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" +) + +// Route returns a Route object in namespace using the route and configuration +// names in names. +func Route(names test.ResourceNames, fopt ...rtesting.RouteOption) *v1.Route { + route := &v1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Route, + }, + Spec: v1.RouteSpec{ + Traffic: []v1.TrafficTarget{{ + Tag: names.TrafficTarget, + ConfigurationName: names.Config, + Percent: ptr.Int64(100), + }}, + }, + } + + for _, opt := range fopt { + opt(route) + } + + return route +} + +// CreateRoute creates a route in the given namespace using the route name in names +func CreateRoute(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.RouteOption) (*v1.Route, error) { + route := Route(names, fopt...) + LogResourceObject(t, ResourceObjects{Route: route}) + return clients.ServingClient.Routes.Create(route) +} + +// WaitForRouteState polls the status of the Route called name from client every +// PollInterval until inState returns `true` indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. 
+func WaitForRouteState(client *test.ServingClients, name string, inState func(r *v1.Route) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRouteState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1.Route + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Routes.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("route %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckRouteState verifies the status of the Route called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForRouteState +func CheckRouteState(client *test.ServingClients, name string, inState func(r *v1.Route) (bool, error)) error { + r, err := client.Routes.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(r); err != nil { + return err + } else if !done { + return fmt.Errorf("route %q is not in desired state, got: %+v", name, r) + } + return nil +} + +// IsRouteReady will check the status conditions of the route and return true if the route is +// ready. +func IsRouteReady(r *v1.Route) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} + +// IsRouteNotReady will check the status conditions of the route and return true if the route is +// not ready. 
+func IsRouteNotReady(r *v1.Route) (bool, error) { + return !r.Status.IsReady(), nil +} + +// RetryingRouteInconsistency retries common requests seen when creating a new route +// - 404 until the route is propagated to the proxy +// - 503 to account for Openshift route inconsistency (https://jira.coreos.com/browse/SRVKS-157) +func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable { + return false, nil + } + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. + return innerCheck(resp) + } +} + +// AllRouteTrafficAtRevision will check the revision that route r is routing +// traffic to and return true if 100% of the traffic is routing to revisionName. +func AllRouteTrafficAtRevision(names test.ResourceNames) func(r *v1.Route) (bool, error) { + return func(r *v1.Route) (bool, error) { + for _, tt := range r.Status.Traffic { + if tt.Percent != nil && *tt.Percent == 100 { + if tt.RevisionName != names.Revision { + return true, fmt.Errorf("expected traffic revision name to be %s but actually is %s: %s", names.Revision, tt.RevisionName, spew.Sprint(r)) + } + + if tt.Tag != names.TrafficTarget { + return true, fmt.Errorf("expected traffic target name to be %s but actually is %s: %s", names.TrafficTarget, tt.Tag, spew.Sprint(r)) + } + + return true, nil + } + } + return false, nil + } +} diff --git a/test/vendor/knative.dev/serving/test/v1/service.go b/test/vendor/knative.dev/serving/test/v1/service.go new file mode 100644 index 0000000000..1477056303 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1/service.go @@ -0,0 +1,256 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mattbaird/jsonpatch" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + rtesting "knative.dev/serving/pkg/testing/v1" + "knative.dev/serving/test" +) + +func validateCreatedServiceStatus(clients *test.Clients, names *test.ResourceNames) error { + return CheckServiceState(clients.ServingClient, names.Service, func(s *v1.Service) (bool, error) { + if s.Status.URL == nil || s.Status.URL.Host == "" { + return false, fmt.Errorf("url is not present in Service status: %v", s) + } + names.URL = s.Status.URL.URL() + if s.Status.LatestCreatedRevisionName == "" { + return false, fmt.Errorf("lastCreatedRevision is not present in Service status: %v", s) + } + names.Revision = s.Status.LatestCreatedRevisionName + if s.Status.LatestReadyRevisionName == "" { + return false, fmt.Errorf("lastReadyRevision is not present in Service status: %v", s) + } + if s.Status.ObservedGeneration != 1 { + return false, fmt.Errorf("observedGeneration is not 1 in Service status: %v", s) + } + return true, nil + }) +} + +// GetResourceObjects obtains the services resources from the k8s API server. 
+func GetResourceObjects(clients *test.Clients, names test.ResourceNames) (*ResourceObjects, error) { + routeObject, err := clients.ServingClient.Routes.Get(names.Route, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + serviceObject, err := clients.ServingClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + configObject, err := clients.ServingClient.Configs.Get(names.Config, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + revisionObject, err := clients.ServingClient.Revisions.Get(names.Revision, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return &ResourceObjects{ + Route: routeObject, + Service: serviceObject, + Config: configObject, + Revision: revisionObject, + }, nil +} + +// CreateServiceReady creates a new Service in state 'Ready'. This function expects Service and Image name +// passed in through 'names'. Names is updated with the Route and Configuration created by the Service +// and ResourceObjects is returned with the Service, Route, and Configuration objects. +// Returns error if the service does not come up correctly. +func CreateServiceReady(t pkgTest.T, clients *test.Clients, names *test.ResourceNames, fopt ...rtesting.ServiceOption) (*ResourceObjects, error) { + if names.Image == "" { + return nil, fmt.Errorf("expected non-empty Image name; got Image=%v", names.Image) + } + + t.Log("Creating a new Service", "service", names.Service) + svc, err := CreateService(t, clients, *names, fopt...) 
+ if err != nil { + return nil, err + } + + // Populate Route and Configuration Objects with name + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + // If the Service name was not specified, populate it + if names.Service == "" { + names.Service = svc.Name + } + + t.Log("Waiting for Service to transition to Ready.", "service", names.Service) + if err := WaitForServiceState(clients.ServingClient, names.Service, IsServiceReady, "ServiceIsReady"); err != nil { + return nil, err + } + + t.Log("Checking to ensure Service Status is populated for Ready service") + err = validateCreatedServiceStatus(clients, names) + if err != nil { + return nil, err + } + + t.Log("Getting latest objects Created by Service") + resources, err := GetResourceObjects(clients, *names) + if err == nil { + t.Log("Successfully created Service", names.Service) + } + return resources, err +} + +// CreateService creates a service in namespace with the name names.Service and names.Image +func CreateService(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ServiceOption) (*v1.Service, error) { + service := Service(names, fopt...) + LogResourceObject(t, ResourceObjects{Service: service}) + svc, err := clients.ServingClient.Services.Create(service) + return svc, err +} + +// PatchService patches the existing service passed in with the applied mutations. +// Returns the latest service object +func PatchService(t pkgTest.T, clients *test.Clients, svc *v1.Service, fopt ...rtesting.ServiceOption) (*v1.Service, error) { + newSvc := svc.DeepCopy() + for _, opt := range fopt { + opt(newSvc) + } + LogResourceObject(t, ResourceObjects{Service: newSvc}) + patchBytes, err := test.CreateBytePatch(svc, newSvc) + if err != nil { + return nil, err + } + return clients.ServingClient.Services.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// UpdateServiceRouteSpec updates a service to use the route name in names. 
+func UpdateServiceRouteSpec(t pkgTest.T, clients *test.Clients, names test.ResourceNames, rs v1.RouteSpec) (*v1.Service, error) { + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/spec/traffic", + Value: rs.Traffic, + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + return nil, err + } + return clients.ServingClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "") +} + +// WaitForServiceLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Service. +// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName +// before returning the name of the revision. +func WaitForServiceLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) { + var revisionName string + if err := WaitForServiceState(clients.ServingClient, names.Service, func(s *v1.Service) (bool, error) { + if s.Status.LatestCreatedRevisionName != names.Revision { + revisionName = s.Status.LatestCreatedRevisionName + // We also check that the revision is pinned, meaning it's not a stale revision. + // Without this it might happen that the latest created revision is later overridden by a newer one + // and the following check for LatestReadyRevisionName would fail. 
+ if revErr := CheckRevisionState(clients.ServingClient, revisionName, IsRevisionPinned); revErr != nil { + return false, nil + } + return true, nil + } + return false, nil + }, "ServiceUpdatedWithRevision"); err != nil { + return "", fmt.Errorf("LatestCreatedRevisionName not updated: %w", err) + } + if err := WaitForServiceState(clients.ServingClient, names.Service, func(s *v1.Service) (bool, error) { + return (s.Status.LatestReadyRevisionName == revisionName), nil + }, "ServiceReadyWithRevision"); err != nil { + return "", fmt.Errorf("LatestReadyRevisionName not updated with %s: %w", revisionName, err) + } + + return revisionName, nil +} + +// Service returns a Service object in namespace with the name names.Service +// that uses the image specified by names.Image. +func Service(names test.ResourceNames, fopt ...rtesting.ServiceOption) *v1.Service { + a := append([]rtesting.ServiceOption{ + rtesting.WithInlineConfigSpec(*ConfigurationSpec(pkgTest.ImagePath(names.Image))), + }, fopt...) + return rtesting.ServiceWithoutNamespace(names.Service, a...) +} + +// WaitForServiceState polls the status of the Service called name +// from client every `PollInterval` until `inState` returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. 
+func WaitForServiceState(client *test.ServingClients, name string, inState func(s *v1.Service) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForServiceState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1.Service + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Services.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("service %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckServiceState verifies the status of the Service called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForServiceState. +func CheckServiceState(client *test.ServingClients, name string, inState func(s *v1.Service) (bool, error)) error { + s, err := client.Services.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(s); err != nil { + return err + } else if !done { + return fmt.Errorf("service %q is not in desired state, got: %+v", name, s) + } + return nil +} + +// IsServiceReady will check the status conditions of the service and return true if the service is +// ready. This means that its configurations and routes have all reported ready. +func IsServiceReady(s *v1.Service) (bool, error) { + return s.Generation == s.Status.ObservedGeneration && s.Status.IsReady(), nil +} + +// IsServiceNotReady will check the status conditions of the service and return true if the service is +// not ready. 
+func IsServiceNotReady(s *v1.Service) (bool, error) { + return s.Generation == s.Status.ObservedGeneration && !s.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/configuration.go b/test/vendor/knative.dev/serving/test/v1alpha1/configuration.go new file mode 100644 index 0000000000..ccda5d63aa --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/configuration.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// configuration.go provides methods to perform actions on the configuration object. + +package v1alpha1 + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + + pkgTest "knative.dev/pkg/test" + v1alpha1testing "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +// CreateConfiguration create a configuration resource in namespace with the name names.Config +// that uses the image specified by names.Image. +func CreateConfiguration(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...v1alpha1testing.ConfigOption) (*v1alpha1.Configuration, error) { + config := Configuration(names, fopt...) 
+ LogResourceObject(t, ResourceObjects{Config: config}) + return clients.ServingAlphaClient.Configs.Create(config) +} + +// PatchConfigImage patches the existing config passed in with a new imagePath. Returns the latest Configuration object +func PatchConfigImage(clients *test.Clients, cfg *v1alpha1.Configuration, imagePath string) (*v1alpha1.Configuration, error) { + newCfg := cfg.DeepCopy() + newCfg.Spec.GetTemplate().Spec.GetContainer().Image = imagePath + patchBytes, err := test.CreateBytePatch(cfg, newCfg) + if err != nil { + return nil, err + } + return clients.ServingAlphaClient.Configs.Patch(cfg.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// WaitForConfigLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Configuration. +// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName +// before returning the name of the revision. +func WaitForConfigLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) { + var revisionName string + err := WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil + }, "ConfigurationUpdatedWithRevision") + if err != nil { + return "", err + } + err = WaitForConfigurationState(clients.ServingAlphaClient, names.Config, func(c *v1alpha1.Configuration) (bool, error) { + return (c.Status.LatestReadyRevisionName == revisionName), nil + }, "ConfigurationReadyWithRevision") + + return revisionName, err +} + +// ConfigurationSpec returns the spec of a configuration to be used throughout different +// CRD helpers. 
+func ConfigurationSpec(imagePath string) *v1alpha1.ConfigurationSpec { + return &v1alpha1.ConfigurationSpec{ + Template: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + RevisionSpec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: imagePath, + }}, + }, + }, + }, + }, + } +} + +// LegacyConfigurationSpec returns the spec of a configuration to be used throughout different +// CRD helpers. +func LegacyConfigurationSpec(imagePath string) *v1alpha1.ConfigurationSpec { + return &v1alpha1.ConfigurationSpec{ + DeprecatedRevisionTemplate: &v1alpha1.RevisionTemplateSpec{ + Spec: v1alpha1.RevisionSpec{ + DeprecatedContainer: &corev1.Container{ + Image: imagePath, + }, + RevisionSpec: v1.RevisionSpec{}, + }, + }, + } +} + +// Configuration returns a Configuration object in namespace with the name names.Config +// that uses the image specified by names.Image +func Configuration(names test.ResourceNames, fopt ...v1alpha1testing.ConfigOption) *v1alpha1.Configuration { + config := &v1alpha1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Config, + }, + Spec: *ConfigurationSpec(pkgTest.ImagePath(names.Image)), + } + + for _, opt := range fopt { + opt(config) + } + + return config +} + +// WaitForConfigurationState polls the status of the Configuration called name +// from client every PollInterval until inState returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. 
+func WaitForConfigurationState(client *test.ServingAlphaClients, name string, inState func(c *v1alpha1.Configuration) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForConfigurationState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1alpha1.Configuration + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("configuration %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckConfigurationState verifies the status of the Configuration called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForConfigurationState +func CheckConfigurationState(client *test.ServingAlphaClients, name string, inState func(r *v1alpha1.Configuration) (bool, error)) error { + c, err := client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(c); err != nil { + return err + } else if !done { + return fmt.Errorf("configuration %q is not in desired state, got: %+v", name, c) + } + return nil +} + +// ConfigurationHasCreatedRevision returns whether the Configuration has created a Revision. +func ConfigurationHasCreatedRevision(c *v1alpha1.Configuration) (bool, error) { + return c.Status.LatestCreatedRevisionName != "", nil +} + +// IsConfigurationReady will check the status conditions of the config and return true if the config is +// ready. This means it has at least created one revision and that has become ready. 
+func IsConfigurationReady(c *v1alpha1.Configuration) (bool, error) { + return c.Generation == c.Status.ObservedGeneration && c.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/crd.go b/test/vendor/knative.dev/serving/test/v1alpha1/crd.go new file mode 100644 index 0000000000..1e98d0a34c --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/crd.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/davecgh/go-spew/spew" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1alpha1" +) + +// ResourceObjects holds types of the resource objects. +type ResourceObjects struct { + Route *v1alpha1.Route + Config *v1alpha1.Configuration + Service *v1alpha1.Service + Revision *v1alpha1.Revision +} + +// LogResourceObject logs the resource object with the resource name and value +func LogResourceObject(t pkgTest.T, value ResourceObjects) { + t.Log("", "resource", spew.Sprint(value)) +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/ingress.go b/test/vendor/knative.dev/serving/test/v1alpha1/ingress.go new file mode 100644 index 0000000000..85f0313aeb --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/ingress.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + "knative.dev/serving/pkg/apis/networking/v1alpha1" + "knative.dev/serving/test" +) + +// WaitForIngressState polls the status of the Ingress called name from client every +// PollInterval until inState returns `true` indicating it is done, returns an +// error or PollTimeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForIngressState(client *test.NetworkingClients, name string, inState func(r *v1alpha1.Ingress) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForIngressState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1alpha1.Ingress + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Ingresses.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("ingress %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// IsIngressReady will check the status conditions of the ingress and return true if the ingress is +// ready. 
+func IsIngressReady(r *v1alpha1.Ingress) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/revision.go b/test/vendor/knative.dev/serving/test/v1alpha1/revision.go new file mode 100644 index 0000000000..de68db4a4b --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/revision.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + "knative.dev/serving/test" +) + +// WaitForRevisionState polls the status of the Revision called name +// from client every `PollInterval` until `inState` returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. 
+func WaitForRevisionState(client *test.ServingAlphaClients, name string, inState func(r *v1alpha1.Revision) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRevision/%s/%s", name, desc)) + defer span.End() + + var lastState *v1alpha1.Revision + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("revision %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckRevisionState verifies the status of the Revision called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForRevisionState +func CheckRevisionState(client *test.ServingAlphaClients, name string, inState func(r *v1alpha1.Revision) (bool, error)) error { + r, err := client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(r); err != nil { + return err + } else if !done { + return fmt.Errorf("revision %q is not in desired state, got: %+v", name, r) + } + return nil +} + +// IsRevisionReady will check the status conditions of the revision and return true if the revision is +// ready to serve traffic. It will return false if the status indicates a state other than deploying +// or being ready. It will also return false if the type of the condition is unexpected. +func IsRevisionReady(r *v1alpha1.Revision) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} + +// IsRevisionPinned will check if the revision is pinned to a route. 
+func IsRevisionPinned(r *v1alpha1.Revision) (bool, error) { + _, pinned := r.Annotations[serving.RevisionLastPinnedAnnotationKey] + return pinned, nil +} + +// IsRevisionAtExpectedGeneration returns a function that will check if the annotations +// on the revision include an annotation for the generation and that the annotation is +// set to the expected value. +func IsRevisionAtExpectedGeneration(expectedGeneration string) func(r *v1alpha1.Revision) (bool, error) { + return func(r *v1alpha1.Revision) (bool, error) { + if a, ok := r.Labels[serving.ConfigurationGenerationLabelKey]; ok { + if a != expectedGeneration { + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but was %s instead", r.Name, expectedGeneration, a) + } + return true, nil + } + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but there was no label", r.Name, expectedGeneration) + } +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/route.go b/test/vendor/knative.dev/serving/test/v1alpha1/route.go new file mode 100644 index 0000000000..1431d2ef00 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/route.go @@ -0,0 +1,138 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// route.go provides methods to perform actions on the route resource. 
+ +package v1alpha1 + +import ( + "context" + "fmt" + "net/http" + + "github.com/davecgh/go-spew/spew" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/ptr" + pkgTest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + "knative.dev/pkg/test/spoof" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + + v1alpha1testing "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +// CreateRoute creates a route in the given namespace using the route name in names +func CreateRoute(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...v1alpha1testing.RouteOption) (*v1alpha1.Route, error) { + fopt = append(fopt, v1alpha1testing.WithSpecTraffic(v1alpha1.TrafficTarget{ + TrafficTarget: v1.TrafficTarget{ + Tag: names.TrafficTarget, + ConfigurationName: names.Config, + Percent: ptr.Int64(100), + }, + })) + route := v1alpha1testing.Route(test.ServingNamespace, names.Route, fopt...) + LogResourceObject(t, ResourceObjects{Route: route}) + return clients.ServingAlphaClient.Routes.Create(route) +} + +// RetryingRouteInconsistency retries common requests seen when creating a new route +// - 404 until the route is propagated to the proxy +// - 503 to account for Openshift route inconsistency (https://jira.coreos.com/browse/SRVKS-157) +func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker { + return func(resp *spoof.Response) (bool, error) { + if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable { + return false, nil + } + // If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped. + return innerCheck(resp) + } +} + +// WaitForRouteState polls the status of the Route called name from client every +// PollInterval until inState returns `true` indicating it is done, returns an +// error or PollTimeout. 
desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForRouteState(client *test.ServingAlphaClients, name string, inState func(r *v1alpha1.Route) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRouteState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1alpha1.Route + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Routes.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("route %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckRouteState verifies the status of the Route called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForRouteState +func CheckRouteState(client *test.ServingAlphaClients, name string, inState func(r *v1alpha1.Route) (bool, error)) error { + r, err := client.Routes.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(r); err != nil { + return err + } else if !done { + return fmt.Errorf("route %q is not in desired state, got: %+v", name, r) + } + return nil +} + +// IsRouteReady will check the status conditions of the route and return true if the route is +// ready. +func IsRouteReady(r *v1alpha1.Route) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} + +// IsRouteNotReady will check the status conditions of the route and return true if the route is +// not ready. 
+func IsRouteNotReady(r *v1alpha1.Route) (bool, error) { + return !r.Status.IsReady(), nil +} + +// AllRouteTrafficAtRevision will check the revision that route r is routing +// traffic to and return true if 100% of the traffic is routing to revisionName. +func AllRouteTrafficAtRevision(names test.ResourceNames) func(r *v1alpha1.Route) (bool, error) { + return func(r *v1alpha1.Route) (bool, error) { + for _, tt := range r.Status.Traffic { + if tt.Percent != nil && *tt.Percent == 100 { + if tt.RevisionName != names.Revision { + return true, fmt.Errorf("expected traffic revision name to be %s but actually is %s: %s", names.Revision, tt.RevisionName, spew.Sprint(r)) + } + + if tt.Tag != names.TrafficTarget { + return true, fmt.Errorf("expected traffic target name to be %s but actually is %s: %s", names.TrafficTarget, tt.Tag, spew.Sprint(r)) + } + + return true, nil + } + } + return false, nil + } +} diff --git a/test/vendor/knative.dev/serving/test/v1alpha1/service.go b/test/vendor/knative.dev/serving/test/v1alpha1/service.go new file mode 100644 index 0000000000..bce4a5f7f1 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1alpha1/service.go @@ -0,0 +1,578 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// service.go provides methods to perform actions on the service resource. 
+ +package v1alpha1 + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "net/http" + "strings" + "sync" + "time" + + istiov1alpha3 "istio.io/api/networking/v1alpha3" + "istio.io/client-go/pkg/apis/networking/v1alpha3" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/watch" + "knative.dev/pkg/test/spoof" + + "github.com/mattbaird/jsonpatch" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + "knative.dev/serving/pkg/apis/serving/v1alpha1" + serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/networking" + rtesting "knative.dev/serving/pkg/testing/v1alpha1" + "knative.dev/serving/test" +) + +const ( + // Namespace is the namespace of the ingress gateway + Namespace = "knative-serving" + + // GatewayName is the name of the ingress gateway + GatewayName = networking.KnativeIngressGateway +) + +func validateCreatedServiceStatus(clients *test.Clients, names *test.ResourceNames) error { + return CheckServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + if s.Status.URL == nil || s.Status.URL.Host == "" { + return false, fmt.Errorf("URL is not present in Service status: %v", s) + } + names.URL = s.Status.URL.URL() + if s.Status.LatestCreatedRevisionName == "" { + return false, fmt.Errorf("LatestCreatedCreatedRevisionName is not present in Service status: %v", s) + } + names.Revision = s.Status.LatestCreatedRevisionName + if s.Status.LatestReadyRevisionName == "" { + return false, fmt.Errorf("LatestReadyRevisionName is not present in Service status: %v", s) + } + if s.Status.ObservedGeneration != 1 { + return false, 
fmt.Errorf("ObservedGeneration is not 1 in Service status: %v", s) + } + return true, nil + }) +} + +// GetResourceObjects obtains the services resources from the k8s API server. +func GetResourceObjects(clients *test.Clients, names test.ResourceNames) (*ResourceObjects, error) { + routeObject, err := clients.ServingAlphaClient.Routes.Get(names.Route, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + serviceObject, err := clients.ServingAlphaClient.Services.Get(names.Service, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + configObject, err := clients.ServingAlphaClient.Configs.Get(names.Config, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + revisionObject, err := clients.ServingAlphaClient.Revisions.Get(names.Revision, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return &ResourceObjects{ + Route: routeObject, + Service: serviceObject, + Config: configObject, + Revision: revisionObject, + }, nil +} + +// CreateRunLatestServiceReady creates a new Service in state 'Ready'. This function expects Service and Image name passed in through 'names'. +// Names is updated with the Route and Configuration created by the Service and ResourceObjects is returned with the Service, Route, and Configuration objects. +// If this function is called with https == true, the gateway MUST be restored afterwards. +// Returns error if the service does not come up correctly. +func CreateRunLatestServiceReady(t pkgTest.TLegacy, clients *test.Clients, names *test.ResourceNames, https bool, fopt ...rtesting.ServiceOption) (*ResourceObjects, *spoof.TransportOption, error) { + if names.Image == "" { + return nil, nil, fmt.Errorf("expected non-empty Image name; got Image=%v", names.Image) + } + + t.Log("Creating a new Service.", "service", names.Service) + svc, err := CreateLatestService(t, clients, *names, fopt...) 
+ if err != nil { + return nil, nil, err + } + + // Populate Route and Configuration Objects with name + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + // If the Service name was not specified, populate it + if names.Service == "" { + names.Service = svc.Name + } + + t.Log("Waiting for Service to transition to Ready.", "service", names.Service) + if err = WaitForServiceState(clients.ServingAlphaClient, names.Service, IsServiceReady, "ServiceIsReady"); err != nil { + return nil, nil, err + } + + t.Log("Checking to ensure Service Status is populated for Ready service") + err = validateCreatedServiceStatus(clients, names) + if err != nil { + return nil, nil, err + } + + var httpsTransportOption *spoof.TransportOption + if https { + tlsOptions := &istiov1alpha3.Server_TLSOptions{ + Mode: istiov1alpha3.Server_TLSOptions_SIMPLE, + PrivateKey: "/etc/istio/ingressgateway-certs/tls.key", + ServerCertificate: "/etc/istio/ingressgateway-certs/tls.crt", + } + servers := []*istiov1alpha3.Server{{ + Hosts: []string{"*"}, + Port: &istiov1alpha3.Port{ + Name: "standard-https", + Number: 443, + Protocol: "HTTPS", + }, + Tls: tlsOptions, + }} + httpsTransportOption, err = setupHTTPS(t, clients.KubeClient, names.URL.Host) + if err != nil { + return nil, nil, err + } + setupGateway(t, clients, servers) + } + + t.Log("Getting latest objects Created by Service") + resources, err := GetResourceObjects(clients, *names) + if err == nil { + t.Log("Successfully created Service", names.Service) + } + return resources, httpsTransportOption, err +} + +// CreateRunLatestServiceLegacyReady creates a new Service in state 'Ready'. This function expects Service and Image name passed in through 'names'. +// Names is updated with the Route and Configuration created by the Service and ResourceObjects is returned with the Service, Route, and Configuration objects. +// Returns error if the service does not come up correctly. 
+func CreateRunLatestServiceLegacyReady(t pkgTest.T, clients *test.Clients, names *test.ResourceNames, fopt ...rtesting.ServiceOption) (*ResourceObjects, error) { + if names.Image == "" { + return nil, fmt.Errorf("expected non-empty Image name; got Image=%v", names.Image) + } + + t.Log("Creating a new Service.", "service", names.Service) + svc, err := CreateLatestServiceLegacy(t, clients, *names, fopt...) + if err != nil { + return nil, err + } + + // Populate Route and Configuration Objects with name + names.Route = serviceresourcenames.Route(svc) + names.Config = serviceresourcenames.Configuration(svc) + + // If the Service name was not specified, populate it + if names.Service == "" { + names.Service = svc.Name + } + + t.Log("Waiting for Service to transition to Ready.", "service", names.Service) + if err := WaitForServiceState(clients.ServingAlphaClient, names.Service, IsServiceReady, "ServiceIsReady"); err != nil { + return nil, err + } + + t.Log("Checking to ensure Service Status is populated for Ready service", names.Service) + err = validateCreatedServiceStatus(clients, names) + if err != nil { + return nil, err + } + + t.Log("Getting latest objects Created by Service", names.Service) + resources, err := GetResourceObjects(clients, *names) + if err == nil { + t.Log("Successfully created Service", names.Service) + } + return resources, err +} + +// CreateLatestService creates a service in namespace with the name names.Service and names.Image +func CreateLatestService(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ServiceOption) (*v1alpha1.Service, error) { + service := LatestService(names, fopt...) 
+ LogResourceObject(t, ResourceObjects{Service: service}) + svc, err := clients.ServingAlphaClient.Services.Create(service) + return svc, err +} + +// CreateLatestServiceLegacy creates a service in namespace with the name names.Service and names.Image +func CreateLatestServiceLegacy(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ServiceOption) (*v1alpha1.Service, error) { + service := LatestServiceLegacy(names, fopt...) + LogResourceObject(t, ResourceObjects{Service: service}) + svc, err := clients.ServingAlphaClient.Services.Create(service) + return svc, err +} + +// PatchServiceImage patches the existing service passed in with a new imagePath. Returns the latest service object +func PatchServiceImage(t pkgTest.T, clients *test.Clients, svc *v1alpha1.Service, imagePath string) (*v1alpha1.Service, error) { + newSvc := svc.DeepCopy() + if svc.Spec.DeprecatedRunLatest != nil { + newSvc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.GetContainer().Image = imagePath + } else if svc.Spec.DeprecatedRelease != nil { + newSvc.Spec.DeprecatedRelease.Configuration.GetTemplate().Spec.GetContainer().Image = imagePath + } else if svc.Spec.DeprecatedPinned != nil { + newSvc.Spec.DeprecatedPinned.Configuration.GetTemplate().Spec.GetContainer().Image = imagePath + } else { + newSvc.Spec.ConfigurationSpec.GetTemplate().Spec.GetContainer().Image = imagePath + } + LogResourceObject(t, ResourceObjects{Service: newSvc}) + patchBytes, err := test.CreateBytePatch(svc, newSvc) + if err != nil { + return nil, err + } + return clients.ServingAlphaClient.Services.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// PatchService creates and applies a patch from the diff between curSvc and desiredSvc. Returns the latest service object. 
+func PatchService(t pkgTest.T, clients *test.Clients, curSvc *v1alpha1.Service, desiredSvc *v1alpha1.Service) (*v1alpha1.Service, error) { + LogResourceObject(t, ResourceObjects{Service: desiredSvc}) + patchBytes, err := test.CreateBytePatch(curSvc, desiredSvc) + if err != nil { + return nil, err + } + return clients.ServingAlphaClient.Services.Patch(curSvc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// UpdateServiceRouteSpec updates a service to use the route name in names. +func UpdateServiceRouteSpec(t pkgTest.T, clients *test.Clients, names test.ResourceNames, rs v1alpha1.RouteSpec) (*v1alpha1.Service, error) { + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/spec/traffic", + Value: rs.Traffic, + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + return nil, err + } + return clients.ServingAlphaClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "") +} + +// PatchServiceTemplateMetadata patches an existing service by adding metadata to the service's RevisionTemplateSpec. +func PatchServiceTemplateMetadata(t pkgTest.T, clients *test.Clients, svc *v1alpha1.Service, metadata metav1.ObjectMeta) (*v1alpha1.Service, error) { + newSvc := svc.DeepCopy() + newSvc.Spec.ConfigurationSpec.Template.ObjectMeta = metadata + LogResourceObject(t, ResourceObjects{Service: newSvc}) + patchBytes, err := test.CreateBytePatch(svc, newSvc) + if err != nil { + return nil, err + } + return clients.ServingAlphaClient.Services.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// WaitForServiceLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Service. +// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName +// before returning the name of the revision. 
+func WaitForServiceLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) { + var revisionName string + if err := WaitForServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + if s.Status.LatestCreatedRevisionName != names.Revision { + revisionName = s.Status.LatestCreatedRevisionName + // We also check that the revision is pinned, meaning it's not a stale revision. + // Without this it might happen that the latest created revision is later overridden by a newer one + // and the following check for LatestReadyRevisionName would fail. + if revErr := CheckRevisionState(clients.ServingAlphaClient, revisionName, IsRevisionPinned); revErr != nil { + return false, nil + } + return true, nil + } + return false, nil + }, "ServiceUpdatedWithRevision"); err != nil { + return "", fmt.Errorf("LatestCreatedRevisionName not updated: %w", err) + } + if err := WaitForServiceState(clients.ServingAlphaClient, names.Service, func(s *v1alpha1.Service) (bool, error) { + return (s.Status.LatestReadyRevisionName == revisionName), nil + }, "ServiceReadyWithRevision"); err != nil { + return "", fmt.Errorf("LatestReadyRevisionName not updated with %s: %w", revisionName, err) + } + + return revisionName, nil +} + +// LatestService returns a Service object in namespace with the name names.Service +// that uses the image specified by names.Image. +func LatestService(names test.ResourceNames, fopt ...rtesting.ServiceOption) *v1alpha1.Service { + a := append([]rtesting.ServiceOption{ + rtesting.WithInlineConfigSpec(*ConfigurationSpec(pkgTest.ImagePath(names.Image))), + }, fopt...) + return rtesting.ServiceWithoutNamespace(names.Service, a...) +} + +// LatestServiceLegacy returns a DeprecatedRunLatest Service object in namespace with the name names.Service +// that uses the image specified by names.Image. 
+func LatestServiceLegacy(names test.ResourceNames, fopt ...rtesting.ServiceOption) *v1alpha1.Service { + a := append([]rtesting.ServiceOption{ + rtesting.WithRunLatestConfigSpec(*LegacyConfigurationSpec(pkgTest.ImagePath(names.Image))), + }, fopt...) + svc := rtesting.ServiceWithoutNamespace(names.Service, a...) + // Clear the name, which is put there by defaulting. + svc.Spec.DeprecatedRunLatest.Configuration.GetTemplate().Spec.GetContainer().Name = "" + return svc +} + +// WaitForServiceState polls the status of the Service called name +// from client every `PollInterval` until `inState` returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. +func WaitForServiceState(client *test.ServingAlphaClients, name string, inState func(s *v1alpha1.Service) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForServiceState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1alpha1.Service + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Services.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("service %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckServiceState verifies the status of the Service called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForServiceState. 
+func CheckServiceState(client *test.ServingAlphaClients, name string, inState func(s *v1alpha1.Service) (bool, error)) error { + s, err := client.Services.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(s); err != nil { + return err + } else if !done { + return fmt.Errorf("service %q is not in desired state, got: %+v", name, s) + } + return nil +} + +// IsServiceReady will check the status conditions of the service and return true if the service is +// ready. This means that its configurations and routes have all reported ready. +func IsServiceReady(s *v1alpha1.Service) (bool, error) { + return s.Generation == s.Status.ObservedGeneration && s.Status.IsReady(), nil +} + +// IsServiceNotReady checks the Ready status condition of the service and returns true only if Ready is set to False. +func IsServiceNotReady(s *v1alpha1.Service) (bool, error) { + result := s.Status.GetCondition(v1alpha1.ServiceConditionReady) + return s.Generation == s.Status.ObservedGeneration && result != nil && result.Status == corev1.ConditionFalse, nil +} + +// IsServiceRoutesNotReady checks the RoutesReady status of the service and returns true only if RoutesReady is set to False. 
+func IsServiceRoutesNotReady(s *v1alpha1.Service) (bool, error) { + result := s.Status.GetCondition(v1alpha1.ServiceConditionRoutesReady) + return s.Generation == s.Status.ObservedGeneration && result != nil && result.Status == corev1.ConditionFalse, nil +} + +// RestoreGateway updates the gateway object to the oldGateway +func RestoreGateway(t pkgTest.TLegacy, clients *test.Clients, oldGateway v1alpha3.Gateway) { + currGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(Namespace).Get(GatewayName, metav1.GetOptions{}) + if err != nil { + t.Fatal(fmt.Sprintf("Failed to get Gateway %s/%s", Namespace, GatewayName)) + } + if equality.Semantic.DeepEqual(*currGateway, oldGateway) { + t.Log("Gateway not restored because it's still the same") + return + } + currGateway.Spec.Servers = oldGateway.Spec.Servers + if _, err := clients.IstioClient.NetworkingV1alpha3().Gateways(Namespace).Update(currGateway); err != nil { + t.Fatal(fmt.Sprintf("Failed to restore Gateway %s/%s: %v", Namespace, GatewayName, err)) + } +} + +// setupGateway updates the ingress Gateway to the provided Servers and waits until all Envoy pods have been updated. 
func setupGateway(t pkgTest.TLegacy, clients *test.Clients, servers []*istiov1alpha3.Server) {
	// Get the current Gateway
	curGateway, err := clients.IstioClient.NetworkingV1alpha3().Gateways(Namespace).Get(GatewayName, metav1.GetOptions{})
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to get Gateway %s/%s: %v", Namespace, GatewayName, err))
	}

	// Update its Spec
	newGateway := curGateway.DeepCopy()
	newGateway.Spec.Servers = servers

	// Update the Gateway
	gw, err := clients.IstioClient.NetworkingV1alpha3().Gateways(Namespace).Update(newGateway)
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to update Gateway %s/%s: %v", Namespace, GatewayName, err))
	}

	// Build a label selector string ("k1=v1,k2=v2") from the gateway's pod selector.
	var selectors []string
	for k, v := range gw.Spec.Selector {
		selectors = append(selectors, k+"="+v)
	}
	selector := strings.Join(selectors, ",")

	// Restart the Gateway pods: this is needed because Istio without SDS won't refresh the cert when the secret is updated
	pods, err := clients.KubeClient.Kube.CoreV1().Pods("istio-system").List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		t.Fatal("Failed to list Gateway pods", "error", err.Error())
	}

	// TODO(bancel): there is a race condition here if a pod listed in the call above is deleted before calling watch below

	// One wg slot per listed pod; the watch goroutine below releases a slot for
	// every Deleted event it observes, so Wait() returns once all pods are gone.
	var wg sync.WaitGroup
	wg.Add(len(pods.Items))
	wtch, err := clients.KubeClient.Kube.CoreV1().Pods("istio-system").Watch(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		t.Fatal("Failed to watch Gateway pods", "error", err.Error())
	}
	defer wtch.Stop()

	// NOTE(review): if the watch channel is closed by the server, ResultChan
	// yields zero-value events in a tight loop and wg may never drain —
	// TODO confirm this is acceptable for a test-only helper.
	done := make(chan struct{})
	go func() {
		for {
			select {
			case event := <-wtch.ResultChan():
				if event.Type == watch.Deleted {
					wg.Done()
				}
			case <-done:
				return
			}
		}
	}()

	err = clients.KubeClient.Kube.CoreV1().Pods("istio-system").DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		t.Fatal("Failed to delete Gateway pods", "error", err.Error())
	}

	// Block until every listed pod was observed deleted, then stop the watcher goroutine.
	wg.Wait()
	done <- struct{}{}
}

// setupHTTPS creates a self-signed certificate, installs it as a Secret and returns an *http.Transport
// trusting the certificate as a root CA.
func setupHTTPS(t pkgTest.T, kubeClient *pkgTest.KubeClient, host string) (*spoof.TransportOption, error) {
	t.Helper()
	cert, key, err := generateCertificate(host)
	if err != nil {
		return nil, err
	}

	// Start from the system trust store when available so normal HTTPS still works.
	rootCAs, _ := x509.SystemCertPool()
	if rootCAs == nil {
		rootCAs = x509.NewCertPool()
	}

	if ok := rootCAs.AppendCertsFromPEM(cert); !ok {
		return nil, errors.New("failed to add the certificate to the root CA")
	}

	// Best-effort delete: the secret may not exist yet, so the error is deliberately ignored.
	kubeClient.Kube.CoreV1().Secrets("istio-system").Delete("istio-ingressgateway-certs", &metav1.DeleteOptions{})
	_, err = kubeClient.Kube.CoreV1().Secrets("istio-system").Create(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "istio-system",
			Name:      "istio-ingressgateway-certs",
		},
		Type: corev1.SecretTypeTLS,
		Data: map[string][]byte{
			"tls.key": key,
			"tls.crt": cert,
		},
	})
	if err != nil {
		return nil, err
	}
	var transportOption spoof.TransportOption = func(transport *http.Transport) *http.Transport {
		transport.TLSClientConfig = &tls.Config{RootCAs: rootCAs}
		return transport
	}
	return &transportOption, nil
}

// generateCertificate generates a self-signed certificate for the provided host and returns
// the PEM encoded certificate and private key.
+func generateCertificate(host string) ([]byte, []byte, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate private key: %v", err) + } + + notBefore := time.Now().Add(-5 * time.Minute) + notAfter := notBefore.Add(2 * time.Hour) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate serial number: %v", err) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Knative Serving"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, nil, fmt.Errorf("failed to create the certificate: %v", err) + } + + var certBuf bytes.Buffer + if err := pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return nil, nil, fmt.Errorf("failed to encode the certificate: %v", err) + } + + var keyBuf bytes.Buffer + if err := pem.Encode(&keyBuf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return nil, nil, fmt.Errorf("failed to encode the private key: %v", err) + } + + return certBuf.Bytes(), keyBuf.Bytes(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1beta1/configuration.go b/test/vendor/knative.dev/serving/test/v1beta1/configuration.go new file mode 100644 index 0000000000..b39cd8e50f --- /dev/null +++ 
b/test/vendor/knative.dev/serving/test/v1beta1/configuration.go @@ -0,0 +1,159 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/test/logging" + v1 "knative.dev/serving/pkg/apis/serving/v1" + "knative.dev/serving/pkg/apis/serving/v1beta1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + pkgTest "knative.dev/pkg/test" + rtesting "knative.dev/serving/pkg/testing/v1beta1" + "knative.dev/serving/test" +) + +// CreateConfiguration create a configuration resource in namespace with the name names.Config +// that uses the image specified by names.Image. +func CreateConfiguration(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ConfigOption) (*v1beta1.Configuration, error) { + config := Configuration(names, fopt...) + LogResourceObject(t, ResourceObjects{Config: config}) + return clients.ServingBetaClient.Configs.Create(config) +} + +// PatchConfig patches the existing configuration passed in with the applied mutations. 
+// Returns the latest configuration object +func PatchConfig(t pkgTest.T, clients *test.Clients, svc *v1beta1.Configuration, fopt ...rtesting.ConfigOption) (*v1beta1.Configuration, error) { + newSvc := svc.DeepCopy() + for _, opt := range fopt { + opt(newSvc) + } + LogResourceObject(t, ResourceObjects{Config: newSvc}) + patchBytes, err := test.CreateBytePatch(svc, newSvc) + if err != nil { + return nil, err + } + return clients.ServingBetaClient.Configs.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "") +} + +// WaitForConfigLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Configuration. +// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName +// before returning the name of the revision. +func WaitForConfigLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) { + var revisionName string + err := WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != names.Revision { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil + }, "ConfigurationUpdatedWithRevision") + if err != nil { + return "", err + } + err = WaitForConfigurationState(clients.ServingBetaClient, names.Config, func(c *v1beta1.Configuration) (bool, error) { + return (c.Status.LatestReadyRevisionName == revisionName), nil + }, "ConfigurationReadyWithRevision") + + return revisionName, err +} + +// ConfigurationSpec returns the spec of a configuration to be used throughout different +// CRD helpers. 
+func ConfigurationSpec(imagePath string) *v1.ConfigurationSpec { + return &v1.ConfigurationSpec{ + Template: v1.RevisionTemplateSpec{ + Spec: v1.RevisionSpec{ + PodSpec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Image: imagePath, + }}, + }, + }, + }, + } +} + +// Configuration returns a Configuration object in namespace with the name names.Config +// that uses the image specified by names.Image +func Configuration(names test.ResourceNames, fopt ...rtesting.ConfigOption) *v1beta1.Configuration { + config := &v1beta1.Configuration{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.Config, + }, + Spec: *ConfigurationSpec(pkgTest.ImagePath(names.Image)), + } + + for _, opt := range fopt { + opt(config) + } + + return config +} + +// WaitForConfigurationState polls the status of the Configuration called name +// from client every PollInterval until inState returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. +func WaitForConfigurationState(client *test.ServingBetaClients, name string, inState func(c *v1beta1.Configuration) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForConfigurationState/%s/%s", name, desc)) + defer span.End() + + var lastState *v1beta1.Configuration + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("configuration %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckConfigurationState verifies the status of the Configuration called name from client +// is in a particular state by calling `inState` and expecting `true`. 
+// This is the non-polling variety of WaitForConfigurationState +func CheckConfigurationState(client *test.ServingBetaClients, name string, inState func(r *v1beta1.Configuration) (bool, error)) error { + c, err := client.Configs.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(c); err != nil { + return err + } else if !done { + return fmt.Errorf("configuration %q is not in desired state, got: %+v", name, c) + } + return nil +} + +// IsConfigurationReady will check the status conditions of the config and return true if the config is +// ready. This means it has at least created one revision and that has become ready. +func IsConfigurationReady(c *v1beta1.Configuration) (bool, error) { + return c.Generation == c.Status.ObservedGeneration && c.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/test/v1beta1/crd.go b/test/vendor/knative.dev/serving/test/v1beta1/crd.go new file mode 100644 index 0000000000..0f631d73e9 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1beta1/crd.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "github.com/davecgh/go-spew/spew" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving/pkg/apis/serving/v1beta1" +) + +// ResourceObjects holds types of the resource objects. 
+type ResourceObjects struct { + Route *v1beta1.Route + Config *v1beta1.Configuration + Service *v1beta1.Service + Revision *v1beta1.Revision +} + +// LogResourceObject logs the resource object with the resource name and value +func LogResourceObject(t pkgTest.T, value ResourceObjects) { + t.Log("", "resource", spew.Sprint(value)) +} diff --git a/test/vendor/knative.dev/serving/test/v1beta1/revision.go b/test/vendor/knative.dev/serving/test/v1beta1/revision.go new file mode 100644 index 0000000000..19bf1d07b4 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1beta1/revision.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" + "knative.dev/serving/pkg/apis/serving" + "knative.dev/serving/pkg/apis/serving/v1beta1" + "knative.dev/serving/test" +) + +// WaitForRevisionState polls the status of the Revision called name +// from client every `PollInterval` until `inState` returns `true` indicating it +// is done, returns an error or PollTimeout. desc will be used to name the metric +// that is emitted to track how long it took for name to get into the state checked by inState. 
+func WaitForRevisionState(client *test.ServingBetaClients, name string, inState func(r *v1beta1.Revision) (bool, error), desc string) error { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRevision/%s/%s", name, desc)) + defer span.End() + + var lastState *v1beta1.Revision + waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) { + var err error + lastState, err = client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(lastState) + }) + + if waitErr != nil { + return fmt.Errorf("revision %q is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return nil +} + +// CheckRevisionState verifies the status of the Revision called name from client +// is in a particular state by calling `inState` and expecting `true`. +// This is the non-polling variety of WaitForRevisionState +func CheckRevisionState(client *test.ServingBetaClients, name string, inState func(r *v1beta1.Revision) (bool, error)) error { + r, err := client.Revisions.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + if done, err := inState(r); err != nil { + return err + } else if !done { + return fmt.Errorf("revision %q is not in desired state, got: %+v", name, r) + } + return nil +} + +// IsRevisionReady will check the status conditions of the revision and return true if the revision is +// ready to serve traffic. It will return false if the status indicates a state other than deploying +// or being ready. It will also return false if the type of the condition is unexpected. +func IsRevisionReady(r *v1beta1.Revision) (bool, error) { + return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil +} + +// IsRevisionPinned will check if the revision is pinned to a route. 
+func IsRevisionPinned(r *v1beta1.Revision) (bool, error) { + _, pinned := r.Annotations[serving.RevisionLastPinnedAnnotationKey] + return pinned, nil +} + +// IsRevisionAtExpectedGeneration returns a function that will check if the annotations +// on the revision include an annotation for the generation and that the annotation is +// set to the expected value. +func IsRevisionAtExpectedGeneration(expectedGeneration string) func(r *v1beta1.Revision) (bool, error) { + return func(r *v1beta1.Revision) (bool, error) { + if a, ok := r.Labels[serving.ConfigurationGenerationLabelKey]; ok { + if a != expectedGeneration { + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but was %s instead", r.Name, expectedGeneration, a) + } + return true, nil + } + return true, fmt.Errorf("expected Revision %s to be labeled with generation %s but there was no label", r.Name, expectedGeneration) + } +} diff --git a/test/vendor/knative.dev/serving/test/v1beta1/route.go b/test/vendor/knative.dev/serving/test/v1beta1/route.go new file mode 100644 index 0000000000..31121302f3 --- /dev/null +++ b/test/vendor/knative.dev/serving/test/v1beta1/route.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1beta1
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/davecgh/go-spew/spew"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+
+	"knative.dev/pkg/ptr"
+	pkgTest "knative.dev/pkg/test"
+	"knative.dev/pkg/test/logging"
+	"knative.dev/pkg/test/spoof"
+	v1 "knative.dev/serving/pkg/apis/serving/v1"
+	"knative.dev/serving/pkg/apis/serving/v1beta1"
+	rtesting "knative.dev/serving/pkg/testing/v1beta1"
+	"knative.dev/serving/test"
+)
+
+// Route returns an unnamespaced Route object using the route and configuration
+// names in names; the namespace is supplied by the client that creates it.
+func Route(names test.ResourceNames, fopt ...rtesting.RouteOption) *v1beta1.Route {
+	route := &v1beta1.Route{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: names.Route,
+		},
+		Spec: v1.RouteSpec{
+			Traffic: []v1.TrafficTarget{{
+				Tag:               names.TrafficTarget,
+				ConfigurationName: names.Config,
+				Percent:           ptr.Int64(100),
+			}},
+		},
+	}
+
+	for _, opt := range fopt {
+		opt(route)
+	}
+
+	return route
+}
+
+// CreateRoute creates a route via the Serving beta client using the route name in names
+func CreateRoute(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.RouteOption) (*v1beta1.Route, error) {
+	route := Route(names, fopt...)
+	LogResourceObject(t, ResourceObjects{Route: route})
+	return clients.ServingBetaClient.Routes.Create(route)
+}
+
+// WaitForRouteState polls the status of the Route called name from client every
+// PollInterval until inState returns `true` indicating it is done, returns an
+// error or PollTimeout. desc will be used to name the metric that is emitted to
+// track how long it took for name to get into the state checked by inState.
+func WaitForRouteState(client *test.ServingBetaClients, name string, inState func(r *v1beta1.Route) (bool, error), desc string) error {
+	span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForRouteState/%s/%s", name, desc))
+	defer span.End()
+
+	var lastState *v1beta1.Route
+	waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) {
+		var err error
+		lastState, err = client.Routes.Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(lastState)
+	})
+
+	if waitErr != nil {
+		return fmt.Errorf("route %q is not in desired state, got: %+v: %w", name, lastState, waitErr)
+	}
+	return nil
+}
+
+// CheckRouteState verifies the status of the Route called name from client
+// is in a particular state by calling `inState` and expecting `true`.
+// This is the non-polling variety of WaitForRouteState
+func CheckRouteState(client *test.ServingBetaClients, name string, inState func(r *v1beta1.Route) (bool, error)) error {
+	r, err := client.Routes.Get(name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if done, err := inState(r); err != nil {
+		return err
+	} else if !done {
+		return fmt.Errorf("route %q is not in desired state, got: %+v", name, r)
+	}
+	return nil
+}
+
+// IsRouteReady will check the status conditions of the route and return true if the route's
+// Ready condition is true and its status reflects the route's latest generation.
+func IsRouteReady(r *v1beta1.Route) (bool, error) {
+	return r.Generation == r.Status.ObservedGeneration && r.Status.IsReady(), nil
+}
+
+// IsRouteNotReady will check the status conditions of the route and return true if its Ready
+// condition is not true, regardless of which generation the status reflects.
+func IsRouteNotReady(r *v1beta1.Route) (bool, error) {
+	return !r.Status.IsReady(), nil
+}
+
+// RetryingRouteInconsistency retries common requests seen when creating a new route
+// - 404 until the route is propagated to the proxy
+// - 503 to account for Openshift route inconsistency (https://jira.coreos.com/browse/SRVKS-157)
+func RetryingRouteInconsistency(innerCheck spoof.ResponseChecker) spoof.ResponseChecker {
+	return func(resp *spoof.Response) (bool, error) {
+		if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusServiceUnavailable {
+			return false, nil
+		}
+		// If we didn't match any retryable codes, invoke the ResponseChecker that we wrapped.
+		return innerCheck(resp)
+	}
+}
+
+// AllRouteTrafficAtRevision will check the revisions that route r is routing
+// traffic to and return true once 100% of the traffic targets names.Revision.
+func AllRouteTrafficAtRevision(names test.ResourceNames) func(r *v1beta1.Route) (bool, error) {
+	return func(r *v1beta1.Route) (bool, error) {
+		for _, tt := range r.Status.Traffic {
+			if tt.Percent != nil && *tt.Percent == 100 {
+				if tt.RevisionName != names.Revision {
+					return true, fmt.Errorf("expected traffic revision name to be %s but actually is %s: %s", names.Revision, tt.RevisionName, spew.Sprint(r))
+				}
+
+				if tt.Tag != names.TrafficTarget {
+					return true, fmt.Errorf("expected traffic target name to be %s but actually is %s: %s", names.TrafficTarget, tt.Tag, spew.Sprint(r))
+				}
+
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+}
diff --git a/test/vendor/knative.dev/serving/test/v1beta1/service.go b/test/vendor/knative.dev/serving/test/v1beta1/service.go
new file mode 100644
index 0000000000..e02a355df1
--- /dev/null
+++ b/test/vendor/knative.dev/serving/test/v1beta1/service.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/mattbaird/jsonpatch"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	pkgTest "knative.dev/pkg/test"
+	"knative.dev/pkg/test/logging"
+	v1 "knative.dev/serving/pkg/apis/serving/v1"
+	"knative.dev/serving/pkg/apis/serving/v1beta1"
+	serviceresourcenames "knative.dev/serving/pkg/reconciler/service/resources/names"
+	rtesting "knative.dev/serving/pkg/testing/v1beta1"
+	"knative.dev/serving/test"
+)
+
+func validateCreatedServiceStatus(clients *test.Clients, names *test.ResourceNames) error {
+	return CheckServiceState(clients.ServingBetaClient, names.Service, func(s *v1beta1.Service) (bool, error) {
+		if s.Status.URL == nil || s.Status.URL.Host == "" {
+			return false, fmt.Errorf("URL is not present in Service status: %v", s)
+		}
+		names.URL = s.Status.URL.URL()
+		if s.Status.LatestCreatedRevisionName == "" {
+			return false, fmt.Errorf("LatestCreatedRevisionName is not present in Service status: %v", s)
+		}
+		names.Revision = s.Status.LatestCreatedRevisionName
+		if s.Status.LatestReadyRevisionName == "" {
+			return false, fmt.Errorf("LatestReadyRevisionName is not present in Service status: %v", s)
+		}
+		if s.Status.ObservedGeneration != 1 {
+			return false, fmt.Errorf("ObservedGeneration is not 1 in Service status: %v", s)
+		}
+		return true, nil
+	})
+}
+
+// GetResourceObjects obtains the service's resources from the k8s API server.
+func GetResourceObjects(clients *test.Clients, names test.ResourceNames) (*ResourceObjects, error) {
+	routeObject, err := clients.ServingBetaClient.Routes.Get(names.Route, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	serviceObject, err := clients.ServingBetaClient.Services.Get(names.Service, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	configObject, err := clients.ServingBetaClient.Configs.Get(names.Config, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	revisionObject, err := clients.ServingBetaClient.Revisions.Get(names.Revision, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	return &ResourceObjects{
+		Route:    routeObject,
+		Service:  serviceObject,
+		Config:   configObject,
+		Revision: revisionObject,
+	}, nil
+}
+
+// CreateServiceReady creates a new Service in state 'Ready'. This function expects an Image name
+// passed in through 'names'; the Service name may be empty. Names is updated with the Route and Configuration created by the Service
+// and ResourceObjects is returned with the Service, Route, and Configuration objects.
+// Returns error if the service does not come up correctly.
+func CreateServiceReady(t pkgTest.T, clients *test.Clients, names *test.ResourceNames, fopt ...rtesting.ServiceOption) (*ResourceObjects, error) {
+	if names.Image == "" {
+		return nil, fmt.Errorf("expected non-empty Image name; got Image=%v", names.Image)
+	}
+
+	t.Log("Creating a new Service.", "service", names.Service)
+	svc, err := CreateService(t, clients, *names, fopt...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Populate Route and Configuration Objects with name
+	names.Route = serviceresourcenames.Route(svc)
+	names.Config = serviceresourcenames.Configuration(svc)
+
+	// If the Service name was not specified, populate it
+	if names.Service == "" {
+		names.Service = svc.Name
+	}
+
+	t.Log("Waiting for Service to transition to Ready.", "service", names.Service)
+	if err := WaitForServiceState(clients.ServingBetaClient, names.Service, IsServiceReady, "ServiceIsReady"); err != nil {
+		return nil, err
+	}
+
+	t.Log("Checking to ensure Service Status is populated for Ready service")
+	err = validateCreatedServiceStatus(clients, names)
+	if err != nil {
+		return nil, err
+	}
+
+	t.Log("Getting latest objects Created by Service", names.Service)
+	resources, err := GetResourceObjects(clients, *names)
+	if err == nil {
+		t.Log("Successfully created Service", names.Service)
+	}
+	return resources, err
+}
+
+// CreateService creates a service in namespace with the name names.Service and names.Image
+func CreateService(t pkgTest.T, clients *test.Clients, names test.ResourceNames, fopt ...rtesting.ServiceOption) (*v1beta1.Service, error) {
+	service := Service(names, fopt...)
+	LogResourceObject(t, ResourceObjects{Service: service})
+	svc, err := clients.ServingBetaClient.Services.Create(service)
+	return svc, err
+}
+
+// PatchService patches the existing service passed in with the applied mutations.
+// Returns the latest service object
+func PatchService(t pkgTest.T, clients *test.Clients, svc *v1beta1.Service, fopt ...rtesting.ServiceOption) (*v1beta1.Service, error) {
+	newSvc := svc.DeepCopy()
+	for _, opt := range fopt {
+		opt(newSvc)
+	}
+	LogResourceObject(t, ResourceObjects{Service: newSvc})
+	patchBytes, err := test.CreateBytePatch(svc, newSvc)
+	if err != nil {
+		return nil, err
+	}
+	return clients.ServingBetaClient.Services.Patch(svc.ObjectMeta.Name, types.JSONPatchType, patchBytes, "")
+}
+
+// UpdateServiceRouteSpec replaces the traffic spec of the Service named in names with rs.Traffic.
+func UpdateServiceRouteSpec(t pkgTest.T, clients *test.Clients, names test.ResourceNames, rs v1.RouteSpec) (*v1beta1.Service, error) {
+	patches := []jsonpatch.JsonPatchOperation{{
+		Operation: "replace",
+		Path:      "/spec/traffic",
+		Value:     rs.Traffic,
+	}}
+	patchBytes, err := json.Marshal(patches)
+	if err != nil {
+		return nil, err
+	}
+	return clients.ServingBetaClient.Services.Patch(names.Service, types.JSONPatchType, patchBytes, "")
+}
+
+// WaitForServiceLatestRevision takes a revision in through names and compares it to the current state of LatestCreatedRevisionName in Service.
+// Once an update is detected in the LatestCreatedRevisionName, the function waits for the created revision to be set in LatestReadyRevisionName
+// before returning the name of the revision.
+func WaitForServiceLatestRevision(clients *test.Clients, names test.ResourceNames) (string, error) {
+	var revisionName string
+	if err := WaitForServiceState(clients.ServingBetaClient, names.Service, func(s *v1beta1.Service) (bool, error) {
+		if s.Status.LatestCreatedRevisionName != names.Revision {
+			revisionName = s.Status.LatestCreatedRevisionName
+			// We also check that the revision is pinned, meaning it's not a stale revision.
+			// Without this it might happen that the latest created revision is later overridden by a newer one
+			// and the following check for LatestReadyRevisionName would fail.
+			if revErr := CheckRevisionState(clients.ServingBetaClient, revisionName, IsRevisionPinned); revErr != nil {
+				return false, nil
+			}
+			return true, nil
+		}
+		return false, nil
+	}, "ServiceUpdatedWithRevision"); err != nil {
+		return "", fmt.Errorf("LatestCreatedRevisionName not updated: %w", err)
+	}
+	if err := WaitForServiceState(clients.ServingBetaClient, names.Service, func(s *v1beta1.Service) (bool, error) {
+		return (s.Status.LatestReadyRevisionName == revisionName), nil
+	}, "ServiceReadyWithRevision"); err != nil {
+		return "", fmt.Errorf("LatestReadyRevisionName not updated with %s: %w", revisionName, err)
+	}
+
+	return revisionName, nil
+}
+
+// Service returns an unnamespaced Service object with the name names.Service
+// that uses the image specified by names.Image.
+func Service(names test.ResourceNames, fopt ...rtesting.ServiceOption) *v1beta1.Service {
+	a := append([]rtesting.ServiceOption{
+		rtesting.WithInlineConfigSpec(*ConfigurationSpec(pkgTest.ImagePath(names.Image))),
+	}, fopt...)
+	return rtesting.ServiceWithoutNamespace(names.Service, a...)
+}
+
+// WaitForServiceState polls the status of the Service called name
+// from client every `PollInterval` until `inState` returns `true` indicating it
+// is done, returns an error or PollTimeout. desc will be used to name the metric
+// that is emitted to track how long it took for name to get into the state checked by inState.
+func WaitForServiceState(client *test.ServingBetaClients, name string, inState func(s *v1beta1.Service) (bool, error), desc string) error {
+	span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForServiceState/%s/%s", name, desc))
+	defer span.End()
+
+	var lastState *v1beta1.Service
+	waitErr := wait.PollImmediate(test.PollInterval, test.PollTimeout, func() (bool, error) {
+		var err error
+		lastState, err = client.Services.Get(name, metav1.GetOptions{})
+		if err != nil {
+			return true, err
+		}
+		return inState(lastState)
+	})
+
+	if waitErr != nil {
+		return fmt.Errorf("service %q is not in desired state, got: %+v: %w", name, lastState, waitErr)
+	}
+	return nil
+}
+
+// CheckServiceState verifies the status of the Service called name from client
+// is in a particular state by calling `inState` and expecting `true`.
+// This is the non-polling variety of WaitForServiceState.
+func CheckServiceState(client *test.ServingBetaClients, name string, inState func(s *v1beta1.Service) (bool, error)) error {
+	s, err := client.Services.Get(name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if done, err := inState(s); err != nil {
+		return err
+	} else if !done {
+		return fmt.Errorf("service %q is not in desired state, got: %+v", name, s)
+	}
+	return nil
+}
+
+// IsServiceReady will check the status conditions of the service and return true if the service is
+// ready and its status is current (i.e. its configurations and routes have all reported ready).
+func IsServiceReady(s *v1beta1.Service) (bool, error) {
+	return s.Generation == s.Status.ObservedGeneration && s.Status.IsReady(), nil
+}
+
+// IsServiceNotReady will check the status conditions of the service and return true if its status
+// is current (ObservedGeneration matches Generation) but its Ready condition is not true.
+func IsServiceNotReady(s *v1beta1.Service) (bool, error) { + return s.Generation == s.Status.ObservedGeneration && !s.Status.IsReady(), nil +} diff --git a/test/vendor/knative.dev/serving/third_party/OWNERS b/test/vendor/knative.dev/serving/third_party/OWNERS new file mode 100644 index 0000000000..4b085321d8 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/OWNERS @@ -0,0 +1,14 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +# Istio +- networking-approvers +# Monitoring Configs +- monitoring-approvers + + +reviewers: +# Istio +- networking-reviewers +# Monitoring Configs +- monitoring-reviewers diff --git a/test/vendor/knative.dev/serving/third_party/VENDOR-LICENSE b/test/vendor/knative.dev/serving/third_party/VENDOR-LICENSE new file mode 100644 index 0000000000..704b52ddf6 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/VENDOR-LICENSE @@ -0,0 +1,13188 @@ + +=========================================================== +Import: knative.dev/serving/vendor/cloud.google.com/go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/contrib.go.opencensus.io/exporter/ocagent + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/contrib.go.opencensus.io/exporter/prometheus + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/contrib.go.opencensus.io/exporter/stackdriver + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/contrib.go.opencensus.io/exporter/zipkin + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/Azure/azure-sdk-for-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/Azure/go-autorest + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/NYTimes/gziphandler + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/PuerkitoBio/purell + +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/PuerkitoBio/urlesc + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/aws/aws-sdk-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/beorn7/perks + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/blang/semver + +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/census-instrumentation/opencensus-proto + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/coreos/etcd + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/coreos/go-systemd + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
+ +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/davecgh/go-spew + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/dgrijalva/jwt-go + +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/docker/docker + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/docker/go-connections + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/docker/go-units + + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/emicklei/go-restful + +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +=========================================================== +Import: knative.dev/serving/vendor/github.com/emicklei/go-restful-swagger12 + +Copyright (c) 2017 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/evanphx/json-patch + +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/ghodss/yaml + +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/go-openapi/jsonpointer + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/go-openapi/jsonreference + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/go-openapi/spec + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/go-openapi/swag + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/gobuffalo/envy + +The MIT License (MIT) +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/gogo/protobuf + +Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+ +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/golang/glog + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/golang/groupcache + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/golang/protobuf + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/google/go-cmp + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/google/go-containerregistry + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/google/gofuzz + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/google/uuid + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/googleapis/gax-go + +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/googleapis/gnostic + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/gorilla/websocket + +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/grpc-ecosystem/go-grpc-prometheus + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/grpc-ecosystem/grpc-gateway + +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/hashicorp/golang-lru + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. 
Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. 
Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/imdario/mergo + +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/jetstack/cert-manager + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/jmespath/go-jmespath + +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/joho/godotenv + +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/json-iterator/go + +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/kelseyhightower/envconfig + +Copyright (c) 2013 Kelsey Hightower + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/kubernetes-incubator/custom-metrics-apiserver + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/mailru/easyjson + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/markbates/inflect + +Copyright (c) 2011 Chris Farmiloe + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/mattbaird/jsonpatch + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/matttproud/golang_protobuf_extensions + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/modern-go/concurrent + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/modern-go/reflect2 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/munnerz/goautoneg + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/openzipkin/zipkin-go + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. 
+ +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ +Copyright 2017 The OpenZipkin Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/pborman/uuid + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/prometheus/client_golang + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/prometheus/client_model + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/prometheus/common + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/prometheus/procfs + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/rogpeppe/go-internal + +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/github.com/spf13/pflag + +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/go.opencensus.io + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +=========================================================== +Import: knative.dev/serving/vendor/go.uber.org/atomic + +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/go.uber.org/multierr + +Copyright (c) 2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/go.uber.org/zap + +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/crypto + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/net + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/oauth2 + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/sync + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/sys + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/text + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/golang.org/x/time + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/google.golang.org/api + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/google.golang.org/genproto + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/google.golang.org/grpc + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/gopkg.in/inf.v0 + +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +=========================================================== +Import: knative.dev/serving/vendor/gopkg.in/natefinch/lumberjack.v2 + +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +=========================================================== +Import: knative.dev/serving/vendor/gopkg.in/yaml.v2 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/istio.io/api + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/istio.io/client-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/istio.io/gogo-genproto + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016-2019 Istio Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/api + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/apimachinery + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/apiserver + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/client-go + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/component-base + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/klog + +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
+ +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/kube-openapi + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/kubernetes + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/metrics + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/k8s.io/utils + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/knative.dev/caching + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/knative.dev/pkg + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/sigs.k8s.io/structured-merge-diff + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +=========================================================== +Import: knative.dev/serving/vendor/sigs.k8s.io/yaml + +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/test/vendor/knative.dev/serving/third_party/ambassador-latest/README.md b/test/vendor/knative.dev/serving/third_party/ambassador-latest/README.md new file mode 100644 index 0000000000..b7775b2204 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/ambassador-latest/README.md @@ -0,0 +1,6 @@ +### Ambassador 0.86.1 + +`ambassador-rbac.yaml` and `ambassador-service.yaml` are required to install +Ambassador. These need to be updated every every time Ambassador's version is +bumped. 
Detailed instructions on installing Ambassador are available +[here](https://www.getambassador.io/user-guide/getting-started/). diff --git a/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-rbac.yaml b/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-rbac.yaml new file mode 100644 index 0000000000..f3a2f662b7 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-rbac.yaml @@ -0,0 +1,338 @@ +--- +apiVersion: v1 +kind: Service +metadata: + labels: + service: ambassador-admin + name: ambassador-admin +spec: + type: NodePort + ports: + - name: ambassador-admin + port: 8877 + targetPort: 8877 + selector: + service: ambassador +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: ambassador +rules: +- apiGroups: [""] + resources: [ "endpoints", "namespaces", "secrets", "services" ] + verbs: ["get", "list", "watch"] +- apiGroups: [ "getambassador.io" ] + resources: [ "*" ] + verbs: ["get", "list", "watch"] +- apiGroups: [ "apiextensions.k8s.io" ] + resources: [ "customresourcedefinitions" ] + verbs: ["get", "list", "watch"] +- apiGroups: [ "networking.internal.knative.dev" ] + resources: [ "clusteringresses", "ingresses" ] + verbs: ["get", "list", "watch"] +- apiGroups: [ "networking.internal.knative.dev" ] + resources: [ "ingresses/status", "clusteringresses/status" ] + verbs: ["update"] +- apiGroups: [ "extensions" ] + resources: [ "ingresses" ] + verbs: ["get", "list", "watch"] +- apiGroups: [ "extensions" ] + resources: [ "ingresses/status" ] + verbs: ["update"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ambassador +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: ambassador +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ambassador +subjects: +- kind: ServiceAccount + name: ambassador + namespace: default +--- +apiVersion: apiextensions.k8s.io/v1beta1 
+kind: CustomResourceDefinition +metadata: + name: authservices.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: authservices + singular: authservice + kind: AuthService + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consulresolvers.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: consulresolvers + singular: consulresolver + kind: ConsulResolver +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubernetesendpointresolvers.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: kubernetesendpointresolvers + singular: kubernetesendpointresolver + kind: KubernetesEndpointResolver +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubernetesserviceresolvers.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: kubernetesserviceresolvers + singular: kubernetesserviceresolver + kind: KubernetesServiceResolver +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: mappings.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: mappings + singular: mapping + kind: Mapping + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: modules.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + 
scope: Namespaced + names: + plural: modules + singular: module + kind: Module + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ratelimitservices.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: ratelimitservices + singular: ratelimitservice + kind: RateLimitService + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: tcpmappings.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: tcpmappings + singular: tcpmapping + kind: TCPMapping + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: tlscontexts.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: tlscontexts + singular: tlscontext + kind: TLSContext + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: tracingservices.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: tracingservices + singular: tracingservice + kind: TracingService + categories: + - ambassador-crds +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: logservices.getambassador.io +spec: + group: getambassador.io + version: v1 + versions: + - name: v1 + served: true + storage: true + scope: Namespaced + names: + plural: logservices + singular: logservice + kind: LogService + categories: + - ambassador-crds +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: ambassador +spec: + replicas: 3 + selector: + matchLabels: + service: ambassador + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + "consul.hashicorp.com/connect-inject": "false" + labels: + service: ambassador + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + serviceAccountName: ambassador + containers: + - name: ambassador + image: quay.io/datawire/ambassador:0.86.1 + resources: + limits: + cpu: 1 + memory: 400Mi + requests: + cpu: 200m + memory: 100Mi + env: + - name: AMBASSADOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 8080 + - name: https + containerPort: 8443 + - name: admin + containerPort: 8877 + livenessProbe: + httpGet: + path: /ambassador/v0/check_alive + port: 8877 + initialDelaySeconds: 30 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /ambassador/v0/check_ready + port: 8877 + initialDelaySeconds: 30 + periodSeconds: 3 + volumeMounts: + - name: ambassador-pod-info + mountPath: /tmp/ambassador-pod-info + volumes: + - name: ambassador-pod-info + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + restartPolicy: Always + securityContext: + runAsUser: 8888 diff --git a/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-service.yaml b/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-service.yaml new file mode 100644 index 0000000000..9d8b3b6beb --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/ambassador-latest/ambassador-service.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ambassador + labels: + app.kubernetes.io/component: ambassador-service +spec: + type: LoadBalancer + externalTrafficPolicy: Local + ports: + - port: 80 + targetPort: 8080 + name: http + - port: 
443 + targetPort: 8443 + name: https + selector: + service: ambassador diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager-crds.yaml b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager-crds.yaml new file mode 100644 index 0000000000..500a68bfe5 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager-crds.yaml @@ -0,0 +1,5425 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: CertificateRequest is a type to represent a Certificate Signing + Request + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateRequestSpec defines the desired state of CertificateRequest + type: object + required: + - csr + - issuerRef + properties: + csr: + description: Byte slice containing the PEM encoded CertificateSigningRequest + type: string + format: byte + duration: + description: Requested certificate default Duration + type: string + isCA: + description: IsCA will mark the resulting certificate as valid for signing. + This implies that the 'cert sign' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If + the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the CertificateRequest + will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. The group field refers to the API group + of the issuer which defaults to 'cert-manager.io' if empty. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + usages: + description: Usages is the set of x509 actions that are enabled for + a given key. Defaults are ('digital signature', 'key encipherment') + if empty + type: array + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: + https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Valid KeyUsage values are as follows: "signing", "digital signature", + "content commitment", "key encipherment", "key agreement", "data + encipherment", "cert sign", "crl sign", "encipher only", "decipher + only", "any", "server auth", "client auth", "code signing", "email + protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec + user", "timestamping", "ocsp signing", "microsoft sgc", "netscape + sgc"' + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: CertificateStatus defines the observed state of CertificateRequest + and resulting signed certificate. + type: object + properties: + ca: + description: Byte slice containing the PEM encoded certificate authority + of the signed certificate. + type: string + format: byte + certificate: + description: Byte slice containing a PEM encoded signed certificate + resulting from the given certificate signing request. + type: string + format: byte + conditions: + type: array + items: + description: CertificateRequestCondition contains condition information + for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. 
+ type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + failureTime: + description: FailureTime stores the time that this CertificateRequest + failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.secretName + name: Secret + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Certificate is a type to represent a Certificate from ACME + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate. A + valid Certificate requires at least one of a CommonName, DNSName, or URISAN + to be valid. + type: object + required: + - issuerRef + - secretName + properties: + commonName: + description: CommonName is a common name to be used on the Certificate. + The CommonName should have a length of 64 characters or fewer to avoid + generating invalid CSRs. + type: string + dnsNames: + description: DNSNames is a list of subject alt names to be used on the + Certificate. + type: array + items: + type: string + duration: + description: Certificate default Duration + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses to be used on the + Certificate + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for signing. This + implies that the 'cert sign' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. + If the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the Certificate will + be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. 
+ type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding + private key for this certificate. If provided, allowed values are + either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is + not provided, key size of 256 will be used for "ecdsa" key algorithm + and key size of 2048 will be used for "rsa" key algorithm. + type: string + enum: + - rsa + - ecdsa + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) + for this certificate's private key to be encoded in. If provided, + allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, + respectively. If KeyEncoding is not specified, then PKCS#1 will be + used by default. + type: string + enum: + - pkcs1 + - pkcs8 + keySize: + description: KeySize is the key bit size of the corresponding private + key for this certificate. If provided, value must be between 2048 + and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa", + and value must be one of (256, 384, 521) when KeyAlgorithm is set + to "ecdsa". + type: integer + organization: + description: Organization is the organization to be used on the Certificate + type: array + items: + type: string + renewBefore: + description: Certificate renew before expiration duration + type: string + secretName: + description: SecretName is the name of the secret resource to store + this secret in + type: string + uriSANs: + description: URISANs is a list of URI Subject Alternative Names to be + set on this Certificate. + type: array + items: + type: string + usages: + description: Usages is the set of x509 actions that are enabled for + a given key. Defaults are ('digital signature', 'key encipherment') + if empty + type: array + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: + https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Valid KeyUsage values are as follows: "signing", "digital signature", + "content commitment", "key encipherment", "key agreement", "data + encipherment", "cert sign", "crl sign", "encipher only", "decipher + only", "any", "server auth", "client auth", "code signing", "email + protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec + user", "timestamping", "ocsp signing", "microsoft sgc", "netscape + sgc"' + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: CertificateStatus defines the observed state of Certificate + type: object + properties: + conditions: + type: array + items: + description: CertificateCondition contains condition information for + an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). 
+ type: string + lastFailureTime: + type: string + format: date-time + notAfter: + description: The expiration time of the certificate stored in the secret + named by this resource in spec.secretName. + type: string + format: date-time + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.dnsName + name: Domain + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: acme.cert-manager.io + preserveUnknownFields: false + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME + server + type: object + required: + - metadata + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - authzURL + - dnsName + - issuerRef + - key + - token + - type + - url + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. + example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Challenge. If the Issuer does + not exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Challenge will be marked + as failed. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration that should + be used to solve this challenge resource. + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure containing + the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure containing + the DNS configuration for Akamai DNS—Zone Record Management + API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a structure + containing the DNS configuration for DigitalOcean Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure containing + the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting RFC2136. + Required. Note: FQDN is not a valid value, only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting + RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` + are defined. Supported values are (case-insensitive): + ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or + ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If + ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG + value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure containing + the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared credentials + file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this + zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName + api call. + type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider + will assume using either the explicit credentials AccessKeyID/SecretAccessKey + or the inferred credentials from environment variables, + shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared credentials + file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies configuration + for a webhook DNS01 provider, including where to POST ChallengePayload + resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed + to the webhook apiserver when challenges are processed. + This can contain arbitrary JSON data. Secret values should + not be specified in this stanza. If secret values are + needed (e.g. credentials for a DNS service), you should + use a SecretKeySelector to reference a Secret resource. + For details on the schema of this field, consult the webhook + provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when + POSTing ChallengePayload resources to the webhook apiserver. + This should be the same as the GroupName specified in + the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in + the webhook provider implementation. This will typically + be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration detailing + how to solve HTTP01 challenges within a Kubernetes cluster. Typically + this is accomplished through creating 'routes' of some description + that configure ingress controllers to direct traffic to 'solver + pods', which are responsible for responding to the ACME server's + HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver will + solve challenges by creating or modifying Ingress resources + in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. 
+ type: object + properties: + class: + description: The ingress class to use when creating Ingress + resources to solve ACME challenges that use this challenge + solver. Only one of 'class' or 'name' may be specified. + type: string + name: + description: The name of the ingress resource that should + have ACME challenge solving routes inserted into it in + order to solve HTTP01 challenges. This is typically used + in conjunction with ingress controllers like ingress-gce, + which maintains a 1:1 mapping between external IPs and + ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the + ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to + solve HTTP01 challenges. Only the 'labels' and 'annotations' + fields may be set. If labels or annotations overlap + with in-built values, the values here will override + the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to + the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the + created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 + challenge solver pod. Only the 'nodeSelector', 'affinity' + and 'tolerations' fields are supported currently. + All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. 
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this + field, but it may choose a node that violates + one or more of the expressions. The node + that is most preferred is the one with + the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a + sum by iterating through the elements + of this field and adding "weight" to the + sum if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no + objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + type: object + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. 
If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + matchFields: + description: A list of node selector + requirements by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to an update), the system may or may + not try to eventually evict the pod from + its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. 
+ type: array + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + matchFields: + description: A list of node selector + requirements by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. 
If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same + node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this + field, but it may choose a node that violates + one or more of the expressions. The node + that is most preferred is the one with + the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a + sum by iterating through the elements + of this field and adding "weight" to the + sum if the node has pods which matches + the corresponding podAffinityTerm; the + node(s) with the highest sum are the most + preferred. + type: array + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to a + set of values. Valid + operators are In, + NotIn, Exists and + DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, the + values array must + be non-empty. If the + operator is Exists + or DoesNotExist, the + values array must + be empty. This array + is replaced during + a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in + the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be + co-located (affinity) or not + co-located (anti-affinity) with + the pods matching the labelSelector + in the specified namespaces, + where co-located is defined + as running on a node whose value + of the label with key topologyKey + matches that of any node on + which any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to a pod label update), the system + may or may not try to eventually evict + the pod from its node. When there are + multiple elements, the lists of nodes + corresponding to each podAffinityTerm + are intersected, i.e. all terms must be + satisfied. + type: array + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. 
If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); null + or empty list means "this pod's + namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the + same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + anti-affinity expressions specified by + this field, but it may choose a node that + violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. 
for each node that meets all of the + scheduling requirements (resource request, + requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and + adding "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with the + highest sum are the most preferred. + type: array + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to a + set of values. Valid + operators are In, + NotIn, Exists and + DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, the + values array must + be non-empty. If the + operator is Exists + or DoesNotExist, the + values array must + be empty. This array + is replaced during + a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is + a map of {key,value} pairs. 
+ A single {key,value} in + the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be + co-located (affinity) or not + co-located (anti-affinity) with + the pods matching the labelSelector + in the specified namespaces, + where co-located is defined + as running on a node whose value + of the label with key topologyKey + matches that of any node on + which any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the anti-affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to a pod label update), the system + may or may not try to eventually evict + the pod from its node. When there are + multiple elements, the lists of nodes + corresponding to each podAffinityTerm + are intersected, i.e. all terms must be + satisfied. 
+ type: array + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); null + or empty list means "this pod's + namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must + be true for the pod to fit on a node. Selector + which must match a node''s labels for the pod + to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached + to tolerates any taint that matches the triple + using the matching operator + . + type: object + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the + toleration applies to. Empty means match + all taint keys. If the key is empty, operator + must be Exists; this combination means to + match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists + and Equal. Defaults to Equal. Exists is + equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration (which + must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By + default, it is not set, which means tolerate + the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the + toleration matches to. If the operator is + Exists, the value should be empty, otherwise + just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver + service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used + to solve. If specified and a match is found, a dnsNames selector + will take precedence over a dnsZones selector. If multiple + solvers match with the same dnsNames value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used + to solve. The most specific DNS zone match specified here + will take precedence over other DNS zone matches, so a solver + specifying sys.example.com will be selected over one specifying + example.com for the domain www.sys.example.com. If multiple + solvers match with the same dnsZones value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set + of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource represents, + e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for this + challenge. This can be used to lookup details about the status of + this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard + identifier, for example '*.example.com' + type: boolean + status: + type: object + properties: + presented: + description: Presented will be set to true if the challenge values for + this challenge are currently 'presented'. This *does not* imply the + self check is passing. Only that the values have been 'submitted' + for the appropriate challenge mechanism (i.e. the DNS01 TXT record + has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Processing is used to denote whether this challenge should + be processed or not. This field will only be set to true by the 'scheduling' + component. It will only be set to false by the 'challenges' controller, + after the challenge has reached a final state or timed out. If this + field is set to false, the challenge controller will not take any + more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge + is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If + not set, the state of the challenge is unknown. 
+ type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IssuerSpec is the specification of an Issuer. This includes + any configuration required for the issuer. 
+ type: object + properties: + acme: + description: ACMEIssuer contains the specification for an ACME issuer + type: object + required: + - privateKeySecretRef + - server + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + type: array + items: + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure + containing the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure + containing the DNS configuration for Akamai DNS—Zone + Record Management API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a + structure containing the DNS configuration for DigitalOcean + Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure + containing the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting + RFC2136. Required. Note: FQDN is not a valid value, + only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the + DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` + and ``tsigKeyName`` are defined. Supported values + are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, + ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. + If ``tsigSecretSecretRef`` is defined, this field + is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the + TSIG value. If ``tsigKeyName`` is defined, this + field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure + containing the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only + this zone in Route53 and will not do an lookup using + the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 + provider will assume using either the explicit credentials + AccessKeyID/SecretAccessKey or the inferred credentials + from environment variables, shared credentials file + or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies + configuration for a webhook DNS01 provider, including + where to POST ChallengePayload resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should + be passed to the webhook apiserver when challenges + are processed. This can contain arbitrary JSON data. + Secret values should not be specified in this stanza. + If secret values are needed (e.g. credentials for + a DNS service), you should use a SecretKeySelector + to reference a Secret resource. For details on the + schema of this field, consult the webhook provider + implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used + when POSTing ChallengePayload resources to the webhook + apiserver. This should be the same as the GroupName + specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined + in the webhook provider implementation. This will + typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration + detailing how to solve HTTP01 challenges within a Kubernetes + cluster. Typically this is accomplished through creating + 'routes' of some description that configure ingress controllers + to direct traffic to 'solver pods', which are responsible + for responding to the ACME server's HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver + will solve challenges by creating or modifying Ingress + resources in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. 
+ type: object + properties: + class: + description: The ingress class to use when creating + Ingress resources to solve ACME challenges that + use this challenge solver. Only one of 'class' or + 'name' may be specified. + type: string + name: + description: The name of the ingress resource that + should have ACME challenge solving routes inserted + into it in order to solve HTTP01 challenges. This + is typically used in conjunction with ingress controllers + like ingress-gce, which maintains a 1:1 mapping + between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure + the ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod + used to solve HTTP01 challenges. Only the 'labels' + and 'annotations' fields may be set. If labels + or annotations overlap with in-built values, + the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added + to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to + the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the + HTTP01 challenge solver pod. Only the 'nodeSelector', + 'affinity' and 'tolerations' fields are supported + currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. 
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node matches the + corresponding matchExpressions; + the node(s) with the highest sum + are the most preferred. + type: array + items: + description: An empty preferred + scheduling term matches all objects + with implicit weight 0 (i.e. it's + a no-op). A null preferred scheduling + term matches no objects (i.e. + is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector + term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to an update), the system + may or may not try to eventually + evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list + of node selector terms. The + terms are ORed. 
+ type: array + items: + description: A null or empty + node selector term matches + no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of + the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. 
+ If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node has pods + which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. 
+ type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to a pod label update), + the system may or may not try to + eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. 
+ If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting + this pod in the same node, zone, etc. + as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. 
+ for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + anti-affinity expressions, etc.), + compute a sum by iterating through + the elements of this field and adding + "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. 
A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity + requirements specified by this field + are not met at scheduling time, + the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to + be met at some point during pod + execution (e.g. due to a pod label + update), the system may or may not + try to eventually evict the pod + from its node. When there are multiple + elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. 
+ type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. + Selector which must match a node''s labels + for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is + attached to tolerates any taint that matches + the triple using the + matching operator . + type: object + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match + all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that + the toleration applies to. Empty means + match all taint keys. If the key is + empty, operator must be Exists; this + combination means to match all values + and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate + all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration + (which must be of effect NoExecute, + otherwise this field is ignored) tolerates + the taint. By default, it is not set, + which means tolerate the taint forever + (do not evict). Zero and negative + values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value + the toleration matches to. If the + operator is Exists, the value should + be empty, otherwise just a regular + string. + type: string + serviceType: + description: Optional service type for Kubernetes + solver service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + additionalProperties: + type: string + ca: + type: object + required: + - secretName + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. + type: string + selfSigned: + type: object + vault: + type: object + required: + - auth + - path + - server + properties: + auth: + description: Vault authentication + type: object + properties: + appRole: + description: This Secret contains a AppRole and Secret + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + kubernetes: + description: This contains a Role and Secret with a ServiceAccount + token to authenticate with vault. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path + to use when authenticating with Vault. For example, setting + a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` + to authenticate with Vault. If unspecified, the default + value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role + to assume. A Role binds a Kubernetes ServiceAccount with + a set of Vault policies. 
+ type: string + secretRef: + description: The required Secret field containing a Kubernetes + ServiceAccount JWT used for authenticating with Vault. + Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + tokenSecretRef: + description: This Secret contains the Vault token key + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + type: string + format: byte + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + venafi: + description: VenafiIssuer describes issuer configuration details for + Venafi Cloud. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + - url + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. 
+ type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for Venafi Cloud + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for the Venafi TPP instance + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + status: + description: IssuerStatus contains status information about an Issuer + type: object + properties: + acme: + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + conditions: + type: array + items: + description: IssuerCondition contains condition information for an + Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IssuerSpec is the specification of an Issuer. This includes + any configuration required for the issuer. + type: object + properties: + acme: + description: ACMEIssuer contains the specification for an ACME issuer + type: object + required: + - privateKeySecretRef + - server + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + type: array + items: + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure + containing the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure + containing the DNS configuration for Akamai DNS—Zone + Record Management API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a + structure containing the DNS configuration for DigitalOcean + Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure + containing the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting + RFC2136. Required. Note: FQDN is not a valid value, + only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the + DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` + and ``tsigKeyName`` are defined. Supported values + are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, + ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. + If ``tsigSecretSecretRef`` is defined, this field + is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the + TSIG value. If ``tsigKeyName`` is defined, this + field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure + containing the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only + this zone in Route53 and will not do an lookup using + the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 + provider will assume using either the explicit credentials + AccessKeyID/SecretAccessKey or the inferred credentials + from environment variables, shared credentials file + or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies + configuration for a webhook DNS01 provider, including + where to POST ChallengePayload resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should + be passed to the webhook apiserver when challenges + are processed. This can contain arbitrary JSON data. + Secret values should not be specified in this stanza. + If secret values are needed (e.g. credentials for + a DNS service), you should use a SecretKeySelector + to reference a Secret resource. For details on the + schema of this field, consult the webhook provider + implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used + when POSTing ChallengePayload resources to the webhook + apiserver. 
This should be the same as the GroupName + specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined + in the webhook provider implementation. This will + typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration + detailing how to solve HTTP01 challenges within a Kubernetes + cluster. Typically this is accomplished through creating + 'routes' of some description that configure ingress controllers + to direct traffic to 'solver pods', which are responsible + for responding to the ACME server's HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver + will solve challenges by creating or modifying Ingress + resources in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating + Ingress resources to solve ACME challenges that + use this challenge solver. Only one of 'class' or + 'name' may be specified. + type: string + name: + description: The name of the ingress resource that + should have ACME challenge solving routes inserted + into it in order to solve HTTP01 challenges. This + is typically used in conjunction with ingress controllers + like ingress-gce, which maintains a 1:1 mapping + between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure + the ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod + used to solve HTTP01 challenges. Only the 'labels' + and 'annotations' fields may be set. 
If labels + or annotations overlap with in-built values, + the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added + to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to + the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the + HTTP01 challenge solver pod. Only the 'nodeSelector', + 'affinity' and 'tolerations' fields are supported + currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node matches the + corresponding matchExpressions; + the node(s) with the highest sum + are the most preferred. + type: array + items: + description: An empty preferred + scheduling term matches all objects + with implicit weight 0 (i.e. it's + a no-op). A null preferred scheduling + term matches no objects (i.e. + is also a no-op). 
+ type: object + required: + - preference + - weight + properties: + preference: + description: A node selector + term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to an update), the system + may or may not try to eventually + evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list + of node selector terms. The + terms are ORed. + type: array + items: + description: A null or empty + node selector term matches + no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of + the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. 
+ for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node has pods + which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. 
A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to a pod label update), + the system may or may not try to + eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. 
+ type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting + this pod in the same node, zone, etc. + as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + anti-affinity expressions, etc.), + compute a sum by iterating through + the elements of this field and adding + "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. 
+ type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity + requirements specified by this field + are not met at scheduling time, + the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to + be met at some point during pod + execution (e.g. due to a pod label + update), the system may or may not + try to eventually evict the pod + from its node. When there are multiple + elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. 
+ type: string + nodeSelector: + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. + Selector which must match a node''s labels + for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is + attached to tolerates any taint that matches + the triple using the + matching operator . + type: object + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match + all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that + the toleration applies to. Empty means + match all taint keys. If the key is + empty, operator must be Exists; this + combination means to match all values + and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration + (which must be of effect NoExecute, + otherwise this field is ignored) tolerates + the taint. By default, it is not set, + which means tolerate the taint forever + (do not evict). Zero and negative + values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value + the toleration matches to. If the + operator is Exists, the value should + be empty, otherwise just a regular + string. 
+ type: string + serviceType: + description: Optional service type for Kubernetes + solver service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + additionalProperties: + type: string + ca: + type: object + required: + - secretName + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. 
+ type: string + selfSigned: + type: object + vault: + type: object + required: + - auth + - path + - server + properties: + auth: + description: Vault authentication + type: object + properties: + appRole: + description: This Secret contains a AppRole and Secret + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + kubernetes: + description: This contains a Role and Secret with a ServiceAccount + token to authenticate with vault. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path + to use when authenticating with Vault. For example, setting + a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` + to authenticate with Vault. If unspecified, the default + value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role + to assume. A Role binds a Kubernetes ServiceAccount with + a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes + ServiceAccount JWT used for authenticating with Vault. + Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + tokenSecretRef: + description: This Secret contains the Vault token key + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + type: string + format: byte + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + venafi: + description: VenafiIssuer describes issuer configuration details for + Venafi Cloud. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + - url + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + url: + description: URL is the base URL for Venafi Cloud + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for the Venafi TPP instance + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + status: + description: IssuerStatus contains status information about an Issuer + type: object + properties: + acme: + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + conditions: + type: array + items: + description: IssuerCondition contains condition information for an + Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: acme.cert-manager.io + preserveUnknownFields: false + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - csr + - issuerRef + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded + CSR. If CommonName is not specified, the first DNSName specified will + be used as the CommonName. At least one of CommonName or a DNSNames + must be set. This field must match the corresponding field on the + DER encoded CSR. + type: string + csr: + description: Certificate signing request bytes in DER encoding. This + will be used when finalizing the order. This field must be set on + the order. + type: string + format: byte + dnsNames: + description: DNSNames is a list of DNS names that should be included + as part of the Order validation process. 
If CommonName is not specified, + the first DNSName specified will be used as the CommonName. At least + one of CommonName or a DNSNames must be set. This field must match + the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Order. If the Issuer does not + exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Order will be marked as + failed. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server + on what authoriations must be completed in order to validate the DNS + names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME + server on an authorization that must be completed in order validate + a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered + by the ACME server. One of these challenge types will be selected + when validating the DNS name and an appropriate Challenge resource + will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the + ACME server for an Order. An appropriate Challenge resource + can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for + this challenge. This is used to compute the 'key' that + must also be presented. + type: string + type: + description: Type is the type of challenge being offered, + e.g. 
http-01, dns-01 + type: string + url: + description: URL is the URL of this challenge. It can be + used to retrieve additional metadata about the Challenge + from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part + of this authorization + type: string + url: + description: URL is the URL of the Authorization that must be + completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for + a wildcard DNS name. If this is true, the identifier will be + the *non-wildcard* version of the DNS name. For example, if + '*.example.com' is the DNS name being validated, this field + will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for + this Order. This field will be populated after the order has been + successfully finalized with the ACME server, and the order has transitioned + to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This + is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates + for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why + the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. + States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the + resource is first created. The Order controller will populate this + field when the Order is first processed. This field will be immutable + after it is initially set. 
+ type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager.yaml b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager.yaml new file mode 100644 index 0000000000..1ee179c025 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/cert-manager.yaml @@ -0,0 +1,6406 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: CertificateRequest is a type to represent a Certificate Signing + Request + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateRequestSpec defines the desired state of CertificateRequest + type: object + required: + - csr + - issuerRef + properties: + csr: + description: Byte slice containing the PEM encoded CertificateSigningRequest + type: string + format: byte + duration: + description: Requested certificate default Duration + type: string + isCA: + description: IsCA will mark the resulting certificate as valid for signing. + This implies that the 'cert sign' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If + the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the CertificateRequest + will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. The group field refers to the API group + of the issuer which defaults to 'cert-manager.io' if empty. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + usages: + description: Usages is the set of x509 actions that are enabled for + a given key. Defaults are ('digital signature', 'key encipherment') + if empty + type: array + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: + https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Valid KeyUsage values are as follows: "signing", "digital signature", + "content commitment", "key encipherment", "key agreement", "data + encipherment", "cert sign", "crl sign", "encipher only", "decipher + only", "any", "server auth", "client auth", "code signing", "email + protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec + user", "timestamping", "ocsp signing", "microsoft sgc", "netscape + sgc"' + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: CertificateStatus defines the observed state of CertificateRequest + and resulting signed certificate. + type: object + properties: + ca: + description: Byte slice containing the PEM encoded certificate authority + of the signed certificate. + type: string + format: byte + certificate: + description: Byte slice containing a PEM encoded signed certificate + resulting from the given certificate signing request. + type: string + format: byte + conditions: + type: array + items: + description: CertificateRequestCondition contains condition information + for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. 
+ type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + failureTime: + description: FailureTime stores the time that this CertificateRequest + failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.secretName + name: Secret + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Certificate is a type to represent a Certificate from ACME + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSpec defines the desired state of Certificate. A + valid Certificate requires at least one of a CommonName, DNSName, or URISAN + to be valid. + type: object + required: + - issuerRef + - secretName + properties: + commonName: + description: CommonName is a common name to be used on the Certificate. + The CommonName should have a length of 64 characters or fewer to avoid + generating invalid CSRs. + type: string + dnsNames: + description: DNSNames is a list of subject alt names to be used on the + Certificate. + type: array + items: + type: string + duration: + description: Certificate default Duration + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses to be used on the + Certificate + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for signing. This + implies that the 'cert sign' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. + If the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the Certificate will + be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. 
+ type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding + private key for this certificate. If provided, allowed values are + either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is + not provided, key size of 256 will be used for "ecdsa" key algorithm + and key size of 2048 will be used for "rsa" key algorithm. + type: string + enum: + - rsa + - ecdsa + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) + for this certificate's private key to be encoded in. If provided, + allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, + respectively. If KeyEncoding is not specified, then PKCS#1 will be + used by default. + type: string + enum: + - pkcs1 + - pkcs8 + keySize: + description: KeySize is the key bit size of the corresponding private + key for this certificate. If provided, value must be between 2048 + and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa", + and value must be one of (256, 384, 521) when KeyAlgorithm is set + to "ecdsa". + type: integer + organization: + description: Organization is the organization to be used on the Certificate + type: array + items: + type: string + renewBefore: + description: Certificate renew before expiration duration + type: string + secretName: + description: SecretName is the name of the secret resource to store + this secret in + type: string + uriSANs: + description: URISANs is a list of URI Subject Alternative Names to be + set on this Certificate. + type: array + items: + type: string + usages: + description: Usages is the set of x509 actions that are enabled for + a given key. Defaults are ('digital signature', 'key encipherment') + if empty + type: array + items: + description: 'KeyUsage specifies valid usage contexts for keys. 
See: + https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Valid KeyUsage values are as follows: "signing", "digital signature", + "content commitment", "key encipherment", "key agreement", "data + encipherment", "cert sign", "crl sign", "encipher only", "decipher + only", "any", "server auth", "client auth", "code signing", "email + protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec + user", "timestamping", "ocsp signing", "microsoft sgc", "netscape + sgc"' + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: CertificateStatus defines the observed state of Certificate + type: object + properties: + conditions: + type: array + items: + description: CertificateCondition contains condition information for + an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). 
+ type: string + lastFailureTime: + type: string + format: date-time + notAfter: + description: The expiration time of the certificate stored in the secret + named by this resource in spec.secretName. + type: string + format: date-time + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.dnsName + name: Domain + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: acme.cert-manager.io + preserveUnknownFields: false + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME + server + type: object + required: + - metadata + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - authzURL + - dnsName + - issuerRef + - key + - token + - type + - url + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. + example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Challenge. If the Issuer does + not exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Challenge will be marked + as failed. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration that should + be used to solve this challenge resource. + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure containing + the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure containing + the DNS configuration for Akamai DNS—Zone Record Management + API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a structure + containing the DNS configuration for DigitalOcean Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure containing + the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting RFC2136. + Required. Note: FQDN is not a valid value, only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting + RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` + are defined. Supported values are (case-insensitive): + ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or + ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If + ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG + value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure containing + the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared credentials + file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only this + zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName + api call. + type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider + will assume using either the explicit credentials AccessKeyID/SecretAccessKey + or the inferred credentials from environment variables, + shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared credentials + file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies configuration + for a webhook DNS01 provider, including where to POST ChallengePayload + resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed + to the webhook apiserver when challenges are processed. + This can contain arbitrary JSON data. Secret values should + not be specified in this stanza. If secret values are + needed (e.g. credentials for a DNS service), you should + use a SecretKeySelector to reference a Secret resource. + For details on the schema of this field, consult the webhook + provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when + POSTing ChallengePayload resources to the webhook apiserver. + This should be the same as the GroupName specified in + the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in + the webhook provider implementation. This will typically + be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration detailing + how to solve HTTP01 challenges within a Kubernetes cluster. Typically + this is accomplished through creating 'routes' of some description + that configure ingress controllers to direct traffic to 'solver + pods', which are responsible for responding to the ACME server's + HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver will + solve challenges by creating or modifying Ingress resources + in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. 
+ type: object + properties: + class: + description: The ingress class to use when creating Ingress + resources to solve ACME challenges that use this challenge + solver. Only one of 'class' or 'name' may be specified. + type: string + name: + description: The name of the ingress resource that should + have ACME challenge solving routes inserted into it in + order to solve HTTP01 challenges. This is typically used + in conjunction with ingress controllers like ingress-gce, + which maintains a 1:1 mapping between external IPs and + ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the + ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to + solve HTTP01 challenges. Only the 'labels' and 'annotations' + fields may be set. If labels or annotations overlap + with in-built values, the values here will override + the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to + the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the + created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 + challenge solver pod. Only the 'nodeSelector', 'affinity' + and 'tolerations' fields are supported currently. + All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. 
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this + field, but it may choose a node that violates + one or more of the expressions. The node + that is most preferred is the one with + the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a + sum by iterating through the elements + of this field and adding "weight" to the + sum if the node matches the corresponding + matchExpressions; the node(s) with the + highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling + term matches all objects with implicit + weight 0 (i.e. it's a no-op). A null + preferred scheduling term matches no + objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + type: object + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. 
If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + matchFields: + description: A list of node selector + requirements by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to an update), the system may or may + not try to eventually evict the pod from + its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. 
+ type: array + items: + description: A null or empty node + selector term matches no objects. + The requirements of them are ANDed. + The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + matchFields: + description: A list of node selector + requirements by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: Represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of + string values. If the + operator is In or NotIn, + the values array must + be non-empty. 
If the operator + is Exists or DoesNotExist, + the values array must + be empty. If the operator + is Gt or Lt, the values + array must have a single + element, which will be + interpreted as an integer. + This array is replaced + during a strategic merge + patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same + node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + affinity expressions specified by this + field, but it may choose a node that violates + one or more of the expressions. The node + that is most preferred is the one with + the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a + sum by iterating through the elements + of this field and adding "weight" to the + sum if the node has pods which matches + the corresponding podAffinityTerm; the + node(s) with the highest sum are the most + preferred. + type: array + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to a + set of values. Valid + operators are In, + NotIn, Exists and + DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, the + values array must + be non-empty. If the + operator is Exists + or DoesNotExist, the + values array must + be empty. This array + is replaced during + a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is + a map of {key,value} pairs. + A single {key,value} in + the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be + co-located (affinity) or not + co-located (anti-affinity) with + the pods matching the labelSelector + in the specified namespaces, + where co-located is defined + as running on a node whose value + of the label with key topologyKey + matches that of any node on + which any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to a pod label update), the system + may or may not try to eventually evict + the pod from its node. When there are + multiple elements, the lists of nodes + corresponding to each podAffinityTerm + are intersected, i.e. all terms must be + satisfied. + type: array + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. 
If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); null + or empty list means "this pod's + namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the + same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to + schedule pods to nodes that satisfy the + anti-affinity expressions specified by + this field, but it may choose a node that + violates one or more of the expressions. + The node that is most preferred is the + one with the greatest sum of weights, + i.e. 
for each node that meets all of the + scheduling requirements (resource request, + requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and + adding "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with the + highest sum are the most preferred. + type: array + items: + description: The weights of all of the + matched WeightedPodAffinityTerm fields + are added per-node to find the most + preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key and + values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to a + set of values. Valid + operators are In, + NotIn, Exists and + DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, the + values array must + be non-empty. If the + operator is Exists + or DoesNotExist, the + values array must + be empty. This array + is replaced during + a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is + a map of {key,value} pairs. 
+ A single {key,value} in + the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", + the operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be + co-located (affinity) or not + co-located (anti-affinity) with + the pods matching the labelSelector + in the specified namespaces, + where co-located is defined + as running on a node whose value + of the label with key topologyKey + matches that of any node on + which any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated with + matching the corresponding podAffinityTerm, + in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at + scheduling time, the pod will not be scheduled + onto the node. If the anti-affinity requirements + specified by this field cease to be met + at some point during pod execution (e.g. + due to a pod label update), the system + may or may not try to eventually evict + the pod from its node. When there are + multiple elements, the lists of nodes + corresponding to each podAffinityTerm + are intersected, i.e. all terms must be + satisfied. 
+ type: array + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this + pod should be co-located (affinity) + or not co-located (anti-affinity) with, + where co-located is defined as running + on a node whose value of the label with + key matches that of any + node on which a pod of the set of pods + is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); null + or empty list means "this pod's + namespace" + type: array + items: + type: string + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which must + be true for the pod to fit on a node. Selector + which must match a node''s labels for the pod + to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached + to tolerates any taint that matches the triple + using the matching operator + . + type: object + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the + toleration applies to. Empty means match + all taint keys. If the key is empty, operator + must be Exists; this combination means to + match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists + and Equal. Defaults to Equal. Exists is + equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration (which + must be of effect NoExecute, otherwise this + field is ignored) tolerates the taint. By + default, it is not set, which means tolerate + the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the + toleration matches to. If the operator is + Exists, the value should be empty, otherwise + just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver + service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used + to solve. If specified and a match is found, a dnsNames selector + will take precedence over a dnsZones selector. If multiple + solvers match with the same dnsNames value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used + to solve. The most specific DNS zone match specified here + will take precedence over other DNS zone matches, so a solver + specifying sys.example.com will be selected over one specifying + example.com for the domain www.sys.example.com. If multiple + solvers match with the same dnsZones value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set + of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource represents, + e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for this + challenge. This can be used to lookup details about the status of + this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard + identifier, for example '*.example.com' + type: boolean + status: + type: object + properties: + presented: + description: Presented will be set to true if the challenge values for + this challenge are currently 'presented'. This *does not* imply the + self check is passing. Only that the values have been 'submitted' + for the appropriate challenge mechanism (i.e. the DNS01 TXT record + has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Processing is used to denote whether this challenge should + be processed or not. This field will only be set to true by the 'scheduling' + component. It will only be set to false by the 'challenges' controller, + after the challenge has reached a final state or timed out. If this + field is set to false, the challenge controller will not take any + more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge + is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If + not set, the state of the challenge is unknown. 
+ type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IssuerSpec is the specification of an Issuer. This includes + any configuration required for the issuer. 
+ type: object + properties: + acme: + description: ACMEIssuer contains the specification for an ACME issuer + type: object + required: + - privateKeySecretRef + - server + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + type: array + items: + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure + containing the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure + containing the DNS configuration for Akamai DNS—Zone + Record Management API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a + structure containing the DNS configuration for DigitalOcean + Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure + containing the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting + RFC2136. Required. Note: FQDN is not a valid value, + only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the + DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` + and ``tsigKeyName`` are defined. Supported values + are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, + ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. + If ``tsigSecretSecretRef`` is defined, this field + is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the + TSIG value. If ``tsigKeyName`` is defined, this + field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure + containing the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only + this zone in Route53 and will not do an lookup using + the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 + provider will assume using either the explicit credentials + AccessKeyID/SecretAccessKey or the inferred credentials + from environment variables, shared credentials file + or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies + configuration for a webhook DNS01 provider, including + where to POST ChallengePayload resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should + be passed to the webhook apiserver when challenges + are processed. This can contain arbitrary JSON data. + Secret values should not be specified in this stanza. + If secret values are needed (e.g. credentials for + a DNS service), you should use a SecretKeySelector + to reference a Secret resource. For details on the + schema of this field, consult the webhook provider + implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used + when POSTing ChallengePayload resources to the webhook + apiserver. This should be the same as the GroupName + specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined + in the webhook provider implementation. This will + typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration + detailing how to solve HTTP01 challenges within a Kubernetes + cluster. Typically this is accomplished through creating + 'routes' of some description that configure ingress controllers + to direct traffic to 'solver pods', which are responsible + for responding to the ACME server's HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver + will solve challenges by creating or modifying Ingress + resources in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. 
+ type: object + properties: + class: + description: The ingress class to use when creating + Ingress resources to solve ACME challenges that + use this challenge solver. Only one of 'class' or + 'name' may be specified. + type: string + name: + description: The name of the ingress resource that + should have ACME challenge solving routes inserted + into it in order to solve HTTP01 challenges. This + is typically used in conjunction with ingress controllers + like ingress-gce, which maintains a 1:1 mapping + between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure + the ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod + used to solve HTTP01 challenges. Only the 'labels' + and 'annotations' fields may be set. If labels + or annotations overlap with in-built values, + the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added + to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to + the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the + HTTP01 challenge solver pod. Only the 'nodeSelector', + 'affinity' and 'tolerations' fields are supported + currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. 
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node matches the + corresponding matchExpressions; + the node(s) with the highest sum + are the most preferred. + type: array + items: + description: An empty preferred + scheduling term matches all objects + with implicit weight 0 (i.e. it's + a no-op). A null preferred scheduling + term matches no objects (i.e. + is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector + term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to an update), the system + may or may not try to eventually + evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list + of node selector terms. The + terms are ORed. 
+ type: array + items: + description: A null or empty + node selector term matches + no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of + the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. 
+ If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node has pods + which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. 
+ type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to a pod label update), + the system may or may not try to + eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. 
+ If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting + this pod in the same node, zone, etc. + as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. 
+ for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + anti-affinity expressions, etc.), + compute a sum by iterating through + the elements of this field and adding + "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. 
A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity + requirements specified by this field + are not met at scheduling time, + the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to + be met at some point during pod + execution (e.g. due to a pod label + update), the system may or may not + try to eventually evict the pod + from its node. When there are multiple + elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. 
+ type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + nodeSelector: + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. + Selector which must match a node''s labels + for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is + attached to tolerates any taint that matches + the triple using the + matching operator . + type: object + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match + all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that + the toleration applies to. Empty means + match all taint keys. If the key is + empty, operator must be Exists; this + combination means to match all values + and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate + all taints of a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration + (which must be of effect NoExecute, + otherwise this field is ignored) tolerates + the taint. By default, it is not set, + which means tolerate the taint forever + (do not evict). Zero and negative + values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value + the toleration matches to. If the + operator is Exists, the value should + be empty, otherwise just a regular + string. + type: string + serviceType: + description: Optional service type for Kubernetes + solver service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + additionalProperties: + type: string + ca: + type: object + required: + - secretName + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. + type: string + selfSigned: + type: object + vault: + type: object + required: + - auth + - path + - server + properties: + auth: + description: Vault authentication + type: object + properties: + appRole: + description: This Secret contains a AppRole and Secret + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + kubernetes: + description: This contains a Role and Secret with a ServiceAccount + token to authenticate with vault. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path + to use when authenticating with Vault. For example, setting + a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` + to authenticate with Vault. If unspecified, the default + value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role + to assume. A Role binds a Kubernetes ServiceAccount with + a set of Vault policies. 
+ type: string + secretRef: + description: The required Secret field containing a Kubernetes + ServiceAccount JWT used for authenticating with Vault. + Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + tokenSecretRef: + description: This Secret contains the Vault token key + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + type: string + format: byte + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + venafi: + description: VenafiIssuer describes issuer configuration details for + Venafi Cloud. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + - url + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. 
+ type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for Venafi Cloud + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for the Venafi TPP instance + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + status: + description: IssuerStatus contains status information about an Issuer + type: object + properties: + acme: + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + conditions: + type: array + items: + description: IssuerCondition contains condition information for an + Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: cert-manager.io + preserveUnknownFields: false + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IssuerSpec is the specification of an Issuer. This includes + any configuration required for the issuer. + type: object + properties: + acme: + description: ACMEIssuer contains the specification for an ACME issuer + type: object + required: + - privateKeySecretRef + - server + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + type: array + items: + type: object + properties: + dns01: + type: object + properties: + acmedns: + description: ACMEIssuerDNS01ProviderAcmeDNS is a structure + containing the configuration for ACME-DNS servers + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + host: + type: string + akamai: + description: ACMEIssuerDNS01ProviderAkamai is a structure + containing the DNS configuration for Akamai DNS—Zone + Record Management API + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + clientTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + serviceConsumerDomain: + type: string + azuredns: + description: ACMEIssuerDNS01ProviderAzureDNS is a structure + containing the configuration for Azure DNS + type: object + required: + - clientID + - clientSecretSecretRef + - resourceGroupName + - subscriptionID + - tenantID + properties: + clientID: + type: string + clientSecretSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + environment: + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + type: string + resourceGroupName: + type: string + subscriptionID: + type: string + tenantID: + type: string + clouddns: + description: ACMEIssuerDNS01ProviderCloudDNS is a structure + containing the DNS configuration for Google Cloud DNS + type: object + required: + - project + properties: + project: + type: string + serviceAccountSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + cloudflare: + description: ACMEIssuerDNS01ProviderCloudflare is a structure + containing the DNS configuration for Cloudflare + type: object + required: + - email + properties: + apiKeySecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + apiTokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + email: + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider + should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: ACMEIssuerDNS01ProviderDigitalOcean is a + structure containing the DNS configuration for DigitalOcean + Domains + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + rfc2136: + description: ACMEIssuerDNS01ProviderRFC2136 is a structure + containing the configuration for RFC2136 DNS + type: object + required: + - nameserver + properties: + nameserver: + description: 'The IP address of the DNS supporting + RFC2136. Required. Note: FQDN is not a valid value, + only IP.' + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the + DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` + and ``tsigKeyName`` are defined. Supported values + are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, + ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. + If ``tsigSecretSecretRef`` is defined, this field + is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the + TSIG value. If ``tsigKeyName`` is defined, this + field is required. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + route53: + description: ACMEIssuerDNS01ProviderRoute53 is a structure + containing the Route 53 configuration for AWS + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + hostedZoneID: + description: If set, the provider will manage only + this zone in Route53 and will not do an lookup using + the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID + and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 + provider will assume using either the explicit credentials + AccessKeyID/SecretAccessKey or the inferred credentials + from environment variables, shared credentials file + or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: The SecretAccessKey is used for authentication. + If not set we fall-back to using env vars, shared + credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. + Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + webhook: + description: ACMEIssuerDNS01ProviderWebhook specifies + configuration for a webhook DNS01 provider, including + where to POST ChallengePayload resources. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should + be passed to the webhook apiserver when challenges + are processed. This can contain arbitrary JSON data. + Secret values should not be specified in this stanza. + If secret values are needed (e.g. credentials for + a DNS service), you should use a SecretKeySelector + to reference a Secret resource. For details on the + schema of this field, consult the webhook provider + implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used + when POSTing ChallengePayload resources to the webhook + apiserver. 
This should be the same as the GroupName + specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined + in the webhook provider implementation. This will + typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: ACMEChallengeSolverHTTP01 contains configuration + detailing how to solve HTTP01 challenges within a Kubernetes + cluster. Typically this is accomplished through creating + 'routes' of some description that configure ingress controllers + to direct traffic to 'solver pods', which are responsible + for responding to the ACME server's HTTP requests. + type: object + properties: + ingress: + description: The ingress based HTTP01 challenge solver + will solve challenges by creating or modifying Ingress + resources in order to route requests for '/.well-known/acme-challenge/XYZ' + to 'challenge solver' pods that are provisioned by cert-manager + for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating + Ingress resources to solve ACME challenges that + use this challenge solver. Only one of 'class' or + 'name' may be specified. + type: string + name: + description: The name of the ingress resource that + should have ACME challenge solving routes inserted + into it in order to solve HTTP01 challenges. This + is typically used in conjunction with ingress controllers + like ingress-gce, which maintains a 1:1 mapping + between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure + the ACME challenge solver pods used for HTTP01 challenges + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod + used to solve HTTP01 challenges. Only the 'labels' + and 'annotations' fields may be set. 
If labels + or annotations overlap with in-built values, + the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added + to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to + the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the + HTTP01 challenge solver pod. Only the 'nodeSelector', + 'affinity' and 'tolerations' fields are supported + currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node matches the + corresponding matchExpressions; + the node(s) with the highest sum + are the most preferred. + type: array + items: + description: An empty preferred + scheduling term matches all objects + with implicit weight 0 (i.e. it's + a no-op). A null preferred scheduling + term matches no objects (i.e. + is also a no-op). 
+ type: object + required: + - preference + - weight + properties: + preference: + description: A node selector + term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to an update), the system + may or may not try to eventually + evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list + of node selector terms. The + terms are ORed. + type: array + items: + description: A null or empty + node selector term matches + no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of + the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. 
+ for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node has pods + which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. 
A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to a pod label update), + the system may or may not try to + eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. 
+ type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting + this pod in the same node, zone, etc. + as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + anti-affinity expressions, etc.), + compute a sum by iterating through + the elements of this field and adding + "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. 
+ type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. 
+ type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity + requirements specified by this field + are not met at scheduling time, + the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to + be met at some point during pod + execution (e.g. due to a pod label + update), the system may or may not + try to eventually evict the pod + from its node. When there are multiple + elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. 
+ type: string + nodeSelector: + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. + Selector which must match a node''s labels + for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is + attached to tolerates any taint that matches + the triple using the + matching operator . + type: object + properties: + effect: + description: Effect indicates the taint + effect to match. Empty means match + all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Key is the taint key that + the toleration applies to. Empty means + match all taint keys. If the key is + empty, operator must be Exists; this + combination means to match all values + and all keys. + type: string + operator: + description: Operator represents a key's + relationship to the value. Valid operators + are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents + the period of time the toleration + (which must be of effect NoExecute, + otherwise this field is ignored) tolerates + the taint. By default, it is not set, + which means tolerate the taint forever + (do not evict). Zero and negative + values will be treated as 0 (evict + immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value + the toleration matches to. If the + operator is Exists, the value should + be empty, otherwise just a regular + string. 
+ type: string + serviceType: + description: Optional service type for Kubernetes + solver service + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + additionalProperties: + type: string + ca: + type: object + required: + - secretName + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. 
+ type: string + selfSigned: + type: object + vault: + type: object + required: + - auth + - path + - server + properties: + auth: + description: Vault authentication + type: object + properties: + appRole: + description: This Secret contains a AppRole and Secret + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + kubernetes: + description: This contains a Role and Secret with a ServiceAccount + token to authenticate with vault. + type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path + to use when authenticating with Vault. For example, setting + a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` + to authenticate with Vault. If unspecified, the default + value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role + to assume. A Role binds a Kubernetes ServiceAccount with + a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes + ServiceAccount JWT used for authenticating with Vault. + Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + tokenSecretRef: + description: This Secret contains the Vault token key + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + type: string + format: byte + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + venafi: + description: VenafiIssuer describes issuer configuration details for + Venafi Cloud. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + - url + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + url: + description: URL is the base URL for Venafi Cloud + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + url: + description: URL is the base URL for the Venafi TPP instance + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + status: + description: IssuerStatus contains status information about an Issuer + type: object + properties: + acme: + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + conditions: + type: array + items: + description: IssuerCondition contains condition information for an + Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, currently ('Ready'). + type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: acme.cert-manager.io + preserveUnknownFields: false + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - csr + - issuerRef + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded + CSR. If CommonName is not specified, the first DNSName specified will + be used as the CommonName. At least one of CommonName or a DNSNames + must be set. This field must match the corresponding field on the + DER encoded CSR. + type: string + csr: + description: Certificate signing request bytes in DER encoding. This + will be used when finalizing the order. This field must be set on + the order. + type: string + format: byte + dnsNames: + description: DNSNames is a list of DNS names that should be included + as part of the Order validation process. 
If CommonName is not specified, + the first DNSName specified will be used as the CommonName. At least + one of CommonName or a DNSNames must be set. This field must match + the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Order. If the Issuer does not + exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Order will be marked as + failed. + type: object + required: + - name + properties: + group: + type: string + kind: + type: string + name: + type: string + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server + on what authoriations must be completed in order to validate the DNS + names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME + server on an authorization that must be completed in order validate + a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered + by the ACME server. One of these challenge types will be selected + when validating the DNS name and an appropriate Challenge resource + will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the + ACME server for an Order. An appropriate Challenge resource + can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for + this challenge. This is used to compute the 'key' that + must also be presented. + type: string + type: + description: Type is the type of challenge being offered, + e.g. 
http-01, dns-01 + type: string + url: + description: URL is the URL of this challenge. It can be + used to retrieve additional metadata about the Challenge + from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part + of this authorization + type: string + url: + description: URL is the URL of the Authorization that must be + completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for + a wildcard DNS name. If this is true, the identifier will be + the *non-wildcard* version of the DNS name. For example, if + '*.example.com' is the DNS name being validated, this field + will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for + this Order. This field will be populated after the order has been + successfully finalized with the ACME server, and the order has transitioned + to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This + is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates + for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why + the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. + States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the + resource is first created. The Order controller will populate this + field when the Order is first processed. This field will be immutable + after it is initially set. 
+ type: string + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +--- +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager-cainjector + namespace: "cert-manager" + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager + namespace: "cert-manager" + annotations: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", 
"mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: "cert-manager" + kind: ServiceAccount + +--- +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + # Used for leader election by the controller + # TODO: refine the permission to *just* the leader election configmap + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create", "update", "patch"] + +--- + +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - apiGroup: "" + kind: 
ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +### Webhook ### +--- +# apiserver gets the auth-delegator role to delegate auth decisions to +# the core apiserver +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:auth-delegator + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager + +--- + +# apiserver gets the ability to read authentication. This allows it to +# read the specific configmap that has the requestheader-* entries to +# api agg +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:webhook-authentication-reader + namespace: kube-system + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:webhook-requester + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: +- apiGroups: + - admission.cert-manager.io + resources: + - certificates + - certificaterequests + - issuers + - clusterissuers + verbs: + - create +--- +# Source: cert-manager/templates/rbac.yaml 
+apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + # Used for leader election by the controller + # TODO: refine the permission to *just* the leader election configmap + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create", "update", "patch"] + +--- + +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager + +--- + +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: 
ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", 
"patch"] + +--- + +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: 
["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + +--- + +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # 
https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["extensions"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: 
cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", 
"list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + +--- +# Source: cert-manager/templates/service.yaml + +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: "cert-manager" + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +spec: + type: ClusterIP + ports: + - name: https + port: 443 + targetPort: 10250 + selector: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: "cert-manager" + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller 
+ helm.sh/chart: cert-manager-v0.12.0 +spec: + replicas: 1 + selector: + matchLabels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + annotations: + spec: + serviceAccountName: cert-manager-cainjector + containers: + - name: cert-manager + image: "quay.io/jetstack/cert-manager-cainjector:v0.12.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {} + + +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: "cert-manager" + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +spec: + replicas: 1 + selector: + matchLabels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + containers: + - name: cert-manager + image: "quay.io/jetstack/cert-manager-controller:v0.12.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --webhook-namespace=$(POD_NAMESPACE) + - 
--webhook-ca-secret=cert-manager-webhook-ca + - --webhook-serving-secret=cert-manager-webhook-tls + - --webhook-dns-names=cert-manager-webhook,cert-manager-webhook.cert-manager,cert-manager-webhook.cert-manager.svc + ports: + - containerPort: 9402 + protocol: TCP + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: 10m + memory: 32Mi + + +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 +spec: + replicas: 1 + selector: + matchLabels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + annotations: + spec: + serviceAccountName: cert-manager-webhook + containers: + - name: cert-manager + image: "quay.io/jetstack/cert-manager-webhook:v0.12.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=10250 + - --tls-cert-file=/certs/tls.crt + - --tls-private-key-file=/certs/tls.key + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {} + + volumeMounts: + - name: certs + mountPath: /certs + volumes: + - name: certs + secret: + secretName: cert-manager-webhook-tls +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + 
name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-tls" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - "*/*" + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: "cert-manager" + path: /mutate +--- +# Source: cert-manager/templates/cainjector-psp-clusterrole.yaml + + +--- +# Source: cert-manager/templates/cainjector-psp-clusterrolebinding.yaml + + +--- +# Source: cert-manager/templates/cainjector-psp.yaml + + +--- +# Source: cert-manager/templates/psp-clusterrole.yaml + + +--- +# Source: cert-manager/templates/psp-clusterrolebinding.yaml + + +--- +# Source: cert-manager/templates/psp.yaml + + +--- +# Source: cert-manager/templates/servicemonitor.yaml + + +--- +# Source: cert-manager/templates/webhook-psp-clusterrole.yaml + + +--- +# Source: cert-manager/templates/webhook-psp-clusterrolebinding.yaml + + +--- +# Source: cert-manager/templates/webhook-psp.yaml + + +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.12.0 + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-tls" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + 
operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - "*/*" + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: "cert-manager" + path: /mutate diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/download-cert-manager.sh b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/download-cert-manager.sh new file mode 100755 index 0000000000..077c9b4c42 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.12.0/download-cert-manager.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env bash + +# Download and unpack cert-manager +CERT_MANAGER_VERSION=0.12.0 +ARCHIVE_DOWNLOAD_URL=https://github.com/jetstack/cert-manager/archive/v${CERT_MANAGER_VERSION}.tar.gz +YAML_URL=https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VERSION}/cert-manager.yaml + +wget $ARCHIVE_DOWNLOAD_URL +tar xzf v${CERT_MANAGER_VERSION}.tar.gz + +( +# subshell in downloaded directory +cd cert-manager-${CERT_MANAGER_VERSION} || exit + +# Copy the CRD yaml file +cp deploy/manifests/00-crds.yaml ../cert-manager-crds.yaml +) + +# Download the cert-manager yaml file +wget $YAML_URL + +# Clean up. 
+rm -rf cert-manager-${CERT_MANAGER_VERSION} +rm v${CERT_MANAGER_VERSION}.tar.gz diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager-crds.yaml b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager-crds.yaml new file mode 100644 index 0000000000..89a5154ddb --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager-crds.yaml @@ -0,0 +1,1426 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: certificates.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.secretName + name: Secret + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + description: ACME contains configuration specific to ACME Certificates. + Notably, this contains details on how the domain names listed on this + Certificate resource should be 'solved', i.e. mapping HTTP01 and DNS01 + providers to DNS names. + properties: + config: + items: + properties: + domains: + description: Domains is the list of domains that this SolverConfig + applies to. + items: + type: string + type: array + required: + - domains + type: object + type: array + required: + - config + type: object + commonName: + description: CommonName is a common name to be used on the Certificate. + If no CommonName is given, then the first entry in DNSNames is used + as the CommonName. The CommonName should have a length of 64 characters + or fewer to avoid generating invalid CSRs; in order to have longer + domain names, set the CommonName (or first DNSNames entry) to have + 64 characters or fewer, and then add the longer domain name to DNSNames. + type: string + dnsNames: + description: DNSNames is a list of subject alt names to be used on the + Certificate. If no CommonName is given, then the first entry in DNSNames + is used as the CommonName and must have a length of 64 characters + or fewer. + items: + type: string + type: array + duration: + description: Certificate default Duration + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses to be used on the + Certificate + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for signing. This + implies that the 'signing' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. 
+ If the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the Certificate will + be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding + private key for this certificate. If provided, allowed values are + either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is + not provided, key size of 256 will be used for "ecdsa" key algorithm + and key size of 2048 will be used for "rsa" key algorithm. + enum: + - rsa + - ecdsa + type: string + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) + for this certificate's private key to be encoded in. If provided, + allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, + respectively. If KeyEncoding is not specified, then PKCS#1 will be + used by default. + type: string + keySize: + description: KeySize is the key bit size of the corresponding private + key for this certificate. If provided, value must be between 2048 + and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa", + and value must be one of (256, 384, 521) when KeyAlgorithm is set + to "ecdsa". 
+ format: int64 + type: integer + organization: + description: Organization is the organization to be used on the Certificate + items: + type: string + type: array + renewBefore: + description: Certificate renew before expiration duration + type: string + secretName: + description: SecretName is the name of the secret resource to store + this secret in + type: string + required: + - secretName + - issuerRef + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + lastFailureTime: + format: date-time + type: string + notAfter: + description: The expiration time of the certificate stored in the secret + named by this resource in spec.secretName. 
+ format: date-time + type: string + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: certificaterequests.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: CertificateRequest + plural: certificaterequests + shortNames: + - cr + - crs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + csr: + description: Byte slice containing the PEM encoded CertificateSigningRequest + format: byte + type: string + duration: + description: Requested certificate default Duration + type: string + isCA: + description: IsCA will mark the resulting certificate as valid for signing. + This implies that the 'signing' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If + the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the CertificateRequest + will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. The group field refers to the API group + of the issuer which defaults to 'certmanager.k8s.io' if empty. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + required: + - issuerRef + type: object + status: + properties: + ca: + description: Byte slice containing the PEM encoded certificate authority + of the signed certificate. + format: byte + type: string + certificate: + description: Byte slice containing a PEM encoded signed certificate + resulting from the given certificate signing request. + format: byte + type: string + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: challenges.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.dnsName + name: Domain + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: Challenge + plural: challenges + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + config: + description: 'Config specifies the solver configuration for this challenge. + Only **one** of ''config'' or ''solver'' may be specified, and if + both are specified then no action will be performed on the Challenge + resource. DEPRECATED: the ''solver'' field should be specified instead' + type: object + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. + example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Challenge. If the Issuer does + not exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Challenge will be marked + as failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration that should + be used to solve this challenge resource. Only **one** of 'config' + or 'solver' may be specified, and if both are specified then no action + will be performed on the Challenge resource. + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be used + to solve. If specified and a match is found, a dnsNames selector + will take precedence over a dnsZones selector. 
If multiple + solvers match with the same dnsNames value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used + to solve. The most specific DNS zone match specified here + will take precedence over other DNS zone matches, so a solver + specifying sys.example.com will be selected over one specifying + example.com for the domain www.sys.example.com. If multiple + solvers match with the same dnsZones value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the set + of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource represents, + e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for this + challenge. This can be used to lookup details about the status of + this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard + identifier, for example '*.example.com' + type: boolean + required: + - authzURL + - type + - url + - dnsName + - token + - key + - wildcard + - issuerRef + type: object + status: + properties: + presented: + description: Presented will be set to true if the challenge values for + this challenge are currently 'presented'. This *does not* imply the + self check is passing. Only that the values have been 'submitted' + for the appropriate challenge mechanism (i.e. 
the DNS01 TXT record + has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Processing is used to denote whether this challenge should + be processed or not. This field will only be set to true by the 'scheduling' + component. It will only be set to false by the 'challenges' controller, + after the challenge has reached a final state or timed out. If this + field is set to false, the challenge controller will not take any + more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge + is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If + not set, the state of the challenge is unknown. + enum: + - "" + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + required: + - processing + - presented + - reason + type: object + required: + - metadata + - spec + - status + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: clusterissuers.certmanager.k8s.io +spec: + group: certmanager.k8s.io + names: + kind: ClusterIssuer + plural: clusterissuers + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + items: + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. 
The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + type: array + required: + - server + - privateKeySecretRef + type: object + ca: + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + type: object + vault: + properties: + auth: + description: Vault authentication + properties: + appRole: + description: This Secret contains a AppRole and Secret + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + tokenSecretRef: + description: This Secret contains the Vault token key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + type: object + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + format: byte + type: string + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + required: + - auth + - server + - path + type: object + venafi: + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud + type: string + required: + - url + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. 
If not specified, + the connection will be verified using the cert-manager system + root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for the Venafi TPP instance + type: string + required: + - url + - credentialsRef + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + properties: + acme: + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: issuers.certmanager.k8s.io +spec: + group: certmanager.k8s.io + names: + kind: Issuer + plural: issuers + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + required: + - name + type: object + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + items: + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + type: array + required: + - server + - privateKeySecretRef + type: object + ca: + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. 
+ type: string + required: + - secretName + type: object + selfSigned: + type: object + vault: + properties: + auth: + description: Vault authentication + properties: + appRole: + description: This Secret contains a AppRole and Secret + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + tokenSecretRef: + description: This Secret contains the Vault token key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + type: object + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + format: byte + type: string + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + required: + - auth + - server + - path + type: object + venafi: + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. 
+ properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud + type: string + required: + - url + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for the Venafi TPP instance + type: string + required: + - url + - credentialsRef + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + required: + - zone + type: object + type: object + status: + properties: + acme: + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: orders.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: Order + plural: orders + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded + CSR. If CommonName is not specified, the first DNSName specified will + be used as the CommonName. At least one of CommonName or a DNSNames + must be set. This field must match the corresponding field on the + DER encoded CSR. + type: string + config: + description: 'Config specifies a mapping from DNS identifiers to how + those identifiers should be solved when performing ACME challenges. + A config entry must exist for each domain listed in DNSNames and CommonName. + Only **one** of ''config'' or ''solvers'' may be specified, and if + both are specified then no action will be performed on the Order resource. This + field will be removed when support for solver config specified on + the Certificate under certificate.spec.acme has been removed. DEPRECATED: + this field will be removed in future. Solver configuration must instead + be provided on ACME Issuer resources.' 
+ items: + properties: + domains: + description: Domains is the list of domains that this SolverConfig + applies to. + items: + type: string + type: array + required: + - domains + type: object + type: array + csr: + description: Certificate signing request bytes in DER encoding. This + will be used when finalizing the order. This field must be set on + the order. + format: byte + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included + as part of the Order validation process. If CommonName is not specified, + the first DNSName specified will be used as the CommonName. At least + one of CommonName or a DNSNames must be set. This field must match + the corresponding field on the DER encoded CSR. + items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Order. If the Issuer does not + exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Order will be marked as + failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + required: + - csr + - issuerRef + type: object + status: + properties: + certificate: + description: Certificate is a copy of the PEM encoded certificate for + this Order. This field will be populated after the order has been + successfully finalized with the ACME server, and the order has transitioned + to the 'valid' state. + format: byte + type: string + challenges: + description: Challenges is a list of ChallengeSpecs for Challenges that + must be created in order to complete this Order. + items: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + config: + description: 'Config specifies the solver configuration for this + challenge. 
Only **one** of ''config'' or ''solver'' may be specified, + and if both are specified then no action will be performed on + the Challenge resource. DEPRECATED: the ''solver'' field should + be specified instead' + type: object + dnsName: + description: DNSName is the identifier that this challenge is + for, e.g. example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type + Issuer which should be used to create this Challenge. If the + Issuer does not exist, processing will be retried. If the Issuer + is not an 'ACME' Issuer, an error will be returned and the Challenge + will be marked as failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration + that should be used to solve this challenge resource. Only **one** + of 'config' or 'solver' may be specified, and if both are specified + then no action will be performed on the Challenge resource. + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. 
The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource + represents, e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for + this challenge. This can be used to lookup details about the + status of this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a + wildcard identifier, for example '*.example.com' + type: boolean + required: + - authzURL + - type + - url + - dnsName + - token + - key + - wildcard + - issuerRef + type: object + type: array + failureTime: + description: FailureTime stores the time that this order failed. This + is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates + for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why + the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. 
+ States 'success' and 'expired' are 'final' + enum: + - "" + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. This will initially be empty when the + resource is first created. The Order controller will populate this + field when the Order is first processed. This field will be immutable + after it is initially set. + type: string + type: object + required: + - metadata + - spec + - status + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager.yaml b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager.yaml new file mode 100644 index 0000000000..01e3087c3d --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/cert-manager.yaml @@ -0,0 +1,2405 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: certificates.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.secretName + name: Secret + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. 
+ name: Age + type: date + group: certmanager.k8s.io + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + description: ACME contains configuration specific to ACME Certificates. + Notably, this contains details on how the domain names listed on this + Certificate resource should be 'solved', i.e. mapping HTTP01 and DNS01 + providers to DNS names. + properties: + config: + items: + properties: + domains: + description: Domains is the list of domains that this SolverConfig + applies to. + items: + type: string + type: array + required: + - domains + type: object + type: array + required: + - config + type: object + commonName: + description: CommonName is a common name to be used on the Certificate. + If no CommonName is given, then the first entry in DNSNames is used + as the CommonName. The CommonName should have a length of 64 characters + or fewer to avoid generating invalid CSRs; in order to have longer + domain names, set the CommonName (or first DNSNames entry) to have + 64 characters or fewer, and then add the longer domain name to DNSNames. + type: string + dnsNames: + description: DNSNames is a list of subject alt names to be used on the + Certificate. 
If no CommonName is given, then the first entry in DNSNames + is used as the CommonName and must have a length of 64 characters + or fewer. + items: + type: string + type: array + duration: + description: Certificate default Duration + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses to be used on the + Certificate + items: + type: string + type: array + isCA: + description: IsCA will mark this Certificate as valid for signing. This + implies that the 'signing' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. + If the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the Certificate will + be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + keyAlgorithm: + description: KeyAlgorithm is the private key algorithm of the corresponding + private key for this certificate. If provided, allowed values are + either "rsa" or "ecdsa" If KeyAlgorithm is specified and KeySize is + not provided, key size of 256 will be used for "ecdsa" key algorithm + and key size of 2048 will be used for "rsa" key algorithm. + enum: + - rsa + - ecdsa + type: string + keyEncoding: + description: KeyEncoding is the private key cryptography standards (PKCS) + for this certificate's private key to be encoded in. If provided, + allowed values are "pkcs1" and "pkcs8" standing for PKCS#1 and PKCS#8, + respectively. If KeyEncoding is not specified, then PKCS#1 will be + used by default. + type: string + keySize: + description: KeySize is the key bit size of the corresponding private + key for this certificate. 
If provided, value must be between 2048 + and 8192 inclusive when KeyAlgorithm is empty or is set to "rsa", + and value must be one of (256, 384, 521) when KeyAlgorithm is set + to "ecdsa". + format: int64 + type: integer + organization: + description: Organization is the organization to be used on the Certificate + items: + type: string + type: array + renewBefore: + description: Certificate renew before expiration duration + type: string + secretName: + description: SecretName is the name of the secret resource to store + this secret in + type: string + required: + - secretName + - issuerRef + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + lastFailureTime: + format: date-time + type: string + notAfter: + description: The expiration time of the certificate stored in the secret + named by this resource in spec.secretName. 
+ format: date-time + type: string + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: certificaterequests.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: CertificateRequest + plural: certificaterequests + shortNames: + - cr + - crs + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + csr: + description: Byte slice containing the PEM encoded CertificateSigningRequest + format: byte + type: string + duration: + description: Requested certificate default Duration + type: string + isCA: + description: IsCA will mark the resulting certificate as valid for signing. + This implies that the 'signing' usage is set + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If + the 'kind' field is not set, or set to 'Issuer', an Issuer resource + with the given name in the same namespace as the CertificateRequest + will be used. If the 'kind' field is set to 'ClusterIssuer', a ClusterIssuer + with the provided name will be used. The 'name' field in this stanza + is required at all times. The group field refers to the API group + of the issuer which defaults to 'certmanager.k8s.io' if empty. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + required: + - issuerRef + type: object + status: + properties: + ca: + description: Byte slice containing the PEM encoded certificate authority + of the signed certificate. + format: byte + type: string + certificate: + description: Byte slice containing a PEM encoded signed certificate + resulting from the given certificate signing request. + format: byte + type: string + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: challenges.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.dnsName + name: Domain + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: Challenge + plural: challenges + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + config: + description: 'Config specifies the solver configuration for this challenge. + Only **one** of ''config'' or ''solver'' may be specified, and if + both are specified then no action will be performed on the Challenge + resource. DEPRECATED: the ''solver'' field should be specified instead' + type: object + dnsName: + description: DNSName is the identifier that this challenge is for, e.g. + example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Challenge. If the Issuer does + not exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Challenge will be marked + as failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration that should + be used to solve this challenge resource. Only **one** of 'config' + or 'solver' may be specified, and if both are specified then no action + will be performed on the Challenge resource. + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be used + to solve. If specified and a match is found, a dnsNames selector + will take precedence over a dnsZones selector. 
If multiple + solvers match with the same dnsNames value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be used + to solve. The most specific DNS zone match specified here + will take precedence over other DNS zone matches, so a solver + specifying sys.example.com will be selected over one specifying + example.com for the domain www.sys.example.com. If multiple + solvers match with the same dnsZones value, the solver with + the most matching labels in matchLabels will be selected. + If neither has more matches, the solver defined earlier in + the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the set + of certificate's that this challenge solver will apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource represents, + e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for this + challenge. This can be used to lookup details about the status of + this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a wildcard + identifier, for example '*.example.com' + type: boolean + required: + - authzURL + - type + - url + - dnsName + - token + - key + - wildcard + - issuerRef + type: object + status: + properties: + presented: + description: Presented will be set to true if the challenge values for + this challenge are currently 'presented'. This *does not* imply the + self check is passing. Only that the values have been 'submitted' + for the appropriate challenge mechanism (i.e. 
the DNS01 TXT record + has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Processing is used to denote whether this challenge should + be processed or not. This field will only be set to true by the 'scheduling' + component. It will only be set to false by the 'challenges' controller, + after the challenge has reached a final state or timed out. If this + field is set to false, the challenge controller will not take any + more action. + type: boolean + reason: + description: Reason contains human readable information on why the Challenge + is in the current state. + type: string + state: + description: State contains the current 'state' of the challenge. If + not set, the state of the challenge is unknown. + enum: + - "" + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + required: + - processing + - presented + - reason + type: object + required: + - metadata + - spec + - status + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: clusterissuers.certmanager.k8s.io +spec: + group: certmanager.k8s.io + names: + kind: ClusterIssuer + plural: clusterissuers + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + items: + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. 
The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + type: array + required: + - server + - privateKeySecretRef + type: object + ca: + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. + type: string + required: + - secretName + type: object + selfSigned: + type: object + vault: + properties: + auth: + description: Vault authentication + properties: + appRole: + description: This Secret contains a AppRole and Secret + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + tokenSecretRef: + description: This Secret contains the Vault token key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + type: object + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + format: byte + type: string + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + required: + - auth + - server + - path + type: object + venafi: + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud + type: string + required: + - url + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. 
If not specified, + the connection will be verified using the cert-manager system + root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for the Venafi TPP instance + type: string + required: + - url + - credentialsRef + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. + type: string + required: + - zone + type: object + type: object + status: + properties: + acme: + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: issuers.certmanager.k8s.io +spec: + group: certmanager.k8s.io + names: + kind: Issuer + plural: issuers + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + acme: + properties: + email: + description: Email is the email for this account + type: string + privateKeySecretRef: + description: PrivateKey is the name of a secret containing the private + key for this user account. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + required: + - name + type: object + server: + description: Server is the ACME server URL + type: string + skipTLSVerify: + description: If true, skip verifying the ACME server TLS certificate + type: boolean + solvers: + description: Solvers is a list of challenge solvers that will be + used to solve ACME challenges for the matching domains. + items: + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + type: array + required: + - server + - privateKeySecretRef + type: object + ca: + properties: + secretName: + description: SecretName is the name of the secret used to sign Certificates + issued by this Issuer. 
+ type: string + required: + - secretName + type: object + selfSigned: + type: object + vault: + properties: + auth: + description: Vault authentication + properties: + appRole: + description: This Secret contains a AppRole and Secret + properties: + path: + description: Where the authentication path is mounted in + Vault. + type: string + roleId: + type: string + secretRef: + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + required: + - path + - roleId + - secretRef + type: object + tokenSecretRef: + description: This Secret contains the Vault token key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + type: object + caBundle: + description: Base64 encoded CA bundle to validate Vault server certificate. + Only used if the Server URL is using HTTPS protocol. This parameter + is ignored for plain HTTP protocol connection. If not set the + system root certificates are used to validate the TLS connection. + format: byte + type: string + path: + description: Vault URL path to the certificate role + type: string + server: + description: Server is the vault connection address + type: string + required: + - auth + - server + - path + type: object + venafi: + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. + Only one of TPP or Cloud may be specified. 
+ properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for + the Venafi Cloud API token. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for Venafi Cloud + type: string + required: + - url + - apiTokenSecretRef + type: object + tpp: + description: TPP specifies Trust Protection Platform configuration + settings. Only one of TPP or Cloud may be specified. + properties: + caBundle: + description: CABundle is a PEM encoded TLS certifiate to use + to verify connections to the TPP instance. If specified, system + roots will not be used and the issuing CA for the TPP instance + must be verifiable using the provided root. If not specified, + the connection will be verified using the cert-manager system + root certificates. + format: byte + type: string + credentialsRef: + description: CredentialsRef is a reference to a Secret containing + the username and password for the TPP server. The secret must + contain two keys, 'username' and 'password'. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + required: + - name + type: object + url: + description: URL is the base URL for the Venafi TPP instance + type: string + required: + - url + - credentialsRef + type: object + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. + All requests made to the Venafi platform will be restricted by + the named zone policy. This field is required. 
+ type: string + required: + - zone + type: object + type: object + status: + properties: + acme: + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the + latest registered ACME account, in order to track changes made + to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also + be used to retrieve account details from the CA + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the details + of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for + the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - type + - status + type: object + type: array + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: orders.certmanager.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.state + name: State + type: string + - JSONPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - JSONPath: .status.reason + name: Reason + priority: 1 + type: string + - JSONPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when + this object was created. 
It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. + name: Age + type: date + group: certmanager.k8s.io + names: + kind: Order + plural: orders + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded + CSR. If CommonName is not specified, the first DNSName specified will + be used as the CommonName. At least one of CommonName or a DNSNames + must be set. This field must match the corresponding field on the + DER encoded CSR. + type: string + config: + description: 'Config specifies a mapping from DNS identifiers to how + those identifiers should be solved when performing ACME challenges. + A config entry must exist for each domain listed in DNSNames and CommonName. + Only **one** of ''config'' or ''solvers'' may be specified, and if + both are specified then no action will be performed on the Order resource. This + field will be removed when support for solver config specified on + the Certificate under certificate.spec.acme has been removed. DEPRECATED: + this field will be removed in future. Solver configuration must instead + be provided on ACME Issuer resources.' 
+ items: + properties: + domains: + description: Domains is the list of domains that this SolverConfig + applies to. + items: + type: string + type: array + required: + - domains + type: object + type: array + csr: + description: Certificate signing request bytes in DER encoding. This + will be used when finalizing the order. This field must be set on + the order. + format: byte + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included + as part of the Order validation process. If CommonName is not specified, + the first DNSName specified will be used as the CommonName. At least + one of CommonName or a DNSNames must be set. This field must match + the corresponding field on the DER encoded CSR. + items: + type: string + type: array + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer + which should be used to create this Order. If the Issuer does not + exist, processing will be retried. If the Issuer is not an 'ACME' + Issuer, an error will be returned and the Order will be marked as + failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + required: + - csr + - issuerRef + type: object + status: + properties: + certificate: + description: Certificate is a copy of the PEM encoded certificate for + this Order. This field will be populated after the order has been + successfully finalized with the ACME server, and the order has transitioned + to the 'valid' state. + format: byte + type: string + challenges: + description: Challenges is a list of ChallengeSpecs for Challenges that + must be created in order to complete this Order. + items: + properties: + authzURL: + description: AuthzURL is the URL to the ACME Authorization resource + that this challenge is a part of. + type: string + config: + description: 'Config specifies the solver configuration for this + challenge. 
Only **one** of ''config'' or ''solver'' may be specified, + and if both are specified then no action will be performed on + the Challenge resource. DEPRECATED: the ''solver'' field should + be specified instead' + type: object + dnsName: + description: DNSName is the identifier that this challenge is + for, e.g. example.com. + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type + Issuer which should be used to create this Challenge. If the + Issuer does not exist, processing will be retried. If the Issuer + is not an 'ACME' Issuer, an error will be returned and the Challenge + will be marked as failed. + properties: + group: + type: string + kind: + type: string + name: + type: string + required: + - name + type: object + key: + description: Key is the ACME challenge key for this challenge + type: string + solver: + description: Solver contains the domain solving configuration + that should be used to solve this challenge resource. Only **one** + of 'config' or 'solver' may be specified, and if both are specified + then no action will be performed on the Challenge resource. + properties: + selector: + description: Selector selects a set of DNSNames on the Certificate + resource that should be solved using this challenge solver. + properties: + dnsNames: + description: List of DNSNames that this solver will be + used to solve. If specified and a match is found, a + dnsNames selector will take precedence over a dnsZones + selector. If multiple solvers match with the same dnsNames + value, the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + dnsZones: + description: List of DNSZones that this solver will be + used to solve. 
The most specific DNS zone match specified + here will take precedence over other DNS zone matches, + so a solver specifying sys.example.com will be selected + over one specifying example.com for the domain www.sys.example.com. + If multiple solvers match with the same dnsZones value, + the solver with the most matching labels in matchLabels + will be selected. If neither has more matches, the solver + defined earlier in the list will be selected. + items: + type: string + type: array + matchLabels: + description: A label selector that is used to refine the + set of certificate's that this challenge solver will + apply to. + type: object + type: object + type: object + token: + description: Token is the ACME challenge token for this challenge. + type: string + type: + description: Type is the type of ACME challenge this resource + represents, e.g. "dns01" or "http01" + type: string + url: + description: URL is the URL of the ACME Challenge resource for + this challenge. This can be used to lookup details about the + status of this challenge. + type: string + wildcard: + description: Wildcard will be true if this challenge is for a + wildcard identifier, for example '*.example.com' + type: boolean + required: + - authzURL + - type + - url + - dnsName + - token + - key + - wildcard + - issuerRef + type: object + type: array + failureTime: + description: FailureTime stores the time that this order failed. This + is used to influence garbage collection and back-off. + format: date-time + type: string + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates + for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why + the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. 
+ States 'success' and 'expired' are 'final' + enum: + - "" + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + type: string + url: + description: URL of the Order. This will initially be empty when the + resource is first created. The Order controller will populate this + field when the Order is first processed. This field will be immutable + after it is initially set. + type: string + type: object + required: + - metadata + - spec + - status + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + certmanager.k8s.io/disable-validation: "true" + +--- +--- +# Source: cert-manager/charts/cainjector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager-cainjector + namespace: "cert-manager" + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cainjector-v0.9.1 + +--- +# Source: cert-manager/charts/webhook/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 + +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager + namespace: "cert-manager" + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 + +--- +# Source: cert-manager/charts/cainjector/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector 
+ app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cainjector-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps", "events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cainjector-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: "cert-manager" + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-leaderelection + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + # Used for leader election by the controller + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create", "update", "patch"] + +--- + +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: 
cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", 
"clusterissuers", "issuers", "orders"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates/finalizers"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders"] + verbs: ["create", "delete"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders", "orders/status"] + verbs: ["update"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders", "clusterissuers", "issuers", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: 
cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + # Use to update challenge resource status + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["certmanager.k8s.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + +--- + +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests"] + verbs: 
["create", "update", "delete"] + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["extensions"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-leaderelection + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-leaderelection +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + 
helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + 
app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: "cert-manager" + kind: ServiceAccount + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + +--- +# Source: cert-manager/charts/webhook/templates/rbac.yaml +### Webhook ### +--- +# apiserver gets the auth-delegator role to delegate auth decisions to +# the core apiserver +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:auth-delegator + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +roleRef: + 
apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager + +--- + +# apiserver gets the ability to read authentication. This allows it to +# read the specific configmap that has the requestheader-* entries to +# api agg +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:webhook-authentication-reader + namespace: kube-system + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:webhook-requester + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +rules: +- apiGroups: + - admission.certmanager.k8s.io + resources: + - certificates + - certificaterequests + - issuers + - clusterissuers + verbs: + - create + +--- +# Source: cert-manager/charts/webhook/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + type: ClusterIP + ports: + - name: https + port: 443 + targetPort: 6443 + selector: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + +--- +# Source: 
cert-manager/charts/cainjector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: "cert-manager" + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cainjector-v0.9.1 +spec: + replicas: 1 + selector: + matchLabels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cainjector-v0.9.1 + annotations: + spec: + serviceAccountName: cert-manager-cainjector + containers: + - name: cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v0.9.1" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=$(POD_NAMESPACE) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {} + + +--- +# Source: cert-manager/charts/webhook/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + replicas: 1 + selector: + matchLabels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 + annotations: + spec: + serviceAccountName: cert-manager-webhook + containers: + - name: webhook + image: "quay.io/jetstack/cert-manager-webhook:v0.9.1" + 
imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=6443 + - --tls-cert-file=/certs/tls.crt + - --tls-private-key-file=/certs/tls.key + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + {} + + volumeMounts: + - name: certs + mountPath: /certs + volumes: + - name: certs + secret: + secretName: cert-manager-webhook-webhook-tls + +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: "cert-manager" + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 +spec: + replicas: 1 + selector: + matchLabels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: cert-manager-v0.9.1 + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + serviceAccountName: cert-manager + containers: + - name: cert-manager + image: "quay.io/jetstack/cert-manager-controller:v0.9.1" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=$(POD_NAMESPACE) + ports: + - containerPort: 9402 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: 10m + memory: 32Mi + + +--- +# Source: cert-manager/charts/webhook/templates/apiservice.yaml +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.admission.certmanager.k8s.io + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager 
+ app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 + annotations: + certmanager.k8s.io/inject-ca-from: "cert-manager/cert-manager-webhook-webhook-tls" +spec: + group: admission.certmanager.k8s.io + groupPriorityMinimum: 1000 + versionPriority: 15 + service: + name: cert-manager-webhook + namespace: "cert-manager" + version: v1beta1 + +--- +# Source: cert-manager/charts/webhook/templates/pki.yaml +--- +# Create a selfsigned Issuer, in order to create a root CA certificate for +# signing webhook serving certificates +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: cert-manager-webhook-selfsign + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + selfSigned: {} + +--- + +# Generate a CA Certificate used to sign certificates for the webhook +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: cert-manager-webhook-ca + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + secretName: cert-manager-webhook-ca + duration: 43800h # 5y + issuerRef: + name: cert-manager-webhook-selfsign + commonName: "ca.webhook.cert-manager" + isCA: true + +--- + +# Create an Issuer that uses the above generated CA certificate to issue certs +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: cert-manager-webhook-ca + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + ca: + secretName: cert-manager-webhook-ca + +--- + +# Finally, generate a serving certificate for the webhook to use +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate 
+metadata: + name: cert-manager-webhook-webhook-tls + namespace: "cert-manager" + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 +spec: + secretName: cert-manager-webhook-webhook-tls + duration: 8760h # 1y + issuerRef: + name: cert-manager-webhook-ca + dnsNames: + - cert-manager-webhook + - cert-manager-webhook.cert-manager + - cert-manager-webhook.cert-manager.svc + +--- +# Source: cert-manager/templates/servicemonitor.yaml + + +--- +# Source: cert-manager/charts/webhook/templates/validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/managed-by: Tiller + helm.sh/chart: webhook-v0.9.1 + annotations: + certmanager.k8s.io/inject-apiserver-ca: "true" +webhooks: + - name: certificates.admission.certmanager.k8s.io + namespaceSelector: + matchExpressions: + - key: "certmanager.k8s.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "certmanager.k8s.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - certificates + failurePolicy: Fail + clientConfig: + service: + name: kubernetes + namespace: default + path: /apis/admission.certmanager.k8s.io/v1beta1/certificates + - name: issuers.admission.certmanager.k8s.io + namespaceSelector: + matchExpressions: + - key: "certmanager.k8s.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "certmanager.k8s.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - issuers + failurePolicy: Fail + clientConfig: + service: + name: kubernetes 
+ namespace: default + path: /apis/admission.certmanager.k8s.io/v1beta1/issuers + - name: clusterissuers.admission.certmanager.k8s.io + namespaceSelector: + matchExpressions: + - key: "certmanager.k8s.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "certmanager.k8s.io" + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - clusterissuers + failurePolicy: Fail + clientConfig: + service: + name: kubernetes + namespace: default + path: /apis/admission.certmanager.k8s.io/v1beta1/clusterissuers + diff --git a/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/download-cert-manager.sh b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/download-cert-manager.sh new file mode 100755 index 0000000000..148a1cdb55 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/cert-manager-0.9.1/download-cert-manager.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#!/usr/bin/env bash + +# Download and unpack cert-manager +CERT_MANAGER_VERSION=0.9.1 +ARCHIVE_DOWNLOAD_URL=https://github.com/jetstack/cert-manager/archive/v${CERT_MANAGER_VERSION}.tar.gz +YAML_URL=https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VERSION}/cert-manager.yaml + +wget $ARCHIVE_DOWNLOAD_URL +tar xzf v${CERT_MANAGER_VERSION}.tar.gz + +( +# subshell in downloaded directory +cd cert-manager-${CERT_MANAGER_VERSION} || exit + +# Copy the CRD yaml file +cp deploy/manifests/00-crds.yaml ../cert-manager-crds.yaml +) + +# Download the cert-manager yaml file +wget $YAML_URL + +# Clean up. +rm -rf cert-manager-${CERT_MANAGER_VERSION} +rm v${CERT_MANAGER_VERSION}.tar.gz diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/OWNERS b/test/vendor/knative.dev/serving/third_party/config/monitoring/OWNERS new file mode 100644 index 0000000000..ab8e1f2983 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- monitoring-approvers + +reviewers: +- monitoring-reviewers + +labels: +- area/monitoring diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/elasticsearch.yaml b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/elasticsearch.yaml new file mode 100644 index 0000000000..70d82ba299 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/elasticsearch.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-logging + namespace: knative-monitoring + labels: + app: elasticsearch-logging + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Elasticsearch" +spec: + ports: + - port: 9200 + protocol: TCP + targetPort: db + selector: + app: elasticsearch-logging +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: elasticsearch-logging + namespace: knative-monitoring + labels: + app: elasticsearch-logging + kubernetes.io/cluster-service: "true" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: elasticsearch-logging + labels: + app: elasticsearch-logging + kubernetes.io/cluster-service: "true" +rules: +- apiGroups: + - "" + resources: + - "services" + - "namespaces" + - "endpoints" + verbs: + - "get" +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: knative-monitoring + name: elasticsearch-logging + labels: + app: elasticsearch-logging + kubernetes.io/cluster-service: "true" +subjects: +- kind: ServiceAccount + name: elasticsearch-logging + namespace: knative-monitoring + apiGroup: "" +roleRef: + kind: ClusterRole + name: elasticsearch-logging + apiGroup: "" +--- +# Elasticsearch deployment itself +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: elasticsearch-logging + namespace: knative-monitoring + labels: + app: elasticsearch-logging + version: v5.6.4 + kubernetes.io/cluster-service: "true" +spec: + serviceName: elasticsearch-logging + replicas: 2 + selector: + matchLabels: + app: 
elasticsearch-logging + version: v5.6.4 + template: + metadata: + labels: + app: elasticsearch-logging + version: v5.6.4 + kubernetes.io/cluster-service: "true" + spec: + serviceAccountName: elasticsearch-logging + containers: + - image: k8s.gcr.io/elasticsearch:v5.6.4 + name: elasticsearch-logging + resources: + limits: + cpu: 1000m + requests: + cpu: 100m + ports: + - containerPort: 9200 + name: db + protocol: TCP + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - name: elasticsearch-logging + mountPath: /data + env: + - name: "NAMESPACE" + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: elasticsearch-logging + emptyDir: {} + # Elasticsearch requires vm.max_map_count to be at least 262144. + # If your OS already sets up this number to a higher value, feel free + # to remove this init container. + initContainers: + - image: alpine:3.6 + command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"] + name: elasticsearch-logging-init + securityContext: + privileged: true diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/kibana.yaml b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/kibana.yaml new file mode 100644 index 0000000000..cf13fc1cd1 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/logging/elasticsearch/kibana.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: Service +metadata: + name: kibana-logging + namespace: knative-monitoring + labels: + app: kibana-logging + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Kibana" +spec: + type: NodePort + selector: + app: kibana-logging + ports: + - port: 5601 + protocol: TCP + targetPort: ui +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kibana-logging + namespace: knative-monitoring + labels: + app: kibana-logging + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + matchLabels: + app: kibana-logging + 
template: + metadata: + labels: + app: kibana-logging + spec: + containers: + - name: kibana-logging + image: docker.elastic.co/kibana/kibana:5.6.4 + resources: + limits: + cpu: 1000m + requests: + cpu: 100m + env: + - name: ELASTICSEARCH_URL + value: http://elasticsearch-logging:9200 + - name: SERVER_BASEPATH + value: /api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy + - name: XPACK_MONITORING_ENABLED + value: "false" + - name: XPACK_SECURITY_ENABLED + value: "false" + ports: + - containerPort: 5601 + name: ui + protocol: TCP diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/kube-state-metrics.yaml b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/kube-state-metrics.yaml new file mode 100644 index 0000000000..238b342f0c --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/kubernetes/kube-state-metrics.yaml @@ -0,0 +1,172 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-state-metrics + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: kube-state-metrics-resizer + namespace: knative-monitoring +rules: +- apiGroups: [""] + resources: + - pods + verbs: ["get"] +- apiGroups: ["extensions"] + resources: + - deployments + resourceNames: ["kube-state-metrics"] + verbs: ["get", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: kube-state-metrics + namespace: knative-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kube-state-metrics-resizer +subjects: +- kind: ServiceAccount + name: kube-state-metrics + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +# kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: kube-state-metrics + 
namespace: knative-monitoring +rules: +- apiGroups: [""] + resources: + - configmaps + - secrets + - nodes + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - namespaces + - endpoints + verbs: ["list", "watch"] +- apiGroups: ["extensions"] + resources: + - daemonsets + - deployments + - replicasets + - ingresses + verbs: ["list", "watch"] +- apiGroups: ["apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: ["list", "watch"] +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: ["list", "watch"] +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +- apiGroups: ["autoscaling.k8s.io"] + resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kube-state-metrics + namespace: knative-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-state-metrics +subjects: +- kind: ServiceAccount + name: kube-state-metrics + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kube-state-metrics + name: kube-state-metrics + namespace: knative-monitoring +spec: + selector: + matchLabels: + app: kube-state-metrics + replicas: 1 + template: + metadata: + labels: + app: kube-state-metrics + spec: + serviceAccountName: kube-state-metrics + containers: + - name: kube-state-metrics + image: quay.io/coreos/kube-state-metrics:v1.7.2 + ports: + - name: http-metrics + containerPort: 8080 + - name: telemetry + containerPort: 8081 + 
livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-state-metrics + namespace: knative-monitoring + labels: + app: kube-state-metrics + annotations: + prometheus.io/scrape: 'true' +spec: + ports: + - name: http-metrics + port: 8080 + targetPort: http-metrics + protocol: TCP + - name: telemetry + port: 8081 + targetPort: telemetry + protocol: TCP + selector: + app: kube-state-metrics diff --git a/test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/LICENSE b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/LICENSE similarity index 100% rename from test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/LICENSE rename to test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/LICENSE diff --git a/test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/NOTICE b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/NOTICE similarity index 100% rename from test/vendor/github.com/knative/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/NOTICE rename to test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/NOTICE diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/deployment-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/deployment-dashboard.json new file mode 100644 index 0000000000..cfde91a1c9 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/deployment-dashboard.json @@ -0,0 +1,738 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-deployment + namespace: knative-monitoring +data: + kubernetes-deployment-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "rows": [ + { + "collapse": false, + "editable": false, + "height": "200px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 8, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m]))", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "CPU", + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + 
} + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 9, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "GB", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}) / 1024^3", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Memory", + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "Bps", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 7, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ 
+ { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(rate(container_network_transmit_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{namespace=\"$deployment_namespace\",pod_name=~\"$deployment_name.*\"}[3m]))", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Network", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "100px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_deployment_spec_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "metric": "kube_deployment_spec_replicas", + 
"refId": "A", + "step": 600 + } + ], + "title": "Desired Replicas", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 6, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "min(kube_deployment_status_replicas_available{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Available Replicas", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 3, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + 
"name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_deployment_status_observed_generation{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Observed Generation", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_deployment_metadata_generation{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Metadata Generation", + "type": "singlestat", + 
"valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "350px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 1, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(kube_deployment_status_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "current replicas", + "refId": "A", + "step": 30 + }, + { + "expr": "min(kube_deployment_status_replicas_available{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "available", + "refId": "B", + "step": 30 + }, + { + "expr": "max(kube_deployment_status_replicas_unavailable{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "unavailable", + "refId": "C", + "step": 30 + }, + { + "expr": "min(kube_deployment_status_replicas_updated{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 
2, + "legendFormat": "updated", + "refId": "D", + "step": 30 + }, + { + "expr": "max(kube_deployment_spec_replicas{deployment=\"$deployment_name\",namespace=\"$deployment_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "desired", + "refId": "E", + "step": 30 + } + ], + "title": "Replicas", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "", + "logBase": 1, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "show": false + } + ] + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "deployment_namespace", + "options": [], + "query": "label_values(kube_deployment_metadata_generation, namespace)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": null, + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Deployment", + "multi": false, + "name": "deployment_name", + "options": [], + "query": "label_values(kube_deployment_metadata_generation{namespace=\"$deployment_namespace\"}, deployment)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "deployment", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + 
"1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Deployment", + "version": 1 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-capacity-planning-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-capacity-planning-dashboard.json new file mode 100644 index 0000000000..940153a23a --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-capacity-planning-dashboard.json @@ -0,0 +1,981 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-capacity-planning + namespace: knative-monitoring +data: + kubernetes-capacity-planning-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 3, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + 
"renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_cpu{mode=\"idle\"}[2m])) * 100", + "hide": false, + "intervalFactor": 10, + "legendFormat": "", + "refId": "A", + "step": 50 + } + ], + "title": "Idle CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "cpu usage", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 9, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_load1)", + "intervalFactor": 4, + "legendFormat": "load 1m", + "refId": "A", + "step": 20, + "target": "" + }, + { + "expr": "sum(node_load5)", + "intervalFactor": 4, + "legendFormat": "load 5m", + "refId": "B", + "step": 20, + "target": "" + }, + { + "expr": "sum(node_load15)", + "intervalFactor": 4, + "legendFormat": "load 15m", + "refId": "C", + "step": 20, + "target": "" + } + ], + "title": "System Load", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 
0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 4, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "node_memory_SwapFree{instance=\"172.17.0.1:9100\",job=\"prometheus\"}", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)", + "intervalFactor": 2, + "legendFormat": "memory usage", + "metric": "memo", + "refId": "A", + "step": 10, + "target": "" + }, + { + "expr": "sum(node_memory_Buffers)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "memory buffers", + "metric": "memo", + "refId": "B", + "step": 10, + "target": "" + }, + { + "expr": "sum(node_memory_Cached)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "memory cached", + "metric": "memo", + "refId": "C", + "step": 10, + "target": "" + }, + { + "expr": "sum(node_memory_MemFree)", 
+ "interval": "", + "intervalFactor": 2, + "legendFormat": "memory free", + "metric": "memo", + "refId": "D", + "step": 10, + "target": "" + } + ], + "title": "Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "((sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)) / sum(node_memory_MemTotal)) * 100", + "intervalFactor": 2, + "metric": "", + "refId": "A", + "step": 60, + "target": "" + } + ], + "thresholds": "80, 90", + "title": "Memory Usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "New Row", + 
"titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "246px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 6, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "{instance=\"172.17.0.1:9100\"}", + "yaxis": 2 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_disk_bytes_read[5m]))", + "hide": false, + "intervalFactor": 4, + "legendFormat": "read", + "refId": "A", + "step": 20, + "target": "" + }, + { + "expr": "sum(rate(node_disk_bytes_written[5m]))", + "intervalFactor": 4, + "legendFormat": "written", + "refId": "B", + "step": 20 + }, + { + "expr": "sum(rate(node_disk_io_time_ms[5m]))", + "intervalFactor": 4, + "legendFormat": "io time", + "refId": "C", + "step": 20 + } + ], + "title": "Disk I/O", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "ms", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + 
"rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 12, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(node_filesystem_size{device!=\"rootfs\"}) - sum(node_filesystem_free{device!=\"rootfs\"})) / sum(node_filesystem_size{device!=\"rootfs\"})", + "intervalFactor": 2, + "refId": "A", + "step": 60, + "target": "" + } + ], + "thresholds": "0.75, 0.9", + "title": "Disk Space Usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 8, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 
2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "transmitted", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_receive_bytes{device!~\"lo\"}[5m]))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10, + "target": "" + } + ], + "title": "Network Received", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "bytes", + "logBase": 1, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 10, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "transmitted", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_transmit_bytes{device!~\"lo\"}[5m]))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10, + "target": "" + } + ], + "title": "Network Transmitted", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 
0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "bytes", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "276px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 11, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 11, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_info)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Current number of Pods", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_node_status_capacity_pods)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Maximum capacity of pods", + "refId": "B", + "step": 10 + } + ], + "title": "Cluster Pod Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + 
"rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 7, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "100 - (sum(kube_node_status_capacity_pods) - sum(kube_pod_info)) / sum(kube_node_status_capacity_pods) * 100", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 60, + "target": "" + } + ], + "thresholds": "80, 90", + "title": "Pod Utilization", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Capacity Planning", + "version": 4 + } diff --git 
a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-health-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-health-dashboard.json new file mode 100644 index 0000000000..24bec2f9ec --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-health-dashboard.json @@ -0,0 +1,702 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-cluster-health + namespace: knative-monitoring +data: + kubernetes-cluster-health-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "editable": false, + "height": "254px", + "panels": [ + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 1, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 
118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(up{job=~\"apiserver|kube-scheduler|kube-controller-manager\"} == 0)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Control Plane Components Down", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "Everything UP and healthy", + "value": "null" + }, + { + "op": "=", + "text": "", + "value": "" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(ALERTS{alertstate=\"firing\",alertname!=\"DeadMansSwitch\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Alerts Firing", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + 
"colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 3, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(ALERTS{alertstate=\"pending\",alertname!=\"DeadMansSwitch\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "3, 5", + "title": "Alerts Pending", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 4, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + 
"postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "count(increase(kube_pod_container_status_restarts[1h]) > 5)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Crashlooping Pods", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": false, + "title": "Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(kube_node_status_condition{condition=\"Ready\",status!=\"true\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", 
+ "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Node Not Ready", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 6, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(kube_node_status_condition{condition=\"DiskPressure\",status=\"true\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Node Disk Pressure", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + 
"hideTimeOverride": false, + "id": 7, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(kube_node_status_condition{condition=\"MemoryPressure\",status=\"true\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Node Memory Pressure", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 8, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": 
"sum(kube_node_spec_unschedulable)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Nodes Unschedulable", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": false, + "title": "Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Cluster Health", + "version": 9 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-status-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-status-dashboard.json new file mode 100644 index 0000000000..8b0eb17d10 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-cluster-status-dashboard.json @@ -0,0 +1,817 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-cluster-status + namespace: knative-monitoring +data: + kubernetes-cluster-status-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "graphTooltip": 0, + "hideControls": false, + 
"links": [], + "rows": [ + { + "collapse": false, + "editable": false, + "height": "129px", + "panels": [ + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 6, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(up{job=~\"apiserver|kube-scheduler|kube-controller-manager\"} == 0)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Control Plane UP", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "UP", + "value": "null" + } + ], + "valueName": "total" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 6, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + 
"prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 6, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(ALERTS{alertstate=\"firing\",alertname!=\"DeadMansSwitch\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "3, 5", + "title": "Alerts Firing", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": true, + "title": "Cluster Health", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "168px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 1, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"apiserver\"} == 1) / count(up{job=\"apiserver\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "API Servers UP", + "type": "singlestat", + "valueFontSize": "80%", 
+ "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"kube-controller-manager\"} == 1) / count(up{job=\"kube-controller-manager\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "Controller Managers UP", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 3, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"kube-scheduler\"} == 1) / count(up{job=\"kube-scheduler\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "Schedulers UP", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 4, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "count(increase(kube_pod_container_status_restarts{namespace=~\"kube-system|tectonic-system\"}[1h]) > 5)", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "1, 3", + "title": "Crashlooping Control Plane Pods", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + 
"value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": true, + "title": "Control Plane Status", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "158px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 8, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(100 - (avg by (instance) (rate(node_cpu{job=\"node-exporter\",mode=\"idle\"}[5m])) * 100)) / count(node_cpu{job=\"node-exporter\",mode=\"idle\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "80, 90", + "title": "CPU Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 7, + "links": [], + "mappingType": 1, + 
"mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "((sum(node_memory_MemTotal) - sum(node_memory_MemFree) - sum(node_memory_Buffers) - sum(node_memory_Cached)) / sum(node_memory_MemTotal)) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "80, 90", + "title": "Memory Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 9, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(node_filesystem_size{device!=\"rootfs\"}) - sum(node_filesystem_free{device!=\"rootfs\"})) / 
sum(node_filesystem_size{device!=\"rootfs\"})", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "80, 90", + "title": "Filesystem Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 10, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "100 - (sum(kube_node_status_capacity_pods) - sum(kube_pod_info)) / sum(kube_node_status_capacity_pods) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "80, 90", + "title": "Pod Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": true, + "title": "Capacity Planning", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + 
"30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Cluster Status", + "version": 3 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-control-plane-status-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-control-plane-status-dashboard.json new file mode 100644 index 0000000000..09bf5d5f51 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-control-plane-status-dashboard.json @@ -0,0 +1,633 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-control-plane-status + namespace: knative-monitoring +data: + kubernetes-control-plane-status-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "graphTooltip": 0, + "hideControls": false, + "links": [], + "rows": [ + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 1, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + 
"maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"apiserver\"} == 1) / sum(up{job=\"apiserver\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "API Servers UP", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"kube-controller-manager\"} == 1) / sum(up{job=\"kube-controller-manager\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "Controller Managers UP", 
+ "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 3, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(up{job=\"kube-scheduler\"} == 1) / sum(up{job=\"kube-scheduler\"})) * 100", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "thresholds": "50, 80", + "title": "Schedulers UP", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 4, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + 
"name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(sum by(instance) (rate(apiserver_request_count{code=~\"5..\"}[5m])) / sum by(instance) (rate(apiserver_request_count[5m]))) * 100", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 600 + } + ], + "thresholds": "5, 10", + "title": "API Server Request Error Rate", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 7, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(verb) 
(rate(apiserver_latency_seconds:quantile[5m]) >= 0)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 30 + } + ], + "title": "API Server Request Latency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 5, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cluster:scheduler_e2e_scheduling_latency_seconds:quantile", + "format": "time_series", + "intervalFactor": 2, + "refId": "A", + "step": 60 + } + ], + "title": "End to End Scheduling Latency", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "dtdurations", + 
"logBase": 1, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 6, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(instance) (rate(apiserver_request_count{code!~\"2..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Error Rate", + "refId": "A", + "step": 60 + }, + { + "expr": "sum by(instance) (rate(apiserver_request_count[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Request Rate", + "refId": "B", + "step": 60 + } + ], + "title": "API Server Request Rates", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + 
"time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Control Plane Status", + "version": 3 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-resource-requests-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-resource-requests-dashboard.json new file mode 100644 index 0000000000..f9f5cd9e1c --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/kubernetes-resource-requests-dashboard.json @@ -0,0 +1,410 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-resource-requests + namespace: knative-monitoring +data: + kubernetes-resource-requests-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "graphTooltip": 0, + "hideControls": false, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "editable": false, + "height": "300px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "description": "This represents the total [CPU resource requests](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) in the cluster.\nFor comparison the total [allocatable CPU cores](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node-allocatable.md) is also shown.", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 
0.22)" + }, + "id": 1, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "min(sum(kube_node_status_allocatable_cpu_cores) by (instance))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Allocatable CPU Cores", + "refId": "A", + "step": 20 + }, + { + "expr": "max(sum(kube_pod_container_resource_requests_cpu_cores) by (instance))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Requested CPU Cores", + "refId": "B", + "step": 20 + } + ], + "title": "CPU Cores", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "CPU Cores", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + 
"prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "max(sum(kube_pod_container_resource_requests_cpu_cores) by (instance)) / min(sum(kube_node_status_allocatable_cpu_cores) by (instance)) * 100", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 240 + } + ], + "thresholds": "80, 90", + "title": "CPU Cores", + "transparent": false, + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "CPU Cores", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "300px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "description": "This represents the total [memory resource requests](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) in the cluster.\nFor comparison the total [allocatable memory](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node-allocatable.md) is also shown.", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 3, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 9, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "min(sum(kube_node_status_allocatable_memory_bytes) by (instance))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Allocatable Memory", + "refId": "A", + "step": 20 + }, + { + "expr": "max(sum(kube_pod_container_resource_requests_memory_bytes) by (instance))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "Requested Memory", + "refId": "B", + "step": 20 + } + ], + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": "Memory", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 4, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "max(sum(kube_pod_container_resource_requests_memory_bytes) by (instance)) / min(sum(kube_node_status_allocatable_memory_bytes) by (instance)) * 100", + "intervalFactor": 2, + "legendFormat": "", + "refId": 
"A", + "step": 240 + } + ], + "thresholds": "80, 90", + "title": "Memory", + "transparent": false, + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Memory", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes Resource Requests", + "version": 2 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/nodes-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/nodes-dashboard.json new file mode 100644 index 0000000000..ac19644c69 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/nodes-dashboard.json @@ -0,0 +1,829 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-nodes + namespace: knative-monitoring +data: + kubernetes-nodes-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "description": "Dashboard to get an overview of one server", + "editable": false, + "gnetId": 22, + "graphTooltip": 0, + "hideControls": false, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { 
+ "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 3, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "100 - (avg by (cpu) (irate(node_cpu{mode=\"idle\", instance=\"$server\"}[5m])) * 100)", + "hide": false, + "intervalFactor": 10, + "legendFormat": "{{cpu}}", + "refId": "A", + "step": 50 + } + ], + "title": "Idle CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "cpu usage", + "logBase": 1, + "max": 100, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 9, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + 
"nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node_load1{instance=\"$server\"}", + "intervalFactor": 4, + "legendFormat": "load 1m", + "refId": "A", + "step": 20, + "target": "" + }, + { + "expr": "node_load5{instance=\"$server\"}", + "intervalFactor": 4, + "legendFormat": "load 5m", + "refId": "B", + "step": 20, + "target": "" + }, + { + "expr": "node_load15{instance=\"$server\"}", + "intervalFactor": 4, + "legendFormat": "load 15m", + "refId": "C", + "step": 20, + "target": "" + } + ], + "title": "System Load", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 4, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": 
"node_memory_SwapFree{instance=\"172.17.0.1:9100\",job=\"prometheus\"}", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node_memory_MemTotal{instance=\"$server\"} - node_memory_MemFree{instance=\"$server\"} - node_memory_Buffers{instance=\"$server\"} - node_memory_Cached{instance=\"$server\"}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "memory used", + "metric": "", + "refId": "C", + "step": 10 + }, + { + "expr": "node_memory_Buffers{instance=\"$server\"}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "memory buffers", + "metric": "", + "refId": "E", + "step": 10 + }, + { + "expr": "node_memory_Cached{instance=\"$server\"}", + "intervalFactor": 2, + "legendFormat": "memory cached", + "metric": "", + "refId": "F", + "step": 10 + }, + { + "expr": "node_memory_MemFree{instance=\"$server\"}", + "intervalFactor": 2, + "legendFormat": "memory free", + "metric": "", + "refId": "D", + "step": 10 + } + ], + "title": "Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "((node_memory_MemTotal{instance=\"$server\"} - node_memory_MemFree{instance=\"$server\"} - node_memory_Buffers{instance=\"$server\"} - node_memory_Cached{instance=\"$server\"}) / node_memory_MemTotal{instance=\"$server\"}) * 100", + "intervalFactor": 2, + "refId": "A", + "step": 60, + "target": "" + } + ], + "thresholds": "80, 90", + "title": "Memory Usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 6, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "{instance=\"172.17.0.1:9100\"}", + "yaxis": 2 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": 
false, + "targets": [ + { + "expr": "sum by (instance) (rate(node_disk_bytes_read{instance=\"$server\"}[2m]))", + "hide": false, + "intervalFactor": 4, + "legendFormat": "read", + "refId": "A", + "step": 20, + "target": "" + }, + { + "expr": "sum by (instance) (rate(node_disk_bytes_written{instance=\"$server\"}[2m]))", + "intervalFactor": 4, + "legendFormat": "written", + "refId": "B", + "step": 20 + }, + { + "expr": "sum by (instance) (rate(node_disk_io_time_ms{instance=\"$server\"}[2m]))", + "intervalFactor": 4, + "legendFormat": "io time", + "refId": "C", + "step": 20 + } + ], + "title": "Disk I/O", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "ms", + "logBase": 1, + "show": true + } + ] + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "prometheus", + "editable": false, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "hideTimeOverride": false, + "id": 7, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "(sum(node_filesystem_size{device!=\"rootfs\",instance=\"$server\"}) - 
sum(node_filesystem_free{device!=\"rootfs\",instance=\"$server\"})) / sum(node_filesystem_size{device!=\"rootfs\",instance=\"$server\"})", + "intervalFactor": 2, + "refId": "A", + "step": 60, + "target": "" + } + ], + "thresholds": "0.75, 0.9", + "title": "Disk Space Usage", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 8, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "transmitted", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_receive_bytes{instance=\"$server\",device!~\"lo\"}[5m])", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{device}}", + "refId": "A", + "step": 10, + "target": "" + } + ], + "title": "Network Received", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "bytes", 
+ "logBase": 1, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 10, + "isNew": false, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "transmitted", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_transmit_bytes{instance=\"$server\",device!~\"lo\"}[5m])", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{device}}", + "refId": "B", + "step": 10, + "target": "" + } + ], + "title": "Network Transmitted", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "bytes", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "server", + "options": [], + "query": "label_values(node_boot_time, instance)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": 
"", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Nodes", + "version": 2 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/pods-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/pods-dashboard.json new file mode 100644 index 0000000000..6e76e03508 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/pods-dashboard.json @@ -0,0 +1,425 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-pods + namespace: knative-monitoring +data: + kubernetes-pods-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "graphTooltip": 1, + "hideControls": false, + "links": [], + "refresh": false, + "rows": [ + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 1, + "isNew": false, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": 
false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(container_name) (container_memory_usage_bytes{pod_name=\"$pod\", container_name=~\"$container\", container_name!=\"POD\"})", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Current: {{ container_name }}", + "metric": "container_memory_usage_bytes", + "refId": "A", + "step": 15 + }, + { + "expr": "kube_pod_container_resource_requests_memory_bytes{pod=\"$pod\", container=~\"$container\"}", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "Requested: {{ container }}", + "metric": "kube_pod_container_resource_requests_memory_bytes", + "refId": "B", + "step": 20 + }, + { + "expr": "kube_pod_container_resource_limits_memory_bytes{pod=\"$pod\", container=~\"$container\"}", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "Limit: {{ container }}", + "metric": "kube_pod_container_resource_limits_memory_bytes", + "refId": "C", + "step": 20 + } + ], + "title": "Memory Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 
112, 0.22)" + }, + "id": 2, + "isNew": false, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (container_name)(rate(container_cpu_usage_seconds_total{image!=\"\",container_name!=\"POD\",pod_name=\"$pod\"}[1m]))", + "intervalFactor": 2, + "legendFormat": "{{ container_name }}", + "refId": "A", + "step": 30 + }, + { + "expr": "kube_pod_container_resource_requests_cpu_cores{pod=\"$pod\", container=~\"$container\"}", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "Requested: {{ container }}", + "metric": "kube_pod_container_resource_requests_cpu_cores", + "refId": "B", + "step": 20 + }, + { + "expr": "kube_pod_container_resource_limits_cpu_cores{pod=\"$pod\", container=~\"$container\"}", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "Limit: {{ container }}", + "metric": "kube_pod_container_resource_limits_memory_bytes", + "refId": "C", + "step": 20 + } + ], + "title": "CPU Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + 
"editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 3, + "isNew": false, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (pod_name) (rate(container_network_receive_bytes_total{pod_name=\"$pod\"}[1m])))", + "intervalFactor": 2, + "legendFormat": "{{ pod_name }}", + "refId": "A", + "step": 30 + } + ], + "title": "Network I/O", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "New Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": true, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(kube_pod_info, namespace)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Pod", + "multi": false, + 
"name": "pod", + "options": [], + "query": "label_values(kube_pod_info{namespace=~\"$namespace\"}, pod)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": true, + "label": "Container", + "multi": false, + "name": "container", + "options": [], + "query": "label_values(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\"}, container)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Pods", + "version": 1 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/statefulset-dashboard.json b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/statefulset-dashboard.json new file mode 100644 index 0000000000..8d3d48c34e --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/dashboards/statefulset-dashboard.json @@ -0,0 +1,716 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-definition-kubernetes-statefulset + namespace: knative-monitoring +data: + kubernetes-statefulset-dashboard.json: |+ + { + "__inputs": [ + { + "description": "", + "label": "prometheus", + "name": "prometheus", + "pluginId": "prometheus", + "pluginName": "Prometheus", + "type": "datasource" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + 
"graphTooltip": 1, + "hideControls": false, + "links": [], + "rows": [ + { + "collapse": false, + "editable": false, + "height": "200px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 8, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$statefulset_namespace\",pod_name=~\"$statefulset_name.*\"}[3m]))", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "CPU", + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 9, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "postfix": "GB", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "80%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{namespace=\"$statefulset_namespace\",pod_name=~\"$statefulset_name.*\"}) / 1024^3", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Memory", + "type": "singlestat", + "valueFontSize": "110%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "Bps", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 7, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "targets": [ + { + "expr": "sum(rate(container_network_transmit_bytes_total{namespace=\"$statefulset_namespace\",pod_name=~\"$statefulset_name.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{namespace=\"$statefulset_namespace\",pod_name=~\"$statefulset_name.*\"}[3m]))", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Network", + 
"type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "100px", + "panels": [ + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "id": 5, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_statefulset_replicas{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "metric": "kube_statefulset_replicas", + "refId": "A", + "step": 600 + } + ], + "title": "Desired Replicas", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + 
"id": 6, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "min(kube_statefulset_status_replicas{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Available Replicas", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 3, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_statefulset_status_observed_generation{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + 
"intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Observed Generation", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "prometheus", + "editable": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 2, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "max(kube_statefulset_metadata_generation{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "refId": "A", + "step": 600 + } + ], + "title": "Metadata Generation", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + }, + { + "collapse": false, + "editable": false, + "height": "350px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "prometheus", + "editable": false, + "error": false, + "fill": 1, + "grid": { + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 1, + 
"isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "min(kube_statefulset_status_replicas{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "available", + "refId": "B", + "step": 30 + }, + { + "expr": "max(kube_statefulset_replicas{statefulset=\"$statefulset_name\",namespace=\"$statefulset_namespace\"}) without (instance, pod)", + "intervalFactor": 2, + "legendFormat": "desired", + "refId": "E", + "step": 30 + } + ], + "title": "Replicas", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "", + "logBase": 1, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "show": false + } + ] + } + ], + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "statefulset_namespace", + "options": [], + "query": "label_values(kube_statefulset_metadata_generation, namespace)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": null, + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + 
"allValue": null, + "current": {}, + "datasource": "prometheus", + "hide": 0, + "includeAll": false, + "label": "StatefulSet", + "multi": false, + "name": "statefulset_name", + "options": [], + "query": "label_values(kube_statefulset_metadata_generation{namespace=\"$statefulset_namespace\"}, statefulset)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "statefulset", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "StatefulSet", + "version": 1 + } diff --git a/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/node-exporter.yaml b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/node-exporter.yaml new file mode 100644 index 0000000000..1e119e7f33 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/config/monitoring/metrics/prometheus/prometheus-operator/node-exporter.yaml @@ -0,0 +1,125 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-exporter + namespace: knative-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-exporter + namespace: knative-monitoring +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-exporter + namespace: knative-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-exporter +subjects: +- kind: ServiceAccount + name: 
node-exporter + namespace: knative-monitoring +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-exporter + namespace: knative-monitoring +spec: + selector: + matchLabels: + app: node-exporter + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: node-exporter + name: node-exporter + namespace: knative-monitoring + spec: + serviceAccountName: node-exporter + securityContext: + runAsNonRoot: true + runAsUser: 65534 + hostNetwork: true + hostPID: true + containers: + - image: quay.io/prometheus/node-exporter:v0.15.2 + args: + - "--web.listen-address=127.0.0.1:9101" + - "--path.procfs=/host/proc" + - "--path.sysfs=/host/sys" + name: node-exporter + resources: + requests: + memory: 30Mi + cpu: 100m + limits: + memory: 50Mi + cpu: 200m + volumeMounts: + - name: proc + readOnly: true + mountPath: /host/proc + - name: sys + readOnly: true + mountPath: /host/sys + - name: kube-rbac-proxy + image: quay.io/coreos/kube-rbac-proxy:v0.3.0 + args: + - "--secure-listen-address=:9100" + - "--upstream=http://127.0.0.1:9101/" + ports: + - containerPort: 9100 + hostPort: 9100 + name: https + resources: + requests: + memory: 20Mi + cpu: 10m + limits: + memory: 40Mi + cpu: 20m + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: node-exporter + name: node-exporter + namespace: knative-monitoring +spec: + type: ClusterIP + clusterIP: None + ports: + - name: https + port: 9100 + protocol: TCP + selector: + app: node-exporter diff --git a/test/vendor/knative.dev/serving/third_party/contour-latest/README.md b/test/vendor/knative.dev/serving/third_party/contour-latest/README.md new file mode 100644 index 0000000000..8ad9245d9c --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/contour-latest/README.md @@ -0,0 +1,5 @@ +### Contour 1.1 + 
+`contour.yaml` is required to install Contour and the Contour KIngress +controller. At the moment the Contour integration is pre-release and based on +`github.com/mattmoor/net-contour` until we find the final place to host it. diff --git a/test/vendor/knative.dev/serving/third_party/contour-latest/contour.yaml b/test/vendor/knative.dev/serving/third_party/contour-latest/contour.yaml new file mode 100644 index 0000000000..b80e22a923 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/contour-latest/contour.yaml @@ -0,0 +1,1293 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + serving.knative.dev/controller: "true" + name: knative-contour-core +rules: +- apiGroups: + - projectcontour.io + resources: + - httpproxies + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - patch + - watch + +--- +apiVersion: v1 +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + + # visibility contains the configuration for how to expose services + # of assorted visibilities. Each entry is keyed by the visibility + # and contains two keys: + # 1. the "class" value to pass to the Contour class annotations, + # 2. the namespace/name of the Contour Envoy service. 
+ visibility: | + ExternalIP: + class: contour + service: projectcontour/envoy-external + ClusterLocal: + class: contour-internal + service: projectcontour/envoy-internal +kind: ConfigMap +metadata: + labels: + serving.knative.dev/release: devel + name: config-contour + namespace: knative-serving + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: projectcontour + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: contour + namespace: projectcontour +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: ingressroutes.contour.heptio.com +spec: + additionalPrinterColumns: + - JSONPath: .spec.virtualhost.fqdn + description: Fully qualified domain name + name: FQDN + type: string + - JSONPath: .spec.virtualhost.tls.secretName + description: Secret with TLS credentials + name: TLS Secret + type: string + - JSONPath: .spec.routes[0].match + description: First routes defined + name: First route + type: string + - JSONPath: .status.currentStatus + description: The current status of the HTTPProxy + name: Status + type: string + - JSONPath: .status.description + description: Description of the current status + name: Status Description + type: string + group: contour.heptio.com + names: + kind: IngressRoute + listKind: IngressRouteList + plural: ingressroutes + singular: ingressroute + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: IngressRoute is an Ingress CRD specificiation + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IngressRouteSpec defines the spec of the CRD + properties: + routes: + description: Routes are the ingress routes. If TCPProxy is present, + Routes is ignored. + items: + description: Route contains the set of routes for a virtual host + properties: + delegate: + description: Delegate specifies that this route should be delegated + to another IngressRoute + properties: + name: + description: Name of the IngressRoute + type: string + namespace: + description: Namespace of the IngressRoute. Defaults to the + current namespace if not supplied. + type: string + required: + - name + type: object + enableWebsockets: + description: Enables websocket support for the route + type: boolean + match: + description: Match defines the prefix match + type: string + permitInsecure: + description: Allow this path to respond to insecure requests over + HTTP which are normally not permitted when a `virtualhost.tls` + block is present. + type: boolean + prefixRewrite: + description: Indicates that during forwarding, the matched prefix + (or path) should be swapped with this value + type: string + retryPolicy: + description: The retry policy for this route + properties: + count: + description: NumRetries is maximum allowed number of retries. + If not supplied, the number of retries is one. + format: int32 + type: integer + perTryTimeout: + description: PerTryTimeout specifies the timeout per retry + attempt. Ignored if NumRetries is not supplied. 
+ type: string + type: object + services: + description: Services are the services to proxy traffic + items: + description: Service defines an upstream to proxy traffic to + properties: + healthCheck: + description: HealthCheck defines optional healthchecks on + the upstream service + properties: + healthyThresholdCount: + description: The number of healthy health checks required + before a host is marked healthy + format: int32 + type: integer + host: + description: The value of the host header in the HTTP + health check request. If left empty (default value), + the name "contour-envoy-healthcheck" will be used. + type: string + intervalSeconds: + description: The interval (seconds) between health checks + format: int64 + type: integer + path: + description: HTTP endpoint used to perform health checks + on upstream service + type: string + timeoutSeconds: + description: The time to wait (seconds) for a health + check response + format: int64 + type: integer + unhealthyThresholdCount: + description: The number of unhealthy health checks required + before a host is marked unhealthy + format: int32 + type: integer + required: + - path + type: object + name: + description: Name is the name of Kubernetes service to proxy + traffic. Names defined here will be used to look up corresponding + endpoints which contain the ips to route. 
+ type: string + port: + description: Port (defined as Integer) to proxy traffic + to since a service can have multiple defined + type: integer + strategy: + description: LB Algorithm to apply (see https://github.com/projectcontour/contour/blob/master/design/ingressroute-design.md#load-balancing) + type: string + validation: + description: UpstreamValidation defines how to verify the + backend service's certificate + properties: + caSecret: + description: Name of the Kubernetes secret be used to + validate the certificate presented by the backend + type: string + subjectName: + description: Key which is expected to be present in + the 'subjectAltName' of the presented certificate + type: string + required: + - caSecret + - subjectName + type: object + weight: + description: Weight defines percentage of traffic to balance + traffic + format: int32 + type: integer + required: + - name + - port + type: object + type: array + timeoutPolicy: + description: The timeout policy for this route + properties: + request: + description: Timeout for receiving a response from the server + after processing a request from client. If not supplied + the timeout duration is undefined. + type: string + type: object + required: + - match + type: object + type: array + tcpproxy: + description: TCPProxy holds TCP proxy information. + properties: + delegate: + description: Delegate specifies that this tcpproxy should be delegated + to another IngressRoute + properties: + name: + description: Name of the IngressRoute + type: string + namespace: + description: Namespace of the IngressRoute. Defaults to the + current namespace if not supplied. 
+ type: string + required: + - name + type: object + services: + description: Services are the services to proxy traffic + items: + description: Service defines an upstream to proxy traffic to + properties: + healthCheck: + description: HealthCheck defines optional healthchecks on + the upstream service + properties: + healthyThresholdCount: + description: The number of healthy health checks required + before a host is marked healthy + format: int32 + type: integer + host: + description: The value of the host header in the HTTP + health check request. If left empty (default value), + the name "contour-envoy-healthcheck" will be used. + type: string + intervalSeconds: + description: The interval (seconds) between health checks + format: int64 + type: integer + path: + description: HTTP endpoint used to perform health checks + on upstream service + type: string + timeoutSeconds: + description: The time to wait (seconds) for a health check + response + format: int64 + type: integer + unhealthyThresholdCount: + description: The number of unhealthy health checks required + before a host is marked unhealthy + format: int32 + type: integer + required: + - path + type: object + name: + description: Name is the name of Kubernetes service to proxy + traffic. Names defined here will be used to look up corresponding + endpoints which contain the ips to route. 
+ type: string + port: + description: Port (defined as Integer) to proxy traffic to + since a service can have multiple defined + type: integer + strategy: + description: LB Algorithm to apply (see https://github.com/projectcontour/contour/blob/master/design/ingressroute-design.md#load-balancing) + type: string + validation: + description: UpstreamValidation defines how to verify the + backend service's certificate + properties: + caSecret: + description: Name of the Kubernetes secret be used to + validate the certificate presented by the backend + type: string + subjectName: + description: Key which is expected to be present in the + 'subjectAltName' of the presented certificate + type: string + required: + - caSecret + - subjectName + type: object + weight: + description: Weight defines percentage of traffic to balance + traffic + format: int32 + type: integer + required: + - name + - port + type: object + type: array + type: object + virtualhost: + description: Virtualhost appears at most once. If it is present, the + object is considered to be a "root". + properties: + fqdn: + description: The fully qualified domain name of the root of the + ingress tree all leaves of the DAG rooted at this object relate + to the fqdn + type: string + tls: + description: If present describes tls properties. The CNI names + that will be matched on are described in fqdn, the tls.secretName + secret must contain a matching certificate + properties: + minimumProtocolVersion: + description: Minimum TLS version this vhost should negotiate + type: string + passthrough: + description: If Passthrough is set to true, the SecretName will + be ignored and the encrypted handshake will be passed through + to the backing cluster. + type: boolean + secretName: + description: required, the name of a secret in the current namespace + type: string + type: object + required: + - fqdn + type: object + type: object + status: + description: Status reports the current state of the HTTPProxy. 
+ properties: + currentStatus: + type: string + description: + type: string + required: + - currentStatus + - description + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: tlscertificatedelegations.contour.heptio.com +spec: + group: contour.heptio.com + names: + kind: TLSCertificateDelegation + listKind: TLSCertificateDelegationList + plural: tlscertificatedelegations + singular: tlscertificatedelegation + scope: "" + validation: + openAPIV3Schema: + description: TLSCertificateDelegation is an TLS Certificate Delegation CRD specificiation. + See design/tls-certificate-delegation.md for details. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TLSCertificateDelegationSpec defines the spec of the CRD + properties: + delegations: + items: + description: CertificateDelegation maps the authority to reference + a secret in the current namespace to a set of namespaces. + properties: + secretName: + description: required, the name of a secret in the current namespace. 
+ type: string + targetNamespaces: + description: required, the namespaces the authority to reference + the the secret will be delegated to. If TargetNamespaces is + nil or empty, the CertificateDelegation' is ignored. If the + TargetNamespace list contains the character, "*" the secret + will be delegated to all namespaces. + items: + type: string + type: array + required: + - secretName + - targetNamespaces + type: object + type: array + required: + - delegations + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: httpproxies.projectcontour.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.virtualhost.fqdn + description: Fully qualified domain name + name: FQDN + type: string + - JSONPath: .spec.virtualhost.tls.secretName + description: Secret with TLS credentials + name: TLS Secret + type: string + - JSONPath: .status.currentStatus + description: The current status of the HTTPProxy + name: Status + type: string + - JSONPath: .status.description + description: Description of the current status + name: Status Description + type: string + group: projectcontour.io + names: + kind: HTTPProxy + listKind: HTTPProxyList + plural: httpproxies + shortNames: + - proxy + - proxies + singular: httpproxy + scope: Namespaced + subresources: {} + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: tlscertificatedelegations.projectcontour.io +spec: + group: projectcontour.io + names: + kind: TLSCertificateDelegation + listKind: 
TLSCertificateDelegationList + plural: tlscertificatedelegations + shortNames: + - tlscerts + singular: tlscertificatedelegation + scope: Namespaced + validation: + openAPIV3Schema: + description: TLSCertificateDelegation is an TLS Certificate Delegation CRD specificiation. + See design/tls-certificate-delegation.md for details. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TLSCertificateDelegationSpec defines the spec of the CRD + properties: + delegations: + items: + description: CertificateDelegation maps the authority to reference + a secret in the current namespace to a set of namespaces. + properties: + secretName: + description: required, the name of a secret in the current namespace. + type: string + targetNamespaces: + description: required, the namespaces the authority to reference + the the secret will be delegated to. If TargetNamespaces is + nil or empty, the CertificateDelegation' is ignored. If the + TargetNamespace list contains the character, "*" the secret + will be delegated to all namespaces. 
+ items: + type: string + type: array + required: + - secretName + - targetNamespaces + type: object + type: array + required: + - delegations + type: object + required: + - metadata + - spec + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: contour-certgen + namespace: projectcontour +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: contour + namespace: projectcontour +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: contour-certgen +subjects: +- kind: ServiceAccount + name: contour-certgen + namespace: projectcontour +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: contour-certgen + namespace: projectcontour +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch + - create + - get + - put + - post + - patch +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: contour-certgen + namespace: projectcontour +spec: + backoffLimit: 1 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: contour-certgen + spec: + containers: + - args: + - certgen + - --namespace=projectcontour + - --incluster + - --kube + image: docker.io/mattmoor/contour@sha256:58749ee9fb7d44c0569dac64407cbff9a1b40bd8ea9a3cd84ee57b63d9c43740 + imagePullPolicy: IfNotPresent + name: contour + restartPolicy: Never + serviceAccountName: contour-certgen +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: contour +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: contour +subjects: +- kind: ServiceAccount + name: contour + namespace: projectcontour +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: contour +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + 
- nodes + - pods + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - contour.heptio.com + resources: + - ingressroutes + - tlscertificatedelegations + verbs: + - get + - list + - watch + - put + - post + - patch +- apiGroups: + - projectcontour.io + resources: + - httpproxies + - tlscertificatedelegations + verbs: + - get + - list + - watch + - put + - post + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: contour-leaderelection + namespace: projectcontour +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: contour-leaderelection + namespace: projectcontour +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: contour-leaderelection +subjects: +- kind: ServiceAccount + name: contour + namespace: projectcontour + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: contour + name: contour + namespace: projectcontour +spec: + selector: + matchLabels: + app: contour + template: + metadata: + labels: + app: contour + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app: contour + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - serve + - --incluster + - --use-extensions-v1beta1-ingress + - --xds-address=0.0.0.0 + - --xds-port=8011 + - --http-port=8013 + - --debug-http-port=6060 + - 
--envoy-service-http-port=80 + - --envoy-service-https-port=443 + - --contour-cafile=/ca/cacert.pem + - --contour-cert-file=/certs/tls.crt + - --contour-key-file=/certs/tls.key + - --config-path=/config/contour.yaml + - --ingress-class-name=contour-internal + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: docker.io/mattmoor/contour@sha256:58749ee9fb7d44c0569dac64407cbff9a1b40bd8ea9a3cd84ee57b63d9c43740 + livenessProbe: + httpGet: + path: /healthz + port: 8013 + name: contour-internal + ports: + - containerPort: 8011 + name: xds + - containerPort: 8013 + name: debug + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 10 + tcpSocket: + port: 8011 + volumeMounts: + - mountPath: /certs + name: contourcert + readOnly: true + - mountPath: /ca + name: cacert + readOnly: true + - mountPath: /config + name: contour-internal-config + readOnly: true + - args: + - serve + - --incluster + - --use-extensions-v1beta1-ingress + - --xds-address=0.0.0.0 + - --xds-port=8010 + - --http-port=8012 + - --debug-http-port=6061 + - --envoy-service-http-port=80 + - --envoy-service-https-port=443 + - --contour-cafile=/ca/cacert.pem + - --contour-cert-file=/certs/tls.crt + - --contour-key-file=/certs/tls.key + - --config-path=/config/contour.yaml + - --ingress-class-name=contour + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: docker.io/mattmoor/contour@sha256:58749ee9fb7d44c0569dac64407cbff9a1b40bd8ea9a3cd84ee57b63d9c43740 + livenessProbe: + httpGet: + path: /healthz + port: 8012 + name: contour-external + ports: + - containerPort: 8010 + name: xds + - containerPort: 8012 + name: debug + readinessProbe: + initialDelaySeconds: 15 + periodSeconds: 10 + tcpSocket: + port: 8010 + volumeMounts: + - mountPath: /certs + name: contourcert + readOnly: true + - mountPath: /ca + name: cacert + readOnly: true + - mountPath: /config + name: contour-external-config + readOnly: true + 
dnsPolicy: ClusterFirst + serviceAccountName: contour + volumes: + - name: contourcert + secret: + secretName: contourcert + - name: cacert + secret: + secretName: cacert + - configMap: + defaultMode: 420 + items: + - key: contour.yaml + path: contour.yaml + name: contour-internal + name: contour-internal-config + - configMap: + defaultMode: 420 + items: + - key: contour.yaml + path: contour.yaml + name: contour-external + name: contour-external-config + +--- +apiVersion: v1 +data: + contour.yaml: | + disablePermitInsecure: false + leaderelection: + configmap-name: leader-elect-external + configmap-namespace: projectcontour + ### Logging options + accesslog-format: envoy +kind: ConfigMap +metadata: + name: contour-external + namespace: projectcontour +--- +apiVersion: v1 +kind: Service +metadata: + name: contour-external + namespace: projectcontour +spec: + ports: + - name: xds + port: 8001 + targetPort: 8010 + selector: + app: contour + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + name: envoy-external + namespace: projectcontour +spec: + externalTrafficPolicy: Local + ports: + - name: http + port: 80 + targetPort: 80 + - name: https + port: 443 + targetPort: 443 + selector: + app: envoy-external + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: envoy-external + name: envoy-external + namespace: projectcontour +spec: + selector: + matchLabels: + app: envoy-external + template: + metadata: + labels: + app: envoy-external + spec: + automountServiceAccountToken: false + containers: + - args: + - -c + - /config/envoy.json + - --service-cluster $(CONTOUR_NAMESPACE) + - --service-node $(ENVOY_POD_NAME) + - --log-level info + command: + - envoy + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: ENVOY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: metadata.name + image: docker.io/envoyproxy/envoy:v1.12.2 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - bash + - -c + - -- + - echo + - -ne + - "POST /healthcheck/fail HTTP/1.1\r\nHost: localhost\r\nConnection: + close\r\n\r\n" + - '>/dev/tcp/localhost/9001' + name: envoy + ports: + - containerPort: 80 + name: http + - containerPort: 443 + name: https + readinessProbe: + httpGet: + path: /ready + port: 8002 + initialDelaySeconds: 3 + periodSeconds: 3 + volumeMounts: + - mountPath: /config + name: envoy-external-config + - mountPath: /certs + name: envoycert + - mountPath: /ca + name: cacert + initContainers: + - args: + - bootstrap + - /config/envoy.json + - --xds-address=contour-external + - --xds-port=8001 + - --envoy-cafile=/ca/cacert.pem + - --envoy-cert-file=/certs/tls.crt + - --envoy-key-file=/certs/tls.key + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: docker.io/mattmoor/contour@sha256:58749ee9fb7d44c0569dac64407cbff9a1b40bd8ea9a3cd84ee57b63d9c43740 + imagePullPolicy: IfNotPresent + name: envoy-initconfig + volumeMounts: + - mountPath: /config + name: envoy-external-config + - mountPath: /certs + name: envoycert + readOnly: true + - mountPath: /ca + name: cacert + readOnly: true + restartPolicy: Always + volumes: + - emptyDir: {} + name: envoy-external-config + - name: envoycert + secret: + secretName: envoycert + - name: cacert + secret: + secretName: cacert + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate + +--- +apiVersion: v1 +data: + contour.yaml: | + disablePermitInsecure: false + leaderelection: + configmap-name: leader-elect-internal + configmap-namespace: projectcontour + ### Logging options + accesslog-format: envoy +kind: ConfigMap +metadata: + name: contour-internal + namespace: projectcontour +--- +apiVersion: v1 +kind: Service +metadata: + name: contour-internal + namespace: projectcontour +spec: + ports: + - name: xds 
+ port: 8001 + targetPort: 8011 + selector: + app: contour + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + name: envoy-internal + namespace: projectcontour +spec: + ports: + - name: http + port: 80 + targetPort: 80 + selector: + app: envoy-internal + type: ClusterIP +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: envoy-internal + name: envoy-internal + namespace: projectcontour +spec: + selector: + matchLabels: + app: envoy-internal + template: + metadata: + labels: + app: envoy-internal + spec: + automountServiceAccountToken: false + containers: + - args: + - -c + - /config/envoy.json + - --service-cluster $(CONTOUR_NAMESPACE) + - --service-node $(ENVOY_POD_NAME) + - --log-level info + command: + - envoy + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: ENVOY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: docker.io/envoyproxy/envoy:v1.12.2 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - bash + - -c + - -- + - echo + - -ne + - "POST /healthcheck/fail HTTP/1.1\r\nHost: localhost\r\nConnection: + close\r\n\r\n" + - '>/dev/tcp/localhost/9001' + name: envoy + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: /ready + port: 8002 + initialDelaySeconds: 3 + periodSeconds: 3 + volumeMounts: + - mountPath: /config + name: envoy-internal-config + - mountPath: /certs + name: envoycert + - mountPath: /ca + name: cacert + initContainers: + - args: + - bootstrap + - /config/envoy.json + - --xds-address=contour-internal + - --xds-port=8001 + - --envoy-cafile=/ca/cacert.pem + - --envoy-cert-file=/certs/tls.crt + - --envoy-key-file=/certs/tls.key + env: + - name: CONTOUR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: docker.io/mattmoor/contour@sha256:58749ee9fb7d44c0569dac64407cbff9a1b40bd8ea9a3cd84ee57b63d9c43740 + imagePullPolicy: IfNotPresent 
+ name: envoy-initconfig + volumeMounts: + - mountPath: /config + name: envoy-internal-config + - mountPath: /certs + name: envoycert + readOnly: true + - mountPath: /ca + name: cacert + readOnly: true + restartPolicy: Always + volumes: + - emptyDir: {} + name: envoy-internal-config + - name: envoycert + secret: + secretName: envoycert + - name: cacert + secret: + secretName: cacert + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: contour-ingress-controller + namespace: knative-serving +spec: + replicas: 1 + selector: + matchLabels: + app: controller + template: + metadata: + labels: + app: controller + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: knative.dev/samples + image: docker.io/mattmoor/controller@sha256:6f17bfaafbbd65605a13ddf43d0ad9d62b838d4c3400ffd8af5238e24f2590a7 + name: controller + ports: + - containerPort: 9090 + name: metrics + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 100Mi + serviceAccountName: controller + +--- diff --git a/test/vendor/knative.dev/serving/third_party/gloo-latest/README.md b/test/vendor/knative.dev/serving/third_party/gloo-latest/README.md new file mode 100644 index 0000000000..d5c00c555c --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/gloo-latest/README.md @@ -0,0 +1,7 @@ +The gloo.yaml file is generated by running + +``` +./download-gloo.sh +``` + +Using Helm v2.14.1 diff --git a/test/vendor/knative.dev/serving/third_party/gloo-latest/download-gloo.sh b/test/vendor/knative.dev/serving/third_party/gloo-latest/download-gloo.sh new file mode 100755 index 0000000000..b957158f63 --- /dev/null +++ 
b/test/vendor/knative.dev/serving/third_party/gloo-latest/download-gloo.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -ex + +# Download and unpack Gloo +GLOO_VERSION=0.18.12 +GLOO_CHART=gloo-${GLOO_VERSION}.tgz +DOWNLOAD_URL=https://storage.googleapis.com/solo-public-helm/charts/${GLOO_CHART} + +wget ${DOWNLOAD_URL} +tar xvf ${GLOO_CHART} + +# Create CRDs template +helm template --namespace=gloo-system \ + ${GLOO_CHART} --values gloo/values-knative.yaml --values value-overrides.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > gloo.yaml + +# Clean up. +rm ${GLOO_CHART} +rm -rf gloo/ diff --git a/test/vendor/knative.dev/serving/third_party/gloo-latest/gloo.yaml b/test/vendor/knative.dev/serving/third_party/gloo-latest/gloo.yaml new file mode 100644 index 0000000000..7f3da1ddfb --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/gloo-latest/gloo.yaml @@ -0,0 +1,769 @@ +--- +# Source: gloo/templates/0-namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: gloo-system + labels: + app: gloo + annotations: + "helm.sh/hook": pre-install +--- +# Source: gloo/templates/27-knative-external-proxy-configmap.yaml + + +# configmap +apiVersion: v1 +kind: ConfigMap +metadata: + name: knative-external-proxy-config + namespace: gloo-system + labels: + app: gloo + gloo: knative-external-proxy +data: + envoy.yaml: | + node: + cluster: knative + id: "{{.PodName}}.{{.PodNamespace}}" + metadata: + # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) + role: "{{.PodNamespace}}~knative-external-proxy" + static_resources: + clusters: + - name: xds_cluster + connect_timeout: 5.000s + load_assignment: + cluster_name: xds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: gloo + port_value: 9977 + http2_protocol_options: {} + upstream_connection_options: + tcp_keepalive: {} + type: STRICT_DNS + - name: admin_port_cluster + connect_timeout: 5.000s 
+ type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: admin_port_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 19000 + + listeners: + - name: prometheus_listener + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: prometheus + route_config: + name: prometheus_route + virtual_hosts: + - name: prometheus_host + domains: + - "*" + routes: + - match: + prefix: "/metrics" + headers: + - name: ":method" + exact_match: GET + route: + prefix_rewrite: "/stats/prometheus" + cluster: admin_port_cluster + http_filters: + - name: envoy.router + config: {} + + + dynamic_resources: + ads_config: + api_type: GRPC + grpc_services: + - envoy_grpc: {cluster_name: xds_cluster} + cds_config: + ads: {} + lds_config: + ads: {} + admin: + access_log_path: /dev/null + address: + socket_address: + address: 127.0.0.1 + port_value: 19000 # if .Values.settings.integrations.knative.proxy.tracing +--- +# Source: gloo/templates/30-knative-internal-proxy-configmap.yaml + + +# configmap +apiVersion: v1 +kind: ConfigMap +metadata: + name: knative-internal-proxy-config + namespace: gloo-system + labels: + app: gloo + gloo: knative-internal-proxy +data: + envoy.yaml: | + node: + cluster: knative + id: "{{.PodName}}.{{.PodNamespace}}" + metadata: + # role's value is the key for the in-memory xds cache (projects/gloo/pkg/xds/envoy.go) + role: "{{.PodNamespace}}~knative-internal-proxy" + static_resources: + clusters: + - name: xds_cluster + connect_timeout: 5.000s + load_assignment: + cluster_name: xds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: gloo + port_value: 9977 + http2_protocol_options: {} + upstream_connection_options: + tcp_keepalive: {} + type: STRICT_DNS + - name: admin_port_cluster + connect_timeout: 5.000s + type: STATIC + 
lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: admin_port_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 19000 + + listeners: + - name: prometheus_listener + address: + socket_address: + address: 0.0.0.0 + port_value: 8081 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: prometheus + route_config: + name: prometheus_route + virtual_hosts: + - name: prometheus_host + domains: + - "*" + routes: + - match: + prefix: "/metrics" + headers: + - name: ":method" + exact_match: GET + route: + prefix_rewrite: "/stats/prometheus" + cluster: admin_port_cluster + http_filters: + - name: envoy.router + config: {} + + + dynamic_resources: + ads_config: + api_type: GRPC + grpc_services: + - envoy_grpc: {cluster_name: xds_cluster} + cds_config: + ads: {} + lds_config: + ads: {} + admin: + access_log_path: /dev/null + address: + socket_address: + address: 127.0.0.1 + port_value: 19000 # if .Values.settings.integrations.knative.proxy.tracing +--- +# Source: gloo/templates/100-gloo-crds.yaml + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: settings.gloo.solo.io + annotations: + "helm.sh/hook": crd-install + labels: + gloo: settings +spec: + group: gloo.solo.io + names: + kind: Settings + listKind: SettingsList + plural: settings + shortNames: + - st + scope: Namespaced + version: v1 + versions: + - name: v1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: gateways.gateway.solo.io.v2 + annotations: + "helm.sh/hook": crd-install +spec: + group: gateway.solo.io.v2 + names: + kind: Gateway + listKind: GatewayList + plural: gateways + shortNames: + - gw + singular: gateway + scope: Namespaced + version: v2 + versions: + - name: v2 + storage: true + served: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: 
CustomResourceDefinition +metadata: + name: virtualservices.gateway.solo.io + annotations: + "helm.sh/hook": crd-install +spec: + group: gateway.solo.io + names: + kind: VirtualService + listKind: VirtualServiceList + plural: virtualservices + shortNames: + - vs + singular: virtualservice + scope: Namespaced + version: v1 + versions: + - name: v1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: proxies.gloo.solo.io + annotations: + "helm.sh/hook": crd-install +spec: + group: gloo.solo.io + names: + kind: Proxy + listKind: ProxyList + plural: proxies + shortNames: + - px + singular: proxy + scope: Namespaced + version: v1 + versions: + - name: v1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: upstreams.gloo.solo.io + annotations: + "helm.sh/hook": crd-install +spec: + group: gloo.solo.io + names: + kind: Upstream + listKind: UpstreamList + plural: upstreams + shortNames: + - us + singular: upstream + scope: Namespaced + version: v1 + versions: + - name: v1 + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: upstreamgroups.gloo.solo.io + annotations: + "helm.sh/hook": crd-install +spec: + group: gloo.solo.io + names: + kind: UpstreamGroup + listKind: UpstreamGroupList + plural: upstreamgroups + shortNames: + - ug + singular: upstreamgroup + scope: Namespaced + version: v1 + versions: + - name: v1 + served: true + storage: true +--- +--- +# Source: gloo/templates/22-namespace-clusterrole-knative.yaml + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: gloo-role-knative + labels: + app: gloo + gloo: rbac +rules: +- apiGroups: [""] + resources: ["pods", "services", "secrets", "endpoints", "configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] +- 
apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "create"] +- apiGroups: ["gloo.solo.io"] + resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"] + verbs: ["*"] +- apiGroups: ["networking.internal.knative.dev"] + resources: ["clusteringresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["networking.internal.knative.dev"] + resources: ["clusteringresses/status"] + verbs: ["update"] +- apiGroups: ["networking.internal.knative.dev"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["networking.internal.knative.dev"] + resources: ["ingresses/status"] + verbs: ["update"] +--- +# Source: gloo/templates/25-namespace-clusterrolebinding-knative.yaml + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: gloo-role-binding-knative-gloo-system + labels: + app: gloo + gloo: rbac +subjects: +- kind: ServiceAccount + name: default + namespace: gloo-system +roleRef: + kind: ClusterRole + name: gloo-role-knative + apiGroup: rbac.authorization.k8s.io +--- +# Source: gloo/templates/2-gloo-service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app: gloo + gloo: gloo + name: gloo + namespace: gloo-system +spec: + + ports: + - name: grpc + port: 9977 + protocol: TCP + selector: + gloo: gloo + +--- +# Source: gloo/templates/28-knative-external-proxy-service.yaml + +apiVersion: v1 +kind: Service +metadata: + labels: + app: gloo + gloo: knative-external-proxy + name: knative-external-proxy + namespace: gloo-system +spec: + ports: + - port: 80 + protocol: TCP + name: http + - port: 443 + protocol: TCP + name: https + selector: + gloo: knative-external-proxy + type: LoadBalancer +--- +# Source: gloo/templates/31-knative-internal-proxy-service.yaml + +apiVersion: v1 +kind: Service +metadata: + labels: + app: gloo + gloo: knative-internal-proxy + name: knative-internal-proxy + namespace: gloo-system +spec: + ports: + - port: 80 + protocol: 
TCP + name: http + - port: 443 + protocol: TCP + name: https + selector: + gloo: knative-internal-proxy + type: ClusterIP +--- +# Source: gloo/templates/1-gloo-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: gloo + gloo: gloo + name: gloo + namespace: gloo-system +spec: + replicas: 1 + selector: + matchLabels: + gloo: gloo + template: + metadata: + labels: + gloo: gloo + spec: + containers: + - image: quay.io/solo-io/gloo:0.18.12 + imagePullPolicy: IfNotPresent + name: gloo + resources: + requests: + cpu: 500m + memory: 256Mi + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10101 + capabilities: + drop: + - ALL + ports: + - containerPort: 9977 + name: grpc + protocol: TCP + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +--- +# Source: gloo/templates/10-ingress-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: gloo + gloo: ingress + name: ingress + namespace: gloo-system +spec: + replicas: 1 + selector: + matchLabels: + gloo: ingress + template: + metadata: + labels: + gloo: ingress + spec: + containers: + - image: quay.io/solo-io/ingress:0.18.12 + imagePullPolicy: IfNotPresent + name: ingress + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "ENABLE_KNATIVE_INGRESS" + value: "true" + - name: "KNATIVE_VERSION" + value: "0.8.0" + - name: "DISABLE_KUBE_INGRESS" + value: "true" + +--- +# Source: gloo/templates/26-knative-external-proxy-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: gloo + gloo: knative-external-proxy + name: knative-external-proxy + namespace: gloo-system +spec: + replicas: 1 + selector: + matchLabels: + gloo: knative-external-proxy + template: + metadata: + labels: + gloo: knative-external-proxy + spec: + containers: + - args: ["--disable-hot-restart"] + env: + - name: POD_NAMESPACE + 
valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: quay.io/solo-io/gloo-envoy-wrapper:0.18.12 + imagePullPolicy: IfNotPresent + name: knative-external-proxy + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + volumeMounts: + - mountPath: /etc/envoy + name: envoy-config + volumes: + - configMap: + name: knative-external-proxy-config + name: envoy-config + +--- +# Source: gloo/templates/29-knative-internal-proxy-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: gloo + gloo: knative-internal-proxy + name: knative-internal-proxy + namespace: gloo-system +spec: + replicas: 1 + selector: + matchLabels: + gloo: knative-internal-proxy + template: + metadata: + labels: + gloo: knative-internal-proxy + spec: + containers: + - args: ["--disable-hot-restart"] + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: quay.io/solo-io/gloo-envoy-wrapper:0.18.12 + imagePullPolicy: IfNotPresent + name: knative-internal-proxy + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + volumeMounts: + - mountPath: /etc/envoy + name: envoy-config + volumes: + - configMap: + name: knative-internal-proxy-config + name: envoy-config + +--- +# Source: gloo/templates/3-discovery-deployment.yaml + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: gloo + gloo: discovery + name: discovery + namespace: gloo-system +spec: + replicas: 1 + selector: + 
matchLabels: + gloo: discovery + template: + metadata: + labels: + gloo: discovery + spec: + containers: + - image: quay.io/solo-io/discovery:0.18.12 + imagePullPolicy: IfNotPresent + name: discovery + securityContext: + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 10101 + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + +--- +# Source: gloo/templates/18-settings.yaml + + +apiVersion: gloo.solo.io/v1 +kind: Settings +metadata: + name: default + namespace: gloo-system +spec: + bindAddr: 0.0.0.0:9977 + discoveryNamespace: gloo-system + kubernetesArtifactSource: {} + kubernetesConfigSource: {} + kubernetesSecretSource: {} + refreshRate: 60s + knative: + knativeExternalProxyAddress: "knative-external-proxy.gloo-system.svc.cluster.local" + knativeInternalProxyAddress: "knative-internal-proxy.gloo-system.svc.cluster.local" + discovery: + fdsMode: WHITELIST + +--- +# Source: gloo/templates/11-ingress-proxy-deployment.yaml + +--- +# Source: gloo/templates/12-ingress-proxy-configmap.yaml + +--- +# Source: gloo/templates/13-ingress-proxy-service.yaml + +--- +# Source: gloo/templates/14-clusteringress-proxy-deployment.yaml + + +--- +# Source: gloo/templates/15-clusteringress-proxy-configmap.yaml + +--- +# Source: gloo/templates/16-clusteringress-proxy-service.yaml + + +--- +# Source: gloo/templates/20-namespace-clusterrole-gateway.yaml + +--- +# Source: gloo/templates/21-namespace-clusterrole-ingress.yaml + +--- +# Source: gloo/templates/23-namespace-clusterrolebinding-gateway.yaml + +--- +# Source: gloo/templates/24-namespace-clusterrolebinding-ingress.yaml + +--- +# Source: gloo/templates/5-gateway-deployment.yaml + +--- +# Source: gloo/templates/6.5-gateway-conversion-job.yaml + +--- +# Source: gloo/templates/7-gateway-proxy-deployment.yaml + +--- +# Source: gloo/templates/8-gateway-proxy-service.yaml + + +--- +# Source: 
gloo/templates/9-gateway-proxy-configmap.yaml +# if .Values.gateway.enabled diff --git a/test/vendor/knative.dev/serving/third_party/gloo-latest/value-overrides.yaml b/test/vendor/knative.dev/serving/third_party/gloo-latest/value-overrides.yaml new file mode 100644 index 0000000000..24c68d3531 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/gloo-latest/value-overrides.yaml @@ -0,0 +1,17 @@ +namespace: + create: true + +rbac: + create: true + +discovery: + fdsMode: WHITELIST +settings: + create: true + integrations: + knative: + enabled: true + version: 0.8.0 + installKnative: true + proxy: + stats: true diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3-latest b/test/vendor/knative.dev/serving/third_party/istio-1.3-latest new file mode 120000 index 0000000000..04be51b7bc --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3-latest @@ -0,0 +1 @@ +istio-1.3.6 \ No newline at end of file diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/README.md b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/README.md new file mode 100644 index 0000000000..cffd8e93a3 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/README.md @@ -0,0 +1,17 @@ +The istio\*.yaml files are generated by running + +``` +./download-istio.sh +``` + +using Helm v3.0.1. + +The generated files are: + +- istio-ci-no-mesh.yaml: used in our continuous testing of Knative with Istio + having sidecar disabled. This is also the setting that we use in our presubmit + tests. +- istio-ci-mesh.yaml: used in our continuous testing of Knative with Istio + having sidecar and mTLS enabled. +- istio-minimal.yaml: a minimal Istio installation used for development + purposes. 
diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/download-istio.sh b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/download-istio.sh new file mode 100755 index 0000000000..9848d0be82 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/download-istio.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# Download and unpack Istio +ISTIO_VERSION=1.3.6 +DOWNLOAD_URL=https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-linux.tar.gz + +wget --no-check-certificate $DOWNLOAD_URL +if [ $? != 0 ]; then + echo "Failed to download istio package" + exit 1 +fi +tar xzf istio-${ISTIO_VERSION}-linux.tar.gz + +( # subshell in downloaded directory +cd istio-${ISTIO_VERSION} || exit + +# Create CRDs template +helm template --namespace=istio-system \ + install/kubernetes/helm/istio-init \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-crds.yaml + +# Create a custom cluster local gateway, based on the Istio custom-gateway template. +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-extras.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-knative-extras.yaml + +# A template with sidecar injection enabled. +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-ci-mesh.yaml + +# A lighter template, with just pilot/gateway. +# Based on install/kubernetes/helm/istio/values-istio-minimal.yaml +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-lean.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-ci-no-mesh.yaml + +# An even lighter template, with just pilot/gateway and small resource requests. 
+# Based on install/kubernetes/helm/istio/values-istio-minimal.yaml +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-local.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-minimal.yaml +) + +# Clean up. +rm -rf istio-${ISTIO_VERSION} +rm istio-${ISTIO_VERSION}-linux.tar.gz + +# Add in the `istio-system` namespace to reduce number of commands. +patch istio-crds.yaml namespace.yaml.patch +patch istio-ci-mesh.yaml namespace.yaml.patch +patch istio-ci-no-mesh.yaml namespace.yaml.patch +patch istio-minimal.yaml namespace.yaml.patch + +# Increase termination drain duration seconds. +patch -l istio-ci-mesh.yaml drain-seconds.yaml.patch diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/drain-seconds.yaml.patch b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/drain-seconds.yaml.patch new file mode 100644 index 0000000000..146fd99659 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/drain-seconds.yaml.patch @@ -0,0 +1,5 @@ +766a767,770 +> # PATCH #2: Increase termination drain duration. +> - name: TERMINATION_DRAIN_DURATION_SECONDS +> value: "20" +> # PATCH #2 ends. diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-mesh.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-mesh.yaml new file mode 100644 index 0000000000..fe679cf8c3 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-mesh.yaml @@ -0,0 +1,3380 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. 
+--- +# Source: istio/charts/galley/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + + minAvailable: 1 + selector: + matchLabels: + app: galley + release: RELEASE-NAME + istio: galley +--- +# Source: istio/charts/gateways/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + + minAvailable: 1 + selector: + matchLabels: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +--- +# Source: istio/charts/gateways/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + + minAvailable: 1 + selector: + matchLabels: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +--- +# Source: istio/charts/mixer/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: telemetry + chart: mixer + heritage: Helm + release: RELEASE-NAME + version: 1.3.6 + istio: mixer + istio-mixer-type: telemetry +spec: + + minAvailable: 1 + selector: + matchLabels: + app: telemetry + release: RELEASE-NAME + istio: mixer + istio-mixer-type: telemetry +--- +# Source: istio/charts/mixer/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-policy + namespace: istio-system + labels: + app: policy + chart: mixer + 
heritage: Helm + release: RELEASE-NAME + version: 1.3.6 + istio: mixer + istio-mixer-type: policy +spec: + + minAvailable: 1 + selector: + matchLabels: + app: policy + release: RELEASE-NAME + istio: mixer + istio-mixer-type: policy +--- +# Source: istio/charts/pilot/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + + minAvailable: 1 + selector: + matchLabels: + app: pilot + release: RELEASE-NAME + istio: pilot +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + release: RELEASE-NAME + istio: sidecar-injector +spec: + + minAvailable: 1 + selector: + matchLabels: + app: sidecarInjectorWebhook + release: RELEASE-NAME + istio: sidecar-injector +--- +# Source: istio/charts/galley/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-galley-configuration + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +data: + validatingwebhookconfiguration.yaml: |- + apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + metadata: + name: istio-galley + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + webhooks: + - name: pilot.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitpilot" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - httpapispecs + - httpapispecbindings + - quotaspecs + - quotaspecbindings + - operations: + - CREATE + - UPDATE + apiGroups: + - 
rbac.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - authentication.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - networking.istio.io + apiVersions: + - "*" + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + failurePolicy: Fail + sideEffects: None + - name: mixer.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitmixer" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - rules + - attributemanifests + - circonuses + - deniers + - fluentds + - kubernetesenvs + - listcheckers + - memquotas + - noops + - opas + - prometheuses + - rbacs + - solarwindses + - stackdrivers + - cloudwatches + - dogstatsds + - statsds + - stdios + - apikeys + - authorizations + - checknothings + # - kuberneteses + - listentries + - logentries + - metrics + - quotas + - reportnothings + - tracespans + - adapters + - handlers + - instances + - templates + - zipkins + failurePolicy: Fail + sideEffects: None +--- +# Source: istio/charts/security/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-security-custom-resources + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +data: + custom-resources.yaml: |- + # Authentication policy to enable permissive mode for all services (that have sidecar) in the mesh. 
+ apiVersion: "authentication.istio.io/v1alpha1" + kind: "MeshPolicy" + metadata: + name: "default" + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + peers: + - mtls: + mode: PERMISSIVE + run.sh: |- + #!/bin/sh + + set -x + + if [ "$#" -ne "1" ]; then + echo "first argument should be path to custom resource yaml" + exit 1 + fi + + pathToResourceYAML=${1} + + kubectl get validatingwebhookconfiguration istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + echo "istio-galley validatingwebhookconfiguration found - waiting for istio-galley deployment to be ready" + while true; do + kubectl -n istio-system get deployment istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + break + fi + sleep 1 + done + kubectl -n istio-system rollout status deployment istio-galley + if [ "$?" -ne 0 ]; then + echo "istio-galley deployment rollout status check failed" + exit 1 + fi + echo "istio-galley deployment ready for configuration validation" + fi + sleep 5 + kubectl apply -f ${pathToResourceYAML} +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by the Mixer. + # Note that metrics will still be reported to the Mixer. + disablePolicyChecks: true + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. + enableTracing: true + + # Set accessLogFile to empty string to disable access log. 
+ accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + mixerCheckServer: istio-policy.istio-system.svc.cluster.local:9091 + mixerReportServer: istio-telemetry.istio-system.svc.cluster.local:9091 + # policyCheckFailOpen allows traffic in cases when the mixer policy service cannot be reached. + # Default is false which means the traffic is denied when the client is unable to connect to Mixer. + policyCheckFailOpen: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. (MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. 
Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. + # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + configSources: + - address: istio-galley.istio-system.svc:9901 + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. 
+ # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. + # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. + controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. 
+ meshNetworks: |- + networks: {} +--- +# Source: istio/templates/sidecar-injector-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +data: + values: |- + {"certmanager":{"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resou
rces":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"hub":"quay.io/jetstack","image":"cert-manager-controller","nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"resources":{},"tag":"v0.6.2","tolerations":[]},"galley":{"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s",
"enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"galley","nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"gateways":{"cluster-local-gateway":{"autoscaleMax":4,"autoscaleMin":2,"cpu":{"targetAverageUtilization":80},"enabled":true,"externalIPs":[],"labels":{"app":"cluster-local-gateway","istio":"cluster-local-gateway"},"loadBalancerIP":"","loadBalancerSourceRanges":{},"podAnnotations":{},"ports":[{"name":"status-port","port":15020},{"name":"http2","port":80},{"name":"https","port":443}],"replicaCount":2,"resources":{"requests":{"cpu":"250m","memory":"256Mi"}},"secretVolumes":[{"mountPath":"/etc/istio/cluster-l
ocal-gateway-certs","name":"cluster-local-gateway-certs","secretName":"istio-cluster-local-gateway-certs"},{"mountPath":"/etc/istio/cluster-local-gateway-ca-certs","name":"cluster-local-gateway-ca-certs","secretName":"istio-cluster-local-gateway-ca-certs"}],"serviceAnnotations":{},"type":"ClusterIP"},"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"5
0Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"istio-egressgateway":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":false,"env":{"ISTIO_META_ROUTER_MODE":"sni-dnat"},"labels":{"app":"istio-egressgateway","istio":"egressgateway"},"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"ports":[{"name":"http2","port":80},{"name":"https","port":443},{"name":"tls","port":15443,"targetPort":15443}],"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","secretVolumes":[{"mountPath":"/etc/istio/egressgateway-certs","name":"egressgateway-certs","secretName":"istio-egressgateway-certs"},{"mountPath":"/etc/istio/egressgateway-ca-certs","name":"egressgateway-ca-certs","secretName":"istio-egressgateway-ca-certs"}],"serviceAnnotations":{},"tolerations":[],"type":"ClusterIP"},"istio-ilbgateway":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":false,"labels":{"app":"istio-ilbgateway","istio":"ilbgateway"},"loadBalancerIP":"","nodeSelector":{},"podAnnotations":{},"ports":[{"name":"grpc-pilot-mtls","port":15011},{"name":"grpc-pilot","port":15010},{"name":"tcp-citadel-grpc-tls","port":8060,"targetPort":8060},{
"name":"tcp-dns","port":5353}],"resources":{"requests":{"cpu":"800m","memory":"512Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","secretVolumes":[{"mountPath":"/etc/istio/ilbgateway-certs","name":"ilbgateway-certs","secretName":"istio-ilbgateway-certs"},{"mountPath":"/etc/istio/ilbgateway-ca-certs","name":"ilbgateway-ca-certs","secretName":"istio-ilbgateway-ca-certs"}],"serviceAnnotations":{"cloud.google.com/load-balancer-type":"internal"},"tolerations":[],"type":"LoadBalancer"},"istio-ingressgateway":{"applicationPorts":"","autoscaleEnabled":true,"autoscaleMax":4,"autoscaleMin":2,"cpu":{"targetAverageUtilization":80},"enabled":true,"env":{"ISTIO_META_ROUTER_MODE":"sni-dnat"},"externalIPs":[],"labels":{"app":"istio-ingressgateway","istio":"ingressgateway"},"loadBalancerIP":"","loadBalancerSourceRanges":[],"meshExpansionPorts":[{"name":"tcp-pilot-grpc-tls","port":15011,"targetPort":15011},{"name":"tcp-mixer-grpc-tls","port":15004,"targetPort":15004},{"name":"tcp-citadel-grpc-tls","port":8060,"targetPort":8060},{"name":"tcp-dns-tls","port":853,"targetPort":853}],"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"ports":[{"name":"status-port","port":15020},{"name":"http2","port":80},{"name":"https","port":443}],"replicaCount":2,"resources":{"limits":{"cpu":"3000m","memory":"2048Mi"},"requests":{"cpu":"3000m","memory":"2048Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sds":{"enabled":true,"image":"node-agent-k8s","resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}},"secretVolumes":[{"mountPath":"/etc/istio/ingressgateway-certs","name":"ingressgateway-certs","secretName":"istio-ingressgateway-certs"},{"mountPath":"/etc/istio/ingressgateway-ca-certs","name":"ingressgateway-ca-certs","secretName":"istio-ingressgateway-ca-certs"}],"serviceAnnotations":{},"tolerations":[],"type":"LoadBalancer"}},"global":{"arch":{"amd64":2,"ppc64le":2,"s39
0x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"
zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"grafana":{"accessMode":"ReadWriteMany","contextPath":"/grafana","dashboardProviders":{"dashboardproviders.yaml":{"apiVersion":1,"providers":[{"disableDeletion":false,"folder":"istio","name":"istio","options":{"path":"/var/lib/grafana/dashboards/istio"},"orgId":1,"type":"file"}]}},"datasources":{"datasources.yaml":{"apiVersion":1,"datasources":[{"access":"proxy","editable":true,"isDefault":true,"jsonData":{"timeInterval":"5s"},"name":"Prometheus","orgId":1,"type":"prometheus","url":"http://prometheus:9090"}]}},"enabled":false,"env":{},"envSecrets":{},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService"
:{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":{"repository":"grafana/grafana","tag":"6.1.6"},"ingress":{"annotations":null,"enabled":false,"hosts":["grafana.local"],"tls":null},"nodeSelector":{},"persist":false,"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"security":{"enabled":false,"passphraseKey":"passphrase","secretName":"grafana","usernameKey":"username"},"service":{"annotations":{},"externalPort":3000,"loadBalancerIP":null,"loadBalancerSourceRanges":null,"name":"http","type":"ClusterIP"},"storageClassName":"","tolerations":[]},"istio_cni":{"enabled":false},"istiocoredns":{"coreDNSImage":"coredns/coredns:1.1.2","coreDNSPluginImage":"istio/coredns-plugin:0.2-istio-1.1","enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlane
SecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"}
,"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"kiali":{"contextPath":"/kiali","createDemoSecret":false,"dashboard":{"auth":{"strategy":"login"},"grafanaURL":null,"jaegerURL":null,"secretName":"kiali","viewOnlyMode":false},"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}
},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"hub":"quay.io/kiali","image":"kiali","ingress":{"annotations":null,"enabled":false,"hosts":["kiali.local"],"tls":null},"nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"prometheusAddr":"http://prometheus:9090","replicaCount":1,"security":{"cert_file":"/kiali-cert/cert-chain.pem","enabled":false,"private_key_file":"/kiali-cert/key.pem"},"tag":"v1.4","tolerations":[]},"mixer":{"adapters":{"kubernetesenv":{"enabled":true},"prometheus":{"enabled":false,"metricsExpiryDuration":"10m"},"stdio":{"enabled":false,"outputAsJson":true},"useAdapterCRDs":false},"env":{"GODEBUG":"gctrace=1","GOMAXPROCS":"6"},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresen
t","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"mixer","nodeSelector
":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"policy":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":true,"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%"},"telemetry":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":true,"loadshedding":{"latencyThreshold":"100ms","mode":"enforce"},"replicaCount":1,"reportBatchMaxEntries":100,"reportBatchMaxTime":"1s","resources":{"limits":{"cpu":"4800m","memory":"4G"},"requests":{"cpu":"1000m","memory":"1G"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sessionAffinityEnabled":false},"tolerations":[]},"nodeagent":{"enabled":false,"env":{"CA_ADDR":"","CA_PROVIDER":"","PLUGINS":""},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port
":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"node-agent-k8s","nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"tolerations":[]},"pilot":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":2,"cpu":{"targetAverageUtilization":80},"enableProtocolSniffingForInbound":false,"enableProtocolSniffingForOutbound":true,"enabled":true,"env":{"GODEBUG":"gctrace=1","PILOT_PUSH_THROTTLE":100},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false
,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"pilot","keepaliveMaxServerConnectionAge":"30m","nodeSelector":{},"podAntiAffin
ityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"resources":{"requests":{"cpu":"3000m","memory":"2048Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sidecar":true,"tolerations":[],"traceSampling":100},"prometheus":{"enabled":false},"security":{"citadelHealthCheck":false,"createMeshPolicy":true,"enableNamespacesByDefault":true,"enabled":true,"env":{},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts"
:"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"citadel","nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","selfSigned":true,"tolerations":[],"workloadCertTtl":"2160h"},"sidecarInjectorWebhook":{"alwaysInjectSelector":[],"enableNamespacesByDefault":false,"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat
":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin":{"address":""}},"trustDomain":"","useMCP":true},"image":"sidecar_injector","neverInjectSelector":[],"nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rewriteAppHTTPProbe":true,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"tracing":{"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":fa
lse,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"oneNamespace":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxy_init"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.3.6","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"zipkin
":{"address":""}},"trustDomain":"","useMCP":true},"ingress":{"annotations":null,"enabled":false,"hosts":null,"tls":null},"jaeger":{"accessMode":"ReadWriteMany","hub":"docker.io/jaegertracing","image":"all-in-one","memory":{"max_traces":50000},"persist":false,"spanStorageType":"badger","storageClassName":"","tag":1.14},"nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"provider":"jaeger","service":{"annotations":{},"externalPort":9411,"name":"http","type":"ClusterIP"},"tolerations":[],"zipkin":{"hub":"docker.io/openzipkin","image":"zipkin","javaOptsHeap":700,"maxSpans":500000,"node":{"cpus":2},"probeStartupDelay":200,"queryPort":9411,"resources":{"limits":{"cpu":"300m","memory":"900Mi"},"requests":{"cpu":"150m","memory":"900Mi"}},"tag":"2.14.2"}}} + + config: |- + policy: enabled + alwaysInjectSelector: + [] + neverInjectSelector: + [] + template: |- + rewriteAppHTTPProbe: {{ valueOrDefault .Values.sidecarInjectorWebhook.rewriteAppHTTPProbe false }} + {{- if or (not .Values.istio_cni.enabled) .Values.global.proxy.enableCoreDump }} + initContainers: + {{ if ne (annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode) `NONE` }} + {{- if not .Values.istio_cni.enabled }} + - name: istio-init + {{- if contains "/" .Values.global.proxy_init.image }} + image: "{{ .Values.global.proxy_init.image }}" + {{- else }} + image: "{{ .Values.global.hub }}/{{ .Values.global.proxy_init.image }}:{{ .Values.global.tag }}" + {{- end }} + args: + - "-p" + - "15001" + - "-z" + - "15006" + - "-u" + - 1337 + - "-m" + - "{{ annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode }}" + - "-i" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeOutboundIPRanges` .Values.global.proxy.includeIPRanges }}" + - "-x" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}" + - "-b" + - "{{ annotation .ObjectMeta 
`traffic.sidecar.istio.io/includeInboundPorts` `*` }}" + - "-d" + - "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` .Values.global.proxy.excludeInboundPorts) }}" + {{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne .Values.global.proxy.excludeOutboundPorts "") -}} + - "-o" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundPorts` .Values.global.proxy.excludeOutboundPorts }}" + {{ end -}} + {{ if (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces`) -}} + - "-k" + - "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}" + {{ end -}} + imagePullPolicy: "{{ .Values.global.imagePullPolicy }}" + {{- if .Values.global.proxy.init.resources }} + resources: + {{ toYaml .Values.global.proxy.init.resources | indent 4 }} + {{- else }} + resources: {} + {{- end }} + securityContext: + runAsUser: 0 + runAsNonRoot: false + capabilities: + add: + - NET_ADMIN + {{- if .Values.global.proxy.privileged }} + privileged: true + {{- end }} + restartPolicy: Always + {{- end }} + {{ end -}} + {{- if eq .Values.global.proxy.enableCoreDump true }} + - name: enable-core-dump + args: + - -c + - sysctl -w kernel.core_pattern=/var/lib/istio/core.proxy && ulimit -c unlimited + command: + - /bin/sh + image: {{ $.Values.global.proxy.enableCoreDumpImage }} + imagePullPolicy: IfNotPresent + resources: {} + securityContext: + runAsUser: 0 + runAsNonRoot: false + privileged: true + {{ end }} + {{- end }} + containers: + - name: istio-proxy + {{- if contains "/" (annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.proxy.image) }} + image: "{{ annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.proxy.image }}" + {{- else }} + image: "{{ annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.hub }}/{{ 
.Values.global.proxy.image }}:{{ .Values.global.tag }}" + {{- end }} + ports: + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - sidecar + - --domain + - $(POD_NAMESPACE).svc.{{ .Values.global.proxy.clusterDomain }} + - --configPath + - "{{ .ProxyConfig.ConfigPath }}" + - --binaryPath + - "{{ .ProxyConfig.BinaryPath }}" + - --serviceCluster + {{ if ne "" (index .ObjectMeta.Labels "app") -}} + - "{{ index .ObjectMeta.Labels `app` }}.$(POD_NAMESPACE)" + {{ else -}} + - "{{ valueOrDefault .DeploymentMeta.Name `istio-proxy` }}.{{ valueOrDefault .DeploymentMeta.Namespace `default` }}" + {{ end -}} + - --drainDuration + - "{{ formatDuration .ProxyConfig.DrainDuration }}" + - --parentShutdownDuration + - "{{ formatDuration .ProxyConfig.ParentShutdownDuration }}" + - --discoveryAddress + - "{{ annotation .ObjectMeta `sidecar.istio.io/discoveryAddress` .ProxyConfig.DiscoveryAddress }}" + {{- if eq .Values.global.proxy.tracer "lightstep" }} + - --lightstepAddress + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetAddress }}" + - --lightstepAccessToken + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetAccessToken }}" + - --lightstepSecure={{ .ProxyConfig.GetTracing.GetLightstep.GetSecure }} + - --lightstepCacertPath + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetCacertPath }}" + {{- else if eq .Values.global.proxy.tracer "zipkin" }} + - --zipkinAddress + - "{{ .ProxyConfig.GetTracing.GetZipkin.GetAddress }}" + {{- else if eq .Values.global.proxy.tracer "datadog" }} + - --datadogAgentAddress + - "{{ .ProxyConfig.GetTracing.GetDatadog.GetAddress }}" + {{- end }} + {{- if .Values.global.proxy.logLevel }} + - --proxyLogLevel={{ .Values.global.proxy.logLevel }} + {{- end}} + {{- if .Values.global.proxy.componentLogLevel }} + - --proxyComponentLogLevel={{ .Values.global.proxy.componentLogLevel }} + {{- end}} + - --dnsRefreshRate + - {{ .Values.global.proxy.dnsRefreshRate }} + - --connectTimeout + - "{{ formatDuration .ProxyConfig.ConnectTimeout }}" + 
{{- if .Values.global.proxy.envoyStatsd.enabled }} + - --statsdUdpAddress + - "{{ .ProxyConfig.StatsdUdpAddress }}" + {{- end }} + {{- if .Values.global.proxy.envoyMetricsService.enabled }} + - --envoyMetricsServiceAddress + - "{{ .ProxyConfig.GetEnvoyMetricsService.GetAddress }}" + {{- end }} + {{- if .Values.global.proxy.envoyAccessLogService.enabled }} + - --envoyAccessLogService + - '{{ structToJSON .ProxyConfig.EnvoyAccessLogService }}' + {{- end }} + - --proxyAdminPort + - "{{ .ProxyConfig.ProxyAdminPort }}" + {{ if gt .ProxyConfig.Concurrency 0 -}} + - --concurrency + - "{{ .ProxyConfig.Concurrency }}" + {{ end -}} + - --controlPlaneAuthPolicy + - "{{ annotation .ObjectMeta `sidecar.istio.io/controlPlaneAuthPolicy` .ProxyConfig.ControlPlaneAuthPolicy }}" + {{- if (ne (annotation .ObjectMeta "status.sidecar.istio.io/port" .Values.global.proxy.statusPort) "0") }} + - --statusPort + - "{{ annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort }}" + - --applicationPorts + - "{{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/applicationPorts` (applicationPorts .Spec.Containers) }}" + {{- end }} + {{- if .Values.global.trustDomain }} + - --trust-domain={{ .Values.global.trustDomain }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ISTIO_META_POD_PORTS + value: |- + [ + {{- range $index1, $c := .Spec.Containers }} + {{- range $index2, $p := $c.Ports }} + {{if or (ne $index1 0) (ne $index2 0)}},{{end}}{{ structToJSON $p }} + {{- end}} + {{- end}} + ] + - name: ISTIO_META_CLUSTER_ID + value: "{{ valueOrDefault .Values.global.multicluster.clusterName `Kubernetes` }}" + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + {{- if eq .Values.global.proxy.tracer "datadog" }} + - name: HOST_IP 
+ valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if isset .ObjectMeta.Annotations `apm.datadoghq.com/env` }} + {{- range $key, $value := fromJSON (index .ObjectMeta.Annotations `apm.datadoghq.com/env`) }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- end }} + {{- end }} + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: {{ $.Values.global.sds.enabled }} + - name: ISTIO_META_INTERCEPTION_MODE + value: "{{ or (index .ObjectMeta.Annotations `sidecar.istio.io/interceptionMode`) .ProxyConfig.InterceptionMode.String }}" + - name: ISTIO_META_INCLUDE_INBOUND_PORTS + value: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (applicationPorts .Spec.Containers) }}" + {{- if .Values.global.network }} + - name: ISTIO_META_NETWORK + value: "{{ .Values.global.network }}" + {{- end }} + {{ if .ObjectMeta.Annotations }} + - name: ISTIO_METAJSON_ANNOTATIONS + value: | + {{ toJSON .ObjectMeta.Annotations }} + {{ end }} + {{ if .ObjectMeta.Labels }} + - name: ISTIO_METAJSON_LABELS + value: | + {{ toJSON .ObjectMeta.Labels }} + {{ end }} + {{- if .DeploymentMeta.Name }} + - name: ISTIO_META_WORKLOAD_NAME + value: {{ .DeploymentMeta.Name }} + {{ end }} + {{- if and .TypeMeta.APIVersion .DeploymentMeta.Name }} + - name: ISTIO_META_OWNER + value: kubernetes://api/{{ .TypeMeta.APIVersion }}/namespaces/{{ valueOrDefault .DeploymentMeta.Namespace `default` }}/{{ toLower .TypeMeta.Kind}}s/{{ .DeploymentMeta.Name }} + {{- end}} + {{- if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - name: ISTIO_BOOTSTRAP_OVERRIDE + value: "/etc/istio/custom-bootstrap/custom_bootstrap.json" + {{- end }} + {{- if .Values.global.sds.customTokenDirectory }} + - name: ISTIO_META_SDS_TOKEN_PATH + value: "{{ .Values.global.sds.customTokenDirectory -}}/sdstoken" + {{- end }} + {{- if 
.Values.global.meshID }} + - name: ISTIO_META_MESH_ID + value: "{{ .Values.global.meshID }}" + {{- else if .Values.global.trustDomain }} + - name: ISTIO_META_MESH_ID + value: "{{ .Values.global.trustDomain }}" + {{- end }} + # PATCH #2: Increase termination drain duration. + - name: TERMINATION_DRAIN_DURATION_SECONDS + value: "20" + # PATCH #2 ends. + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + {{ if ne (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) `0` }} + readinessProbe: + httpGet: + path: /healthz/ready + port: {{ annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort }} + initialDelaySeconds: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/initialDelaySeconds` .Values.global.proxy.readinessInitialDelaySeconds }} + periodSeconds: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/periodSeconds` .Values.global.proxy.readinessPeriodSeconds }} + failureThreshold: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/failureThreshold` .Values.global.proxy.readinessFailureThreshold }} + {{ end -}} + securityContext: + {{- if .Values.global.proxy.privileged }} + privileged: true + {{- end }} + {{- if ne .Values.global.proxy.enableCoreDump true }} + readOnlyRootFilesystem: true + {{- end }} + {{ if eq (annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode) `TPROXY` -}} + capabilities: + add: + - NET_ADMIN + runAsGroup: 1337 + {{ else -}} + {{ if .Values.global.sds.enabled }} + runAsGroup: 1337 + {{- end }} + runAsUser: 1337 + {{- end }} + resources: + {{ if or (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU`) (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory`) -}} + requests: + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU`) -}} + cpu: "{{ index .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU` }}" + {{ end}} + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory`) 
-}} + memory: "{{ index .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory` }}" + {{ end }} + {{ else -}} + {{- if .Values.global.proxy.resources }} + {{ toYaml .Values.global.proxy.resources | indent 4 }} + {{- end }} + {{ end -}} + volumeMounts: + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - mountPath: /etc/istio/custom-bootstrap + name: custom-bootstrap-volume + {{- end }} + - mountPath: /etc/istio/proxy + name: istio-envoy + {{- if .Values.global.sds.enabled }} + - mountPath: /var/run/sds + name: sds-uds-path + readOnly: true + - mountPath: /var/run/secrets/tokens + name: istio-token + {{- if .Values.global.sds.customTokenDirectory }} + - mountPath: "{{ .Values.global.sds.customTokenDirectory -}}" + name: custom-sds-token + readOnly: true + {{- end }} + {{- else }} + - mountPath: /etc/certs/ + name: istio-certs + readOnly: true + {{- end }} + {{- if and (eq .Values.global.proxy.tracer "lightstep") .Values.global.tracer.lightstep.cacertPath }} + - mountPath: {{ directory .ProxyConfig.GetTracing.GetLightstep.GetCacertPath }} + name: lightstep-certs + readOnly: true + {{- end }} + {{- if isset .ObjectMeta.Annotations `sidecar.istio.io/userVolumeMount` }} + {{ range $index, $value := fromJSON (index .ObjectMeta.Annotations `sidecar.istio.io/userVolumeMount`) }} + - name: "{{ $index }}" + {{ toYaml $value | indent 4 }} + {{ end }} + {{- end }} + volumes: + {{- if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - name: custom-bootstrap-volume + configMap: + name: {{ annotation .ObjectMeta `sidecar.istio.io/bootstrapOverride` "" }} + {{- end }} + - emptyDir: + medium: Memory + name: istio-envoy + {{- if .Values.global.sds.enabled }} + - name: sds-uds-path + hostPath: + path: /var/run/sds + - name: istio-token + projected: + sources: + - serviceAccountToken: + path: istio-token + expirationSeconds: 43200 + audience: {{ .Values.global.sds.token.aud }} + {{- if .Values.global.sds.customTokenDirectory }} + 
- name: custom-sds-token + secret: + secretName: sdstokensecret + {{- end }} + {{- else }} + - name: istio-certs + secret: + optional: true + {{ if eq .Spec.ServiceAccountName "" }} + secretName: istio.default + {{ else -}} + secretName: {{ printf "istio.%s" .Spec.ServiceAccountName }} + {{ end -}} + {{- if isset .ObjectMeta.Annotations `sidecar.istio.io/userVolume` }} + {{range $index, $value := fromJSON (index .ObjectMeta.Annotations `sidecar.istio.io/userVolume`) }} + - name: "{{ $index }}" + {{ toYaml $value | indent 2 }} + {{ end }} + {{ end }} + {{- end }} + {{- if and (eq .Values.global.proxy.tracer "lightstep") .Values.global.tracer.lightstep.cacertPath }} + - name: lightstep-certs + secret: + optional: true + secretName: lightstep.cacert + {{- end }} + {{- if .Values.global.podDNSSearchNamespaces }} + dnsConfig: + searches: + {{- range .Values.global.podDNSSearchNamespaces }} + - {{ render . }} + {{- end }} + {{- end }} + podRedirectAnnot: + sidecar.istio.io/interceptionMode: "{{ annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode }}" + traffic.sidecar.istio.io/includeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeOutboundIPRanges` .Values.global.proxy.includeIPRanges }}" + traffic.sidecar.istio.io/excludeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}" + traffic.sidecar.istio.io/includeInboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (includeInboundPorts .Spec.Containers) }}" + traffic.sidecar.istio.io/excludeInboundPorts: "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` .Values.global.proxy.excludeInboundPorts) }}" + {{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne 
.Values.global.proxy.excludeOutboundPorts "") }} + traffic.sidecar.istio.io/excludeOutboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundPorts` .Values.global.proxy.excludeOutboundPorts }}" + {{- end }} + traffic.sidecar.istio.io/kubevirtInterfaces: "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}" +--- +# Source: istio/charts/galley/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-galley-service-account + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/mixer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-mixer-service-account + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-security-post-install-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# 
Source: istio/charts/security/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-citadel-service-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-sidecar-injector-service-account + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/galley/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-galley-istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["*"] +- apiGroups: ["config.istio.io"] # istio mixer CRD watcher + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions","apps"] + resources: ["deployments"] + resourceNames: ["istio-galley"] + verbs: ["get"] +- apiGroups: [""] + resources: ["pods", "nodes", "services", "endpoints", "namespaces"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["deployments/finalizers"] + resourceNames: ["istio-galley"] + verbs: ["update"] +- apiGroups: ["apiextensions.k8s.io"] + 
resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/mixer/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-mixer-istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] # istio CRD watcher + resources: ["*"] + verbs: ["create", "get", "list", "watch", "patch"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["configmaps", "endpoints", "pods", "services", "namespaces", "secrets", "replicationcontrollers"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", "ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["endpoints", "pods", "services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/security/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-citadel-istio-system + labels: + 
app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "update"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: [""] + resources: ["serviceaccounts", "services", "namespaces"] + verbs: ["get", "watch", "list"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: istio-security-post-install-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["authentication.istio.io"] # needed to create default authn policy + resources: ["*"] + verbs: ["*"] +- apiGroups: ["networking.istio.io"] # needed to create security destination rules + resources: ["*"] + verbs: ["*"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-sidecar-injector-istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "patch"] +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 
'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/galley/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-galley-admin-role-binding-istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-galley-istio-system +subjects: + - kind: ServiceAccount + name: istio-galley-service-account + namespace: istio-system +--- +# Source: istio/charts/mixer/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-mixer-admin-role-binding-istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-mixer-istio-system +subjects: + - kind: ServiceAccount + name: istio-mixer-service-account + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-citadel-istio-system +subjects: + - 
kind: ServiceAccount + name: istio-citadel-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: istio-security-post-install-role-binding-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-security-post-install-istio-system +subjects: + - kind: ServiceAccount + name: istio-security-post-install-account + namespace: istio-system +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-sidecar-injector-admin-role-binding-istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-sidecar-injector-istio-system +subjects: + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.3.6 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: 
istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/galley/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + ports: + - port: 443 + name: https-validation + - port: 15014 + name: http-monitoring + - port: 9901 + name: grpc-mcp + selector: + istio: galley +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/mixer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-telemetry + namespace: istio-system + annotations: + networking.istio.io/exportTo: "*" + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + ports: + - name: 
grpc-mixer + port: 9091 + - name: grpc-mixer-mtls + port: 15004 + - name: http-monitoring + port: 15014 + - name: prometheus + port: 42422 + selector: + istio: mixer + istio-mixer-type: telemetry +--- +# Source: istio/charts/mixer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-policy + namespace: istio-system + annotations: + networking.istio.io/exportTo: "*" + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + ports: + - name: grpc-mixer + port: 9091 + - name: grpc-mixer-mtls + port: 15004 + - name: http-monitoring + port: 15014 + selector: + istio: mixer + istio-mixer-type: policy +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/security/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + # we use the normal name here (e.g. 
'prometheus') + # as grafana is configured to use this as a data source + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + ports: + - name: grpc-citadel + port: 8060 + targetPort: 8060 + protocol: TCP + - name: http-monitoring + port: 15014 + selector: + istio: citadel +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +spec: + ports: + - port: 443 + name: https-inject + - port: 15014 + name: http-monitoring + selector: + istio: sidecar-injector +--- +# Source: istio/charts/galley/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + replicas: 1 + selector: + matchLabels: + istio: galley + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-galley-service-account + containers: + - name: galley + image: "docker.io/istio/galley:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 443 + - containerPort: 15014 + - containerPort: 9901 + command: + - /usr/local/bin/galley + - server + - --meshConfigFile=/etc/mesh-config/mesh + - --livenessProbeInterval=1s + - --livenessProbePath=/healthliveness + - --readinessProbePath=/healthready + - --readinessProbeInterval=1s + - --deployment-namespace=istio-system + - --insecure=true + - --validation-webhook-config-file + - 
/etc/config/validatingwebhookconfiguration.yaml + - --monitoringPort=15014 + - --log_output_level=default:info + volumeMounts: + - name: certs + mountPath: /etc/certs + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + - name: mesh-config + mountPath: /etc/mesh-config + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthliveness + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthready + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + requests: + cpu: 10m + volumes: + - name: certs + secret: + secretName: istio.istio-galley-service-account + - name: config + configMap: + name: istio-galley-configuration + - name: mesh-config + configMap: + name: istio + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + chart: 
gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.3.6" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 3000m + memory: 2048Mi + requests: + cpu: 3000m + memory: 2048Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: 
beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + replicas: 2 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 256Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: 
spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: 
beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/mixer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: istio-mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: mixer + istio-mixer-type: telemetry + template: + metadata: + labels: + app: telemetry + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer + istio-mixer-type: telemetry + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-mixer-service-account + volumes: + - name: istio-certs + secret: + secretName: istio.istio-mixer-service-account + optional: true + - name: uds-socket + emptyDir: {} + - name: telemetry-adapter-secret + secret: + secretName: telemetry-adapter-secret + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" + containers: + - name: mixer + image: "docker.io/istio/mixer:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15014 + - containerPort: 42422 + args: + - --monitoringPort=15014 + - --address + - unix:///sock/mixer.socket + - --log_output_level=default:info + - 
--configStoreURL=mcp://istio-galley.istio-system.svc:9901 + - --configDefaultNamespace=istio-system + - --useAdapterCRDs=false + - --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans + - --averageLatencyThreshold + - 100ms + - --loadsheddingMode + - enforce + env: + - name: GODEBUG + value: "gctrace=1" + - name: GOMAXPROCS + value: "6" + resources: + limits: + cpu: 4800m + memory: 4G + requests: + cpu: 1000m + memory: 1G + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: telemetry-adapter-secret + mountPath: /var/run/secrets/istio.io/telemetry/adapter + readOnly: true + - name: uds-socket + mountPath: /sock + livenessProbe: + httpGet: + path: /version + port: 15014 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9091 + - containerPort: 15004 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-telemetry + - --templateFile + - /etc/istio/proxy/envoy_telemetry.yaml.tmpl + - --controlPlaneAuthPolicy + - NONE + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock +--- +# Source: istio/charts/mixer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-policy + namespace: istio-system + labels: + app: istio-mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: 
+ strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: mixer + istio-mixer-type: policy + template: + metadata: + labels: + app: policy + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer + istio-mixer-type: policy + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-mixer-service-account + volumes: + - name: istio-certs + secret: + secretName: istio.istio-mixer-service-account + optional: true + - name: uds-socket + emptyDir: {} + - name: policy-adapter-secret + secret: + secretName: policy-adapter-secret + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" + containers: + - name: mixer + image: "docker.io/istio/mixer:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15014 + - containerPort: 42422 + args: + - --monitoringPort=15014 + - --address + - unix:///sock/mixer.socket + - --log_output_level=default:info + - --configStoreURL=mcp://istio-galley.istio-system.svc:9901 + - --configDefaultNamespace=istio-system + - --useAdapterCRDs=false + - --useTemplateCRDs=false + - --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans + env: + - name: GODEBUG + value: "gctrace=1" + - name: GOMAXPROCS + value: "6" + resources: + requests: + cpu: 10m + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock + 
livenessProbe: + httpGet: + path: /version + port: 15014 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9091 + - containerPort: 15004 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-policy + - --templateFile + - /etc/istio/proxy/envoy_policy.yaml.tmpl + - --controlPlaneAuthPolicy + - NONE + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock + - name: policy-adapter-secret + mountPath: /var/run/secrets/istio.io/policy/adapter + readOnly: true +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? 
+ labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + checksum/config-volume: f8da08b6b8c170dde721efd680270b2901e750d4aa186ebb6c22bef5b78a43f9 +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.3.6" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 30 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: GODEBUG + value: "gctrace=1" + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 3000m + memory: 2048Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15003 + - containerPort: 15005 + - containerPort: 15007 + - containerPort: 15011 + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-pilot + - 
--templateFile + - /etc/istio/proxy/envoy_pilot.yaml.tmpl + - --controlPlaneAuthPolicy + - NONE + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/security/templates/deployment.yaml +# istio CA watching all namespaces +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + replicas: 1 + selector: + matchLabels: + istio: citadel + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel + annotations: + sidecar.istio.io/inject: "false" + spec: + 
serviceAccountName: istio-citadel-service-account + containers: + - name: citadel + image: "docker.io/istio/citadel:1.3.6" + imagePullPolicy: IfNotPresent + args: + - --append-dns-names=true + - --grpc-port=8060 + - --citadel-storage-namespace=istio-system + - --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system + - --monitoring-port=15014 + - --self-signed-ca=true + - --workload-cert-ttl=2160h + env: + - name: CITADEL_ENABLE_NAMESPACES_BY_DEFAULT + value: "true" + resources: + requests: + cpu: 10m + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +spec: + replicas: 1 + selector: + matchLabels: + istio: sidecar-injector + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-sidecar-injector-service-account + containers: + - name: sidecar-injector-webhook + image: 
"docker.io/istio/sidecar_injector:1.3.6" + imagePullPolicy: IfNotPresent + args: + - --caCertFile=/etc/istio/certs/root-cert.pem + - --tlsCertFile=/etc/istio/certs/cert-chain.pem + - --tlsKeyFile=/etc/istio/certs/key.pem + - --injectConfig=/etc/istio/inject/config + - --meshConfig=/etc/istio/config/mesh + - --healthCheckInterval=2s + - --healthCheckFile=/health + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + readOnly: true + - name: certs + mountPath: /etc/istio/certs + readOnly: true + - name: inject-config + mountPath: /etc/istio/inject + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/sidecar-injector + - probe + - --probe-path=/health + - --interval=4s + initialDelaySeconds: 4 + periodSeconds: 4 + readinessProbe: + exec: + command: + - /usr/local/bin/sidecar-injector + - probe + - --probe-path=/health + - --interval=4s + initialDelaySeconds: 4 + periodSeconds: 4 + resources: + requests: + cpu: 10m + volumes: + - name: config-volume + configMap: + name: istio + - name: certs + secret: + secretName: istio.istio-sidecar-injector-service-account + - name: inject-config + configMap: + name: istio-sidecar-injector + items: + - key: config + path: config + - key: values + path: values + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler 
+metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + maxReplicas: 4 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-policy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-policy + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-telemetry + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/pilot/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-pilot + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/config.yaml +--- +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule 
+metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + host: istio-telemetry.istio-system.svc.cluster.local + trafficPolicy: + connectionPool: + http: + http2MaxRequests: 10000 + maxRequestsPerConnection: 10000 +--- +# Source: istio/charts/mixer/templates/config.yaml +# Configuration needed by Mixer. +# Mixer cluster is delivered via CDS +# Specify mixer cluster settings +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-policy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + host: istio-policy.istio-system.svc.cluster.local + trafficPolicy: + connectionPool: + http: + http2MaxRequests: 10000 + maxRequestsPerConnection: 10000 +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/mutatingwebhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: istio-sidecar-injector + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME +webhooks: + - name: sidecar-injector.istio.io + clientConfig: + service: + name: istio-sidecar-injector + namespace: istio-system + path: "/inject" + caBundle: "" + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + failurePolicy: Fail + namespaceSelector: + matchLabels: + istio-injection: enabled +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: attributemanifest +metadata: + name: istioproxy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + attributes: + origin.ip: + valueType: IP_ADDRESS + origin.uid: + valueType: STRING + origin.user: + valueType: STRING + request.headers: + valueType: STRING_MAP + request.id: + valueType: STRING + request.host: + valueType: 
STRING + request.method: + valueType: STRING + request.path: + valueType: STRING + request.url_path: + valueType: STRING + request.query_params: + valueType: STRING_MAP + request.reason: + valueType: STRING + request.referer: + valueType: STRING + request.scheme: + valueType: STRING + request.total_size: + valueType: INT64 + request.size: + valueType: INT64 + request.time: + valueType: TIMESTAMP + request.useragent: + valueType: STRING + response.code: + valueType: INT64 + response.duration: + valueType: DURATION + response.headers: + valueType: STRING_MAP + response.total_size: + valueType: INT64 + response.size: + valueType: INT64 + response.time: + valueType: TIMESTAMP + response.grpc_status: + valueType: STRING + response.grpc_message: + valueType: STRING + source.uid: + valueType: STRING + source.user: # DEPRECATED + valueType: STRING + source.principal: + valueType: STRING + destination.uid: + valueType: STRING + destination.principal: + valueType: STRING + destination.port: + valueType: INT64 + connection.event: + valueType: STRING + connection.id: + valueType: STRING + connection.received.bytes: + valueType: INT64 + connection.received.bytes_total: + valueType: INT64 + connection.sent.bytes: + valueType: INT64 + connection.sent.bytes_total: + valueType: INT64 + connection.duration: + valueType: DURATION + connection.mtls: + valueType: BOOL + connection.requested_server_name: + valueType: STRING + context.protocol: + valueType: STRING + context.proxy_error_code: + valueType: STRING + context.timestamp: + valueType: TIMESTAMP + context.time: + valueType: TIMESTAMP + # Deprecated, kept for compatibility + context.reporter.local: + valueType: BOOL + context.reporter.kind: + valueType: STRING + context.reporter.uid: + valueType: STRING + api.service: + valueType: STRING + api.version: + valueType: STRING + api.operation: + valueType: STRING + api.protocol: + valueType: STRING + request.auth.principal: + valueType: STRING + request.auth.audiences: + valueType: 
STRING + request.auth.presenter: + valueType: STRING + request.auth.claims: + valueType: STRING_MAP + request.auth.raw_claims: + valueType: STRING + request.api_key: + valueType: STRING + rbac.permissive.response_code: + valueType: STRING + rbac.permissive.effective_policy_id: + valueType: STRING + check.error_code: + valueType: INT64 + check.error_message: + valueType: STRING + check.cache_hit: + valueType: BOOL + quota.cache_hit: + valueType: BOOL + context.proxy_version: + valueType: STRING +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: attributemanifest +metadata: + name: kubernetes + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + attributes: + source.ip: + valueType: IP_ADDRESS + source.labels: + valueType: STRING_MAP + source.metadata: + valueType: STRING_MAP + source.name: + valueType: STRING + source.namespace: + valueType: STRING + source.owner: + valueType: STRING + source.serviceAccount: + valueType: STRING + source.services: + valueType: STRING + source.workload.uid: + valueType: STRING + source.workload.name: + valueType: STRING + source.workload.namespace: + valueType: STRING + destination.ip: + valueType: IP_ADDRESS + destination.labels: + valueType: STRING_MAP + destination.metadata: + valueType: STRING_MAP + destination.owner: + valueType: STRING + destination.name: + valueType: STRING + destination.container.name: + valueType: STRING + destination.namespace: + valueType: STRING + destination.service.uid: + valueType: STRING + destination.service.name: + valueType: STRING + destination.service.namespace: + valueType: STRING + destination.service.host: + valueType: STRING + destination.serviceAccount: + valueType: STRING + destination.workload.uid: + valueType: STRING + destination.workload.name: + valueType: STRING + destination.workload.namespace: + valueType: STRING +--- +# Source: istio/charts/mixer/templates/config.yaml 
+apiVersion: "config.istio.io/v1alpha2" +kind: handler +metadata: + name: kubernetesenv + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + compiledAdapter: kubernetesenv + params: + # when running from mixer root, use the following config after adding a + # symbolic link to a kubernetes config file via: + # + # $ ln -s ~/.kube/config mixer/adapter/kubernetes/kubeconfig + # + # kubeconfig_path: "mixer/adapter/kubernetes/kubeconfig" +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: instance +metadata: + name: attributes + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + compiledTemplate: kubernetes + params: + # Pass the required attribute data to the adapter + source_uid: source.uid | "" + source_ip: source.ip | ip("0.0.0.0") # default to unspecified ip addr + destination_uid: destination.uid | "" + destination_port: destination.port | 0 + attributeBindings: + # Fill the new attributes from the adapter produced output. 
+ # $out refers to an instance of OutputTemplate message + source.ip: $out.source_pod_ip | ip("0.0.0.0") + source.uid: $out.source_pod_uid | "unknown" + source.labels: $out.source_labels | emptyStringMap() + source.name: $out.source_pod_name | "unknown" + source.namespace: $out.source_namespace | "default" + source.owner: $out.source_owner | "unknown" + source.serviceAccount: $out.source_service_account_name | "unknown" + source.workload.uid: $out.source_workload_uid | "unknown" + source.workload.name: $out.source_workload_name | "unknown" + source.workload.namespace: $out.source_workload_namespace | "unknown" + destination.ip: $out.destination_pod_ip | ip("0.0.0.0") + destination.uid: $out.destination_pod_uid | "unknown" + destination.labels: $out.destination_labels | emptyStringMap() + destination.name: $out.destination_pod_name | "unknown" + destination.container.name: $out.destination_container_name | "unknown" + destination.namespace: $out.destination_namespace | "default" + destination.owner: $out.destination_owner | "unknown" + destination.serviceAccount: $out.destination_service_account_name | "unknown" + destination.workload.uid: $out.destination_workload_uid | "unknown" + destination.workload.name: $out.destination_workload_name | "unknown" + destination.workload.namespace: $out.destination_workload_namespace | "unknown" +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: rule +metadata: + name: tcpkubeattrgenrulerule + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + match: context.protocol == "tcp" + actions: + - handler: kubernetesenv + instances: + - attributes +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: rule +metadata: + name: kubeattrgenrulerule + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + actions: + - 
handler: kubernetesenv + instances: + - attributes +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: istio-security-post-install-1.3.6 + namespace: istio-system + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +spec: + template: + metadata: + name: istio-security-post-install + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + serviceAccountName: istio-security-post-install-account + containers: + - name: kubectl + image: "docker.io/istio/kubectl:1.3.6" + imagePullPolicy: IfNotPresent + command: [ "/bin/bash", "/tmp/security/run.sh", "/tmp/security/custom-resources.yaml" ] + volumeMounts: + - mountPath: "/tmp/security" + name: tmp-configmap-security + volumes: + - name: tmp-configmap-security + configMap: + name: istio-security-custom-resources + restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-no-mesh.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-no-mesh.yaml new file mode 100644 index 0000000000..465134dba8 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-ci-no-mesh.yaml @@ 
-0,0 +1,1680 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. +--- +# Source: istio/charts/galley/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-galley-configuration + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +data: + validatingwebhookconfiguration.yaml: |- + apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + metadata: + name: istio-galley + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + webhooks: + - name: pilot.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitpilot" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - httpapispecs + - httpapispecbindings + - quotaspecs + - quotaspecbindings + - operations: + - CREATE + - UPDATE + apiGroups: + - rbac.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - authentication.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - networking.istio.io + apiVersions: + - "*" + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + failurePolicy: Fail + sideEffects: None + - name: mixer.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitmixer" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - rules + - attributemanifests + - circonuses + - deniers + - fluentds + - kubernetesenvs + - listcheckers + - memquotas + - noops + - opas + - 
prometheuses + - rbacs + - solarwindses + - stackdrivers + - cloudwatches + - dogstatsds + - statsds + - stdios + - apikeys + - authorizations + - checknothings + # - kuberneteses + - listentries + - logentries + - metrics + - quotas + - reportnothings + - tracespans + - adapters + - handlers + - instances + - templates + - zipkins + failurePolicy: Fail + sideEffects: None +--- +# Source: istio/charts/security/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-security-custom-resources + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +data: + custom-resources.yaml: |- + # Authentication policy to enable permissive mode for all services (that have sidecar) in the mesh. + apiVersion: "authentication.istio.io/v1alpha1" + kind: "MeshPolicy" + metadata: + name: "default" + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + peers: + - mtls: + mode: PERMISSIVE + run.sh: |- + #!/bin/sh + + set -x + + if [ "$#" -ne "1" ]; then + echo "first argument should be path to custom resource yaml" + exit 1 + fi + + pathToResourceYAML=${1} + + kubectl get validatingwebhookconfiguration istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + echo "istio-galley validatingwebhookconfiguration found - waiting for istio-galley deployment to be ready" + while true; do + kubectl -n istio-system get deployment istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + break + fi + sleep 1 + done + kubectl -n istio-system rollout status deployment istio-galley + if [ "$?" 
-ne 0 ]; then + echo "istio-galley deployment rollout status check failed" + exit 1 + fi + echo "istio-galley deployment ready for configuration validation" + fi + sleep 5 + kubectl apply -f ${pathToResourceYAML} +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by the Mixer. + # Note that metrics will still be reported to the Mixer. + disablePolicyChecks: true + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. + enableTracing: true + + # Set accessLogFile to empty string to disable access log. 
+ accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. (MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. 
+ # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. 
+ # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. + controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. 
+ meshNetworks: |- + networks: {} +--- +# Source: istio/charts/galley/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-galley-service-account + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-security-post-install-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-citadel-service-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/galley/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-galley-istio-system + labels: + 
app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["*"] +- apiGroups: ["config.istio.io"] # istio mixer CRD watcher + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions","apps"] + resources: ["deployments"] + resourceNames: ["istio-galley"] + verbs: ["get"] +- apiGroups: [""] + resources: ["pods", "nodes", "services", "endpoints", "namespaces"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["deployments/finalizers"] + resourceNames: ["istio-galley"] + verbs: ["update"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", "ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: 
[""] + resources: ["endpoints", "pods", "services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/security/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "update"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: [""] + resources: ["serviceaccounts", "services", "namespaces"] + verbs: ["get", "watch", "list"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: istio-security-post-install-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["authentication.istio.io"] # needed to create default authn policy + resources: ["*"] + verbs: ["*"] +- apiGroups: ["networking.istio.io"] # needed to create security destination rules + resources: ["*"] + verbs: ["*"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/galley/templates/clusterrolebinding.yaml 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-galley-admin-role-binding-istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-galley-istio-system +subjects: + - kind: ServiceAccount + name: istio-galley-service-account + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-citadel-istio-system +subjects: + - kind: ServiceAccount + name: istio-citadel-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: istio-security-post-install-role-binding-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-security-post-install-istio-system +subjects: + - kind: ServiceAccount + name: istio-security-post-install-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.3.6 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/galley/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + ports: + - port: 443 + name: https-validation + - port: 15014 + name: http-monitoring + - port: 9901 + name: grpc-mcp + selector: + istio: galley +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: 
gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/security/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + # we use the normal name here (e.g. 'prometheus') + # as grafana is configured to use this as a data source + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + ports: + - name: grpc-citadel + port: 8060 + targetPort: 8060 + protocol: TCP + - name: http-monitoring + port: 15014 + selector: + istio: citadel +--- +# Source: istio/charts/galley/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + replicas: 1 + selector: + matchLabels: + istio: galley + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-galley-service-account + containers: + - name: galley + image: 
"docker.io/istio/galley:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 443 + - containerPort: 15014 + - containerPort: 9901 + command: + - /usr/local/bin/galley + - server + - --meshConfigFile=/etc/mesh-config/mesh + - --livenessProbeInterval=1s + - --livenessProbePath=/healthliveness + - --readinessProbePath=/healthready + - --readinessProbeInterval=1s + - --deployment-namespace=istio-system + - --insecure=true + - --enable-server=false + - --validation-webhook-config-file + - /etc/config/validatingwebhookconfiguration.yaml + - --monitoringPort=15014 + - --log_output_level=default:info + volumeMounts: + - name: certs + mountPath: /etc/certs + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + - name: mesh-config + mountPath: /etc/mesh-config + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthliveness + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthready + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + requests: + cpu: 10m + volumes: + - name: certs + secret: + secretName: istio.istio-galley-service-account + - name: config + configMap: + name: istio-galley-configuration + - name: mesh-config + configMap: + name: istio + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + 
- "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + replicas: 2 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 1000m + memory: 1024Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- 
+# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.3.6" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 
30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: 
"istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + checksum/config-volume: f8da08b6b8c170dde721efd680270b2901e750d4aa186ebb6c22bef5b78a43f9 +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.3.6" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + - containerPort: 15011 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 30 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: GODEBUG + value: "gctrace=1" + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 1000m + memory: 1024Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + mountPath: /etc/certs + readOnly: true + volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/security/templates/deployment.yaml +# istio CA watching all namespaces +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + replicas: 1 + selector: + matchLabels: + istio: citadel + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel + annotations: + sidecar.istio.io/inject: "false" + spec: 
+ serviceAccountName: istio-citadel-service-account + containers: + - name: citadel + image: "docker.io/istio/citadel:1.3.6" + imagePullPolicy: IfNotPresent + args: + - --append-dns-names=true + - --grpc-port=8060 + - --citadel-storage-namespace=istio-system + - --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system + - --monitoring-port=15014 + - --self-signed-ca=true + - --workload-cert-ttl=2160h + env: + - name: CITADEL_ENABLE_NAMESPACES_BY_DEFAULT + value: "true" + resources: + requests: + cpu: 10m + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + maxReplicas: 5 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/pilot/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 1 + 
scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-pilot + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: istio-security-post-install-1.3.6 + namespace: istio-system + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +spec: + template: + metadata: + name: istio-security-post-install + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + serviceAccountName: istio-security-post-install-account + containers: + - name: kubectl + image: "docker.io/istio/kubectl:1.3.6" + imagePullPolicy: IfNotPresent + command: [ "/bin/bash", "/tmp/security/run.sh", "/tmp/security/custom-resources.yaml" ] + volumeMounts: + - mountPath: "/tmp/security" + name: tmp-configmap-security + volumes: + - name: tmp-configmap-security + configMap: + name: istio-security-custom-resources + restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-crds.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-crds.yaml new file mode 100644 index 0000000000..81665ac27c --- 
/dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-crds.yaml @@ -0,0 +1,846 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. +--- +# Source: istio-init/templates/configmap-crd-10.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-10 +data: + crd-10.yaml: |- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: virtualservices.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: VirtualService + listKind: VirtualServiceList + plural: virtualservices + singular: virtualservice + shortNames: + - vs + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + additionalPrinterColumns: + - JSONPath: .spec.gateways + description: The names of gateways and sidecars that should apply these routes + name: Gateways + type: string + - JSONPath: .spec.hosts + description: The destination hosts to which traffic is being sent + name: Hosts + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: destinationrules.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: DestinationRule + listKind: DestinationRuleList + plural: destinationrules + singular: destinationrule + shortNames: + - dr + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + additionalPrinterColumns: + - JSONPath: .spec.host + description: The name of a service from the service registry + name: Host + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: serviceentries.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: ServiceEntry + listKind: ServiceEntryList + plural: serviceentries + singular: serviceentry + shortNames: + - se + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + additionalPrinterColumns: + - JSONPath: .spec.hosts + description: The hosts associated with the ServiceEntry + name: Hosts + type: string + - JSONPath: .spec.location + description: Whether the service is external to the mesh or part of the mesh (MESH_EXTERNAL or MESH_INTERNAL) + name: Location + type: string + - JSONPath: .spec.resolution + description: Service discovery mode for the hosts (NONE, STATIC, or DNS) + name: Resolution + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: gateways.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: Gateway + plural: gateways + singular: gateway + shortNames: + - gw + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: envoyfilters.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: EnvoyFilter + plural: envoyfilters + singular: envoyfilter + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: clusterrbacconfigs.rbac.istio.io + labels: + app: istio-pilot + istio: rbac + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: rbac.istio.io + names: + kind: ClusterRbacConfig + plural: clusterrbacconfigs + singular: clusterrbacconfig + categories: + - istio-io + - rbac-istio-io + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: policies.authentication.istio.io + labels: + app: istio-citadel + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: authentication.istio.io + names: + kind: Policy + plural: policies 
+ singular: policy + categories: + - istio-io + - authentication-istio-io + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: meshpolicies.authentication.istio.io + labels: + app: istio-citadel + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: authentication.istio.io + names: + kind: MeshPolicy + listKind: MeshPolicyList + plural: meshpolicies + singular: meshpolicy + categories: + - istio-io + - authentication-istio-io + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: httpapispecbindings.config.istio.io + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: HTTPAPISpecBinding + plural: httpapispecbindings + singular: httpapispecbinding + categories: + - istio-io + - apim-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: httpapispecs.config.istio.io + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: HTTPAPISpec + plural: httpapispecs + singular: httpapispec + categories: + - istio-io + - apim-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: quotaspecbindings.config.istio.io + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + 
group: config.istio.io + names: + kind: QuotaSpecBinding + plural: quotaspecbindings + singular: quotaspecbinding + categories: + - istio-io + - apim-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: quotaspecs.config.istio.io + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: QuotaSpec + plural: quotaspecs + singular: quotaspec + categories: + - istio-io + - apim-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: rules.config.istio.io + labels: + app: mixer + package: istio.io.mixer + istio: core + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: rule + plural: rules + singular: rule + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: attributemanifests.config.istio.io + labels: + app: mixer + package: istio.io.mixer + istio: core + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: attributemanifest + plural: attributemanifests + singular: attributemanifest + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: rbacconfigs.rbac.istio.io + labels: + app: mixer + package: istio.io.mixer + istio: rbac + chart: istio + 
heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: rbac.istio.io + names: + kind: RbacConfig + plural: rbacconfigs + singular: rbacconfig + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: serviceroles.rbac.istio.io + labels: + app: mixer + package: istio.io.mixer + istio: rbac + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: rbac.istio.io + names: + kind: ServiceRole + plural: serviceroles + singular: servicerole + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: servicerolebindings.rbac.istio.io + labels: + app: mixer + package: istio.io.mixer + istio: rbac + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: rbac.istio.io + names: + kind: ServiceRoleBinding + plural: servicerolebindings + singular: servicerolebinding + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + additionalPrinterColumns: + - JSONPath: .spec.roleRef.name + description: The name of the ServiceRole object being referenced + name: Reference + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + + Populated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: adapters.config.istio.io + labels: + app: mixer + package: adapter + istio: mixer-adapter + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: adapter + plural: adapters + singular: adapter + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: instances.config.istio.io + labels: + app: mixer + package: instance + istio: mixer-instance + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: instance + plural: instances + singular: instance + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: templates.config.istio.io + labels: + app: mixer + package: template + istio: mixer-template + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: template + plural: templates + singular: template + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: handlers.config.istio.io + labels: + app: mixer + package: handler + istio: mixer-handler + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: 
config.istio.io + names: + kind: handler + plural: handlers + singular: handler + categories: + - istio-io + - policy-istio-io + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + --- +--- +# Source: istio-init/templates/configmap-crd-11.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-11 +data: + crd-11.yaml: |- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + name: sidecars.networking.istio.io + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: networking.istio.io + names: + kind: Sidecar + plural: sidecars + singular: sidecar + categories: + - istio-io + - networking-istio-io + scope: Namespaced + versions: + - name: v1alpha3 + served: true + storage: true + --- +--- +# Source: istio-init/templates/configmap-crd-12.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-12 +data: + crd-12.yaml: |- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: authorizationpolicies.rbac.istio.io + labels: + app: istio-pilot + istio: rbac + heritage: Tiller + release: istio + spec: + group: rbac.istio.io + names: + kind: AuthorizationPolicy + plural: authorizationpolicies + singular: authorizationpolicy + categories: + - istio-io + - rbac-istio-io + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + --- +--- +# Source: istio-init/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-init-service-account + namespace: istio-system + labels: + app: istio-init + istio: init +--- +# Source: istio-init/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-init-istio-system + labels: + app: istio-init + istio: init +rules: +- apiGroups: ["apiextensions.k8s.io"] + 
resources: ["customresourcedefinitions"] + verbs: ["create", "get", "list", "watch", "patch"] +--- +# Source: istio-init/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-init-admin-role-binding-istio-system + labels: + app: istio-init + istio: init +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-init-istio-system +subjects: + - kind: ServiceAccount + name: istio-init-service-account + namespace: istio-system +--- +# Source: istio-init/templates/job-crd-10.yaml +apiVersion: batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-10-1.3.6 +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-10 + image: "docker.io/istio/kubectl:1.3.6" + imagePullPolicy: IfNotPresent + volumeMounts: + - name: crd-10 + mountPath: /etc/istio/crd-10 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-10/crd-10.yaml"] + volumes: + - name: crd-10 + configMap: + name: istio-crd-10 + restartPolicy: OnFailure +--- +# Source: istio-init/templates/job-crd-11.yaml +apiVersion: batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-11-1.3.6 +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-11 + image: "docker.io/istio/kubectl:1.3.6" + imagePullPolicy: IfNotPresent + volumeMounts: + - name: crd-11 + mountPath: /etc/istio/crd-11 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-11/crd-11.yaml"] + volumes: + - name: crd-11 + configMap: + name: istio-crd-11 + restartPolicy: OnFailure +--- +# Source: istio-init/templates/job-crd-12.yaml +apiVersion: batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-12-1.3.6 +spec: + template: + 
metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-12 + image: "docker.io/istio/kubectl:1.3.6" + imagePullPolicy: IfNotPresent + volumeMounts: + - name: crd-12 + mountPath: /etc/istio/crd-12 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-12/crd-12.yaml"] + volumes: + - name: crd-12 + configMap: + name: istio-crd-12 + restartPolicy: OnFailure diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-knative-extras.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-knative-extras.yaml new file mode 100644 index 0000000000..c613109ac3 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-knative-extras.yaml @@ -0,0 +1,262 @@ +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.3.6 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system 
+--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress 
+ - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 10m + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: 
beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-minimal.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-minimal.yaml new file mode 100644 index 0000000000..8bc30de3ec --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/istio-minimal.yaml @@ -0,0 +1,920 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by the Mixer. + # Note that metrics will still be reported to the Mixer. + disablePolicyChecks: true + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. + enableTracing: true + + # Set accessLogFile to empty string to disable access log. 
+ accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. (MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. 
+ # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. 
+ # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. + controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. 
+ meshNetworks: |- + networks: {} +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", "ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["endpoints", "pods", "services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +--- +# 
Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.3.6 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + 
annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + annotations: + 
sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 128Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: 
kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + replicas: 1 + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + annotations: + 
sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.3.6" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.3.6" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + 
fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: 
beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + checksum/config-volume: f8da08b6b8c170dde721efd680270b2901e750d4aa186ebb6c22bef5b78a43f9 +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.3.6" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + - containerPort: 15011 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 30 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: GODEBUG + value: "gctrace=1" + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + mountPath: /etc/certs + readOnly: true + 
volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/namespace.yaml.patch b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/namespace.yaml.patch new file mode 100644 index 0000000000..3e15f44aa1 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/namespace.yaml.patch @@ -0,0 +1,10 @@ +1a2,10 +> # PATCH #1: Creating the istio-system namespace. +> apiVersion: v1 +> kind: Namespace +> metadata: +> name: istio-system +> labels: +> istio-injection: disabled +> # PATCH #1 ends. +> --- diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-extras.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-extras.yaml new file mode 100644 index 0000000000..d6f2d68ede --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-extras.yaml @@ -0,0 +1,86 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. 
+ accessLogFile: "/dev/stdout" + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + enabled: true + + istio-ingressgateway: + enabled: false + istio-egressgateway: + enabled: false + istio-ilbgateway: + enabled: false + + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 1 + autoscaleMin: 1 + autoscaleMax: 1 + resources: {} + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +security: + enabled: false + +galley: + enabled: false + +mixer: + policy: + enabled: false + telemetry: + enabled: false + +pilot: + enabled: false + +grafana: + enabled: false + +prometheus: + enabled: false + +tracing: + enabled: false + +kiali: + enabled: false + +certmanager: + enabled: false diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-lean.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-lean.yaml new file mode 100644 index 0000000000..726abf12fe --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-lean.yaml @@ -0,0 +1,96 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. 
+ accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 5 + resources: + requests: + cpu: 1000m + memory: 1024Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 5 + resources: + requests: + cpu: 1000m + memory: 1024Mi + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + enabled: false + policy: + enabled: false + telemetry: + enabled: false + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + sidecar: false + resources: + requests: + cpu: 1000m + memory: 1024Mi + +galley: + enabled: true + +security: + enabled: true diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-local.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-local.yaml new file mode 100644 index 0000000000..0f2f0ada03 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values-local.yaml @@ 
-0,0 +1,91 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. + accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + enabled: false + policy: + enabled: false + telemetry: + enabled: false + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + sidecar: false + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + +galley: + enabled: false + +security: + enabled: false diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values.yaml new file mode 100644 index 0000000000..96fe0a36bf --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.3.6/values.yaml @@ -0,0 
+1,85 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. + accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: enabled + disablePolicyChecks: true + +sidecarInjectorWebhook: + enabled: true + enableNamespacesByDefault: false + rewriteAppHTTPProbe: true + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 4 + resources: + limits: + cpu: 3000m + memory: 2048Mi + requests: + cpu: 3000m + memory: 2048Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 4 + resources: + requests: + cpu: 250m + memory: 256Mi + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + autoscaleMin: 2 + resources: + requests: + cpu: 3000m + memory: 2048Mi diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4-latest b/test/vendor/knative.dev/serving/third_party/istio-1.4-latest new file mode 120000 index 0000000000..d2c2c177b0 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4-latest @@ -0,0 +1 @@ +istio-1.4.2 \ No newline at end of file diff --git 
a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/README.md b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/README.md new file mode 100644 index 0000000000..cffd8e93a3 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/README.md @@ -0,0 +1,17 @@ +The istio\*.yaml files are generated by running + +``` +./download-istio.sh +``` + +using Helm v3.0.1. + +The generated files are: + +- istio-ci-no-mesh.yaml: used in our continuous testing of Knative with Istio + having sidecar disabled. This is also the setting that we use in our presubmit + tests. +- istio-ci-mesh.yaml: used in our continuous testing of Knative with Istio + having sidecar and mTLS enabled. +- istio-minimal.yaml: a minimal Istio installation used for development + purposes. diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/download-istio.sh b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/download-istio.sh new file mode 100755 index 0000000000..8c4a7629ba --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/download-istio.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Download and unpack Istio +ISTIO_VERSION=1.4.2 +DOWNLOAD_URL=https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-linux.tar.gz + +wget --no-check-certificate $DOWNLOAD_URL +if [ $? 
!= 0 ]; then + echo "Failed to download istio package" + exit 1 +fi +tar xzf istio-${ISTIO_VERSION}-linux.tar.gz + +( # subshell in downloaded directory +cd istio-${ISTIO_VERSION} || exit + +# Create CRDs template +helm template --namespace=istio-system \ + install/kubernetes/helm/istio-init \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-crds.yaml + +# Create a custom cluster local gateway, based on the Istio custom-gateway template. +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-extras.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-knative-extras.yaml + +# A template with sidecar injection enabled. +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-ci-mesh.yaml + +# A lighter template, with just pilot/gateway. +# Based on install/kubernetes/helm/istio/values-istio-minimal.yaml +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-lean.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-ci-no-mesh.yaml + +# An even lighter template, with just pilot/gateway and small resource requests. +# Based on install/kubernetes/helm/istio/values-istio-minimal.yaml +helm template --namespace=istio-system install/kubernetes/helm/istio --values ../values-local.yaml \ + `# Removing trailing whitespaces to make automation happy` \ + | sed 's/[ \t]*$//' \ + > ../istio-minimal.yaml +) + +# Clean up. +rm -rf istio-${ISTIO_VERSION} +rm istio-${ISTIO_VERSION}-linux.tar.gz + +# Add in the `istio-system` namespace to reduce number of commands. 
+patch istio-crds.yaml namespace.yaml.patch +patch istio-ci-mesh.yaml namespace.yaml.patch +patch istio-ci-no-mesh.yaml namespace.yaml.patch +patch istio-minimal.yaml namespace.yaml.patch + +# Increase termination drain duration seconds. +patch -l istio-ci-mesh.yaml drain-seconds.yaml.patch diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/drain-seconds.yaml.patch b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/drain-seconds.yaml.patch new file mode 100644 index 0000000000..c4316c58c0 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/drain-seconds.yaml.patch @@ -0,0 +1,5 @@ +820a821,824 +> # PATCH #2: Increase termination drain duration. +> - name: TERMINATION_DRAIN_DURATION_SECONDS +> value: "20" +> # PATCH #2 ends. diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-mesh.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-mesh.yaml new file mode 100644 index 0000000000..a2cde9dc4c --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-mesh.yaml @@ -0,0 +1,3511 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. 
+--- +# Source: istio/charts/galley/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + + minAvailable: 1 + selector: + matchLabels: + app: galley + release: RELEASE-NAME + istio: galley +--- +# Source: istio/charts/gateways/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + + minAvailable: 1 + selector: + matchLabels: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +--- +# Source: istio/charts/gateways/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + + minAvailable: 1 + selector: + matchLabels: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +--- +# Source: istio/charts/mixer/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: telemetry + chart: mixer + heritage: Helm + release: RELEASE-NAME + version: 1.4.2 + istio: mixer + istio-mixer-type: telemetry +spec: + + minAvailable: 1 + selector: + matchLabels: + app: telemetry + release: RELEASE-NAME + istio: mixer + istio-mixer-type: telemetry +--- +# Source: istio/charts/mixer/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-policy + namespace: istio-system + labels: + app: policy + chart: mixer + 
heritage: Helm + release: RELEASE-NAME + version: 1.4.2 + istio: mixer + istio-mixer-type: policy +spec: + + minAvailable: 1 + selector: + matchLabels: + app: policy + release: RELEASE-NAME + istio: mixer + istio-mixer-type: policy +--- +# Source: istio/charts/pilot/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + + minAvailable: 1 + selector: + matchLabels: + app: pilot + release: RELEASE-NAME + istio: pilot +--- +# Source: istio/charts/security/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + + minAvailable: 1 + selector: + matchLabels: + app: security + release: RELEASE-NAME + istio: citadel +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/poddisruptionbudget.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + release: RELEASE-NAME + istio: sidecar-injector +spec: + + minAvailable: 1 + selector: + matchLabels: + app: sidecarInjectorWebhook + release: RELEASE-NAME + istio: sidecar-injector +--- +# Source: istio/charts/galley/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-galley-configuration + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +data: + validatingwebhookconfiguration.yaml: |- + apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + metadata: + name: istio-galley + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + webhooks: + - 
name: pilot.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitpilot" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - httpapispecs + - httpapispecbindings + - quotaspecs + - quotaspecbindings + - operations: + - CREATE + - UPDATE + apiGroups: + - rbac.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - security.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - authentication.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - networking.istio.io + apiVersions: + - "*" + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + failurePolicy: Fail + sideEffects: None + - name: mixer.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitmixer" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - rules + - attributemanifests + - circonuses + - deniers + - fluentds + - kubernetesenvs + - listcheckers + - memquotas + - noops + - opas + - prometheuses + - rbacs + - solarwindses + - stackdrivers + - cloudwatches + - dogstatsds + - statsds + - stdios + - apikeys + - authorizations + - checknothings + # - kuberneteses + - listentries + - logentries + - metrics + - quotas + - reportnothings + - tracespans + - adapters + - handlers + - instances + - templates + - zipkins + failurePolicy: Fail + sideEffects: None +--- +# Source: istio/charts/security/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-security-custom-resources + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME 
+ istio: citadel +data: + custom-resources.yaml: |- + # Authentication policy to enable permissive mode for all services (that have sidecar) in the mesh. + apiVersion: "authentication.istio.io/v1alpha1" + kind: "MeshPolicy" + metadata: + name: "default" + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + peers: + - mtls: + mode: PERMISSIVE + run.sh: |- + #!/bin/sh + + set -x + + if [ "$#" -ne "1" ]; then + echo "first argument should be path to custom resource yaml" + exit 1 + fi + + pathToResourceYAML=${1} + + kubectl get validatingwebhookconfiguration istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + echo "istio-galley validatingwebhookconfiguration found - waiting for istio-galley deployment to be ready" + while true; do + kubectl -n istio-system get deployment istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + break + fi + sleep 1 + done + kubectl -n istio-system rollout status deployment istio-galley + if [ "$?" -ne 0 ]; then + echo "istio-galley deployment rollout status check failed" + exit 1 + fi + echo "istio-galley deployment ready for configuration validation" + fi + sleep 5 + kubectl apply -f ${pathToResourceYAML} +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by Mixer. + # Note that metrics will still be reported to Mixer. + disablePolicyChecks: true + + disableMixerHttpReports: false + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. 
+ enableTracing: true + + # Set accessLogFile to empty string to disable access log. + accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + mixerCheckServer: istio-policy.istio-system.svc.cluster.local:9091 + mixerReportServer: istio-telemetry.istio-system.svc.cluster.local:9091 + # policyCheckFailOpen allows traffic in cases when the mixer policy service cannot be reached. + # Default is false which means the traffic is denied when the client is unable to connect to Mixer. + policyCheckFailOpen: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. 
(MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. + # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # The trust domain aliases represent the aliases of trust_domain. + # For example, if we have + # trustDomain: td1 + # trustDomainAliases: [“td2”, "td3"] + # Any service with the identity "td1/ns/foo/sa/a-service-account", "td2/ns/foo/sa/a-service-account", + # or "td3/ns/foo/sa/a-service-account" will be treated the same in the Istio mesh. + trustDomainAliases: + + # If true, automatically configure client side mTLS settings to match the corresponding service's + # server side mTLS authentication policy, when destination rule for that service does not specify + # TLS settings. + enableAutoMtls: false + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + + # Configures DNS certificates provisioned through Chiron linked into Pilot. + certificates: + [] + configSources: + - address: istio-galley.istio-system.svc:9901 + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. 
Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. + # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. 
+ controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. + meshNetworks: |- + networks: {} +--- +# Source: istio/templates/sidecar-injector-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +data: + values: |- + {"certmanager":{"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"
subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"hub":"quay.io/jetstack","image":"cert-manager-controller","nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"resources":{},"tag":"v0.8.1","tolerations":[]},"galley":{"enableAnalysis":false,"enableServiceDiscovery":false,"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/is
tio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"ena
bled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"galley","nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"gateways":{"cluster-local-gateway":{"autoscaleMax":4,"autoscaleMin":2,"cpu":{"targetAverageUtilization":80},"enabled":true,"externalIPs":[],"labels":{"app":"cluster-local-gateway","istio":"cluster-local-gateway"},"loadBalancerIP":"","loadBalancerSourceRanges":{},"podAnnotations":{},"ports":[{"name":"status-port","port":15020},{"name":"http2","port":80},{"name":"https","port":443}],"replicaCount":2,"resources":{"requests":{"cpu":"250m","memory":"256Mi"}},"secretVolumes":[{"mountPath":"/etc/istio/cluster-local-gateway-certs","name":"cluster-local-gateway-certs","secretName":"istio-cluster-local-gateway-certs"},{"mountPath":"/etc/istio/cluster-local-gateway-ca-certs","name":"cluster-local-gateway-ca-certs","secretName":"istio-cluster-local-gateway-ca-certs"}],"serviceAnnotations":{},"type":"ClusterIP"},"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default
:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{
"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"istio-egressgateway":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":false,"env":{"ISTIO_META_ROUTER_MODE":"sni-dnat"},"labels":{"app":"istio-egressgateway","istio":"egressgateway"},"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"ports":[{"name":"http2","port":80},{"name":"https","port":443},{"name":"tls","port":15443,"targetPort":15443}],"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","secretVolumes":[{"mountPath":"/etc/istio/egressgateway-certs","name":"egressgateway-certs","secretName":"istio-egressgateway-certs"},{"mountPath":"/etc/istio/egressgateway-ca-certs","name":"egressgateway-ca-certs","secretName":"istio-egressgateway-ca-certs"}],"serviceAnnotations":{},"tolerations":[],"type":"ClusterIP"},"istio-ilbgateway":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":false,"labels":{"app":"istio-ilbgateway","istio":"ilbgateway"},"loadBalancerIP":"","nodeSelector":{},"podAnnotations":{},"ports":[{"name":"grpc-pilot-mtls","port":15011},{"name":"grpc-pilot","port":15010},{"name":"tcp-citadel-grpc-tls","port":8060,"targetPort":8060},{"name":"tcp-dns","port":5353}],"resources":{"requests":{"cpu":"800m","memory":"512Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","secretVolumes":[{"mountPath":"/etc/istio/ilbgateway-certs","name":"ilbgateway-certs","secretName":"istio-ilbgateway-certs"},{"mountPath":"/etc/istio/ilbgateway-ca-certs","name":"ilbgateway-ca-certs","secretName":"istio-ilbgateway-ca-certs"}],"serviceAnnotations":{"cloud.google.com/load-balancer-type":"internal"},"tolerations":[],"type"
:"LoadBalancer"},"istio-ingressgateway":{"applicationPorts":"","autoscaleEnabled":true,"autoscaleMax":4,"autoscaleMin":2,"cpu":{"targetAverageUtilization":80},"enabled":true,"env":{"ISTIO_META_ROUTER_MODE":"sni-dnat"},"externalIPs":[],"labels":{"app":"istio-ingressgateway","istio":"ingressgateway"},"loadBalancerIP":"","loadBalancerSourceRanges":[],"meshExpansionPorts":[{"name":"tcp-pilot-grpc-tls","port":15011,"targetPort":15011},{"name":"tcp-mixer-grpc-tls","port":15004,"targetPort":15004},{"name":"tcp-citadel-grpc-tls","port":8060,"targetPort":8060},{"name":"tcp-dns-tls","port":853,"targetPort":853}],"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"ports":[{"name":"status-port","port":15020},{"name":"http2","port":80},{"name":"https","port":443}],"replicaCount":2,"resources":{"limits":{"cpu":"3000m","memory":"2048Mi"},"requests":{"cpu":"3000m","memory":"2048Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sds":{"enabled":true,"image":"node-agent-k8s","resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}},"secretVolumes":[{"mountPath":"/etc/istio/ingressgateway-certs","name":"ingressgateway-certs","secretName":"istio-ingressgateway-certs"},{"mountPath":"/etc/istio/ingressgateway-ca-certs","name":"ingressgateway-ca-certs","secretName":"istio-ingressgateway-ca-certs"}],"serviceAnnotations":{},"tolerations":[],"type":"LoadBalancer"}},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"e
nabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cace
rtPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"grafana":{"accessMode":"ReadWriteMany","contextPath":"/grafana","dashboardProviders":{"dashboardproviders.yaml":{"apiVersion":1,"providers":[{"disableDeletion":false,"folder":"istio","name":"istio","options":{"path":"/var/lib/grafana/dashboards/istio"},"orgId":1,"type":"file"}]}},"datasources":{"datasources.yaml":{"apiVersion":1,"datasources":[{"access":"proxy","editable":true,"isDefault":true,"jsonData":{"timeInterval":"5s"},"name":"Prometheus","orgId":1,"type":"prometheus","url":"http://prometheus:9090"}]}},"enabled":false,"env":{},"envSecrets":{},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"e
nabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":{"repository":"grafana/grafana","tag":"6.4.3"},"ingress":{"annotations":{},"enabled":false,"hosts":["grafana.local"],"tls":[]},"nodeSelector":{},"persist":false,"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"security":{"enabled":false,"passphraseKey":"passphrase","secretName":"grafana","usernameKey":"username"},"service":{"annotations":{},"externalPort":3000,"loadBalancerIP":null,"loadBalancerSourceRanges":[],"nam
e":"http","type":"ClusterIP"},"storageClassName":"","tolerations":[]},"istio_cni":{"enabled":false},"istiocoredns":{"coreDNSImage":"coredns/coredns","coreDNSPluginImage":"istio/coredns-plugin:0.2-istio-1.1","coreDNSTag":"1.6.2","enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"ex
cludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"kiali":{"contextPath":"/kiali","createDemoSecret":false,"dashboard":{"auth":{"strategy":"login"},"grafanaURL":null,"jaegerURL":null,"secretName":"kiali","viewOnlyMode":false},"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB
":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxN
umberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"hub":"quay.io/kiali","image":"kiali","ingress":{"annotations":{},"enabled":false,"hosts":["kiali.local"],"tls":null},"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"prometheusAddr":"http://prometheus:9090","replicaCount":1,"security":{"cert_file":"/kiali-cert/cert-chain.pem","enabled":false,"private_key_file":"/kiali-cert/key.pem"},"tag":"v1.9","tolerations":[]},"mixer":{"adapters":{"kubernetesenv":{"enabled":true},"prometheus":{"enabled":false,"metricsExpiryDuration":"10m"},"stdio":{"enabled":false,"outputAsJson":true},"useAdapterCRDs":false},"env":{"GOMAXPROCS":"6"},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled"
:false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"mixer","nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"policy":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":true,"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%"},"telemetry":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":1,"cpu":{"targetAverageUtilization":80},"enabled":true,"loadshedding":{"latencyThresho
ld":"100ms","mode":"enforce"},"replicaCount":1,"reportBatchMaxEntries":100,"reportBatchMaxTime":"1s","resources":{"limits":{"cpu":"4800m","memory":"4G"},"requests":{"cpu":"1000m","memory":"1G"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sessionAffinityEnabled":false},"tolerations":[]},"nodeagent":{"enabled":false,"env":{"CA_ADDR":"","CA_PROVIDER":"","PLUGINS":""},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificat
e":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"node-agent-k8s","nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"tolerations":[]},"pilot":{"autoscaleEnabled":true,"autoscaleMax":5,"autoscaleMin":2,"configSource":{"subscribedResources":null},"cpu":{"targetAverageUtilization":80},"enableProtocolSniffingForInbound":false,"enableProtocolSniffingForOutbound":true,"enabled":true,"env":{"PILOT_PUSH_THROTTLE":100},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"en
abled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address"
:"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"pilot","keepaliveMaxServerConnectionAge":"30m","nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"resources":{"requests":{"cpu":"3000m","memory":"2048Mi"}},"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","sidecar":true,"tolerations":[],"traceSampling":100},"prometheus":{"enabled":false},"security":{"citadelHealthCheck":false,"createMeshPolicy":true,"enableNamespacesByDefault":true,"enabled":true,"env":{},"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enable
d":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"citadel","nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","selfSigned":true,"tolerations":[],"workloadCertTtl":"2160h"},"sidecarInjectorWebhook":{"alwaysInjectSelector":[],"enableNamespacesByDefault":false,"enabled":true,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecu
rityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"enabled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThresh
old":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"image":"sidecar_injector","injectedAnnotations":{},"neverInjectSelector":[],"nodeSelector":{},"podAnnotations":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"replicaCount":1,"rewriteAppHTTPProbe":true,"rollingMaxSurge":"100%","rollingMaxUnavailable":"25%","tolerations":[]},"tracing":{"enabled":false,"global":{"arch":{"amd64":2,"ppc64le":2,"s390x":2},"certificates":[],"configValidation":true,"controlPlaneSecurityEnabled":false,"defaultNodeSelector":{},"defaultPodDisruptionBudget":{"enabled":true},"defaultResources":{"requests":{"cpu":"10m"}},"defaultTolerations":[],"disablePolicyChecks":true,"enableHelmTest":false,"enableTracing":true,"hub":"docker.io/istio","imagePullPolicy":"IfNotPresent","imagePullSecrets":[],"k8sIngress":{"enableHttps":false,"enabled":false,"gatewayName":"ingressgateway"},"localityLbSetting":{"enabled":true},"logging":{"level":"default:info"},"meshExpansion":{"enabled":false,"useILB":false},"meshID":"","meshNetworks":{},"monitoringPort":15014,"mtls":{"auto":false,"enabled":false},"multiCluster":{"clusterName":"","enabled":false},"network":"","oneNamespace":false,"operatorManageWebhooks":false,"outboundTrafficPolicy":{"mode":"ALLOW_ANY"},"policyCheckFailOpen":false,"priorityClassName":"","proxy":{"accessLogEncoding":"JSON","accessLogFile":"/dev/stdout","accessLogFormat":"","autoInject":"ena
bled","clusterDomain":"cluster.local","componentLogLevel":"","concurrency":2,"dnsRefreshRate":"300s","enableCoreDump":false,"enableCoreDumpImage":"ubuntu:xenial","envoyAccessLogService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyMetricsService":{"enabled":false,"host":null,"port":null,"tcpKeepalive":{"interval":"10s","probes":3,"time":"10s"},"tlsSettings":{"caCertificates":null,"clientCertificate":null,"mode":"DISABLE","privateKey":null,"sni":null,"subjectAltNames":[]}},"envoyStatsd":{"enabled":false,"host":null,"port":null},"excludeIPRanges":"","excludeInboundPorts":"","excludeOutboundPorts":"","image":"proxyv2","includeIPRanges":"*","includeInboundPorts":"*","init":{"resources":{"limits":{"cpu":"100m","memory":"50Mi"},"requests":{"cpu":"10m","memory":"10Mi"}}},"kubevirtInterfaces":"","logLevel":"","privileged":false,"protocolDetectionTimeout":"100ms","readinessFailureThreshold":30,"readinessInitialDelaySeconds":1,"readinessPeriodSeconds":2,"resources":{"limits":{"cpu":"2000m","memory":"1024Mi"},"requests":{"cpu":"100m","memory":"128Mi"}},"statusPort":15020,"tracer":"zipkin"},"proxy_init":{"image":"proxyv2"},"sds":{"enabled":false,"token":{"aud":"istio-ca"},"udsPath":""},"tag":"1.4.2","tracer":{"datadog":{"address":"$(HOST_IP):8126"},"lightstep":{"accessToken":"","address":"","cacertPath":"","secure":true},"stackdriver":{"debug":false,"maxNumberOfAnnotations":200,"maxNumberOfAttributes":200,"maxNumberOfMessageEvents":200},"zipkin":{"address":""}},"trustDomain":"","trustDomainAliases":[],"useMCP":true},"ingress":{"annotations":null,"enabled":false,"hosts":null,"tls":null},"jaeger":{"accessMode":"ReadWriteMany","hub":"docker.io/jaegertracing","image":"all-in-one","memory":{"max_traces":50000},"persist":false,"podAnnotations":{},"spanStorageType":"badger","storageClassName":"","
tag":1.14},"nodeSelector":{},"podAntiAffinityLabelSelector":[],"podAntiAffinityTermLabelSelector":[],"provider":"jaeger","service":{"annotations":{},"externalPort":80,"name":"http","type":"ClusterIP"},"tolerations":[],"zipkin":{"hub":"docker.io/openzipkin","image":"zipkin","javaOptsHeap":700,"maxSpans":500000,"node":{"cpus":2},"podAnnotations":{},"probeStartupDelay":200,"queryPort":9411,"resources":{"limits":{"cpu":"300m","memory":"900Mi"},"requests":{"cpu":"150m","memory":"900Mi"}},"tag":"2.14.2"}}} + + config: |- + policy: enabled + alwaysInjectSelector: + [] + neverInjectSelector: + [] + template: |- + rewriteAppHTTPProbe: {{ valueOrDefault .Values.sidecarInjectorWebhook.rewriteAppHTTPProbe false }} + {{- if or (not .Values.istio_cni.enabled) .Values.global.proxy.enableCoreDump }} + initContainers: + {{ if ne (annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode) `NONE` }} + {{- if not .Values.istio_cni.enabled }} + - name: istio-init + {{- if contains "/" .Values.global.proxy_init.image }} + image: "{{ .Values.global.proxy_init.image }}" + {{- else }} + image: "{{ .Values.global.hub }}/{{ .Values.global.proxy_init.image }}:{{ .Values.global.tag }}" + {{- end }} + command: + - istio-iptables + - "-p" + - "15001" + - "-z" + - "15006" + - "-u" + - 1337 + - "-m" + - "{{ annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode }}" + - "-i" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeOutboundIPRanges` .Values.global.proxy.includeIPRanges }}" + - "-x" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}" + - "-b" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` `*` }}" + - "-d" + - "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` 
.Values.global.proxy.excludeInboundPorts) }}" + {{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne .Values.global.proxy.excludeOutboundPorts "") -}} + - "-o" + - "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundPorts` .Values.global.proxy.excludeOutboundPorts }}" + {{ end -}} + {{ if (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces`) -}} + - "-k" + - "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}" + {{ end -}} + imagePullPolicy: "{{ .Values.global.imagePullPolicy }}" + {{- if .Values.global.proxy.init.resources }} + resources: + {{ toYaml .Values.global.proxy.init.resources | indent 4 }} + {{- else }} + resources: {} + {{- end }} + securityContext: + runAsUser: 0 + runAsNonRoot: false + capabilities: + add: + - NET_ADMIN + {{- if .Values.global.proxy.privileged }} + privileged: true + {{- end }} + restartPolicy: Always + {{- end }} + {{ end -}} + {{- if eq .Values.global.proxy.enableCoreDump true }} + - name: enable-core-dump + args: + - -c + - sysctl -w kernel.core_pattern=/var/lib/istio/core.proxy && ulimit -c unlimited + command: + - /bin/sh + image: {{ $.Values.global.proxy.enableCoreDumpImage }} + imagePullPolicy: IfNotPresent + resources: {} + securityContext: + runAsUser: 0 + runAsNonRoot: false + privileged: true + {{ end }} + {{- end }} + containers: + - name: istio-proxy + {{- if contains "/" (annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.proxy.image) }} + image: "{{ annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.proxy.image }}" + {{- else }} + image: "{{ annotation .ObjectMeta `sidecar.istio.io/proxyImage` .Values.global.hub }}/{{ .Values.global.proxy.image }}:{{ .Values.global.tag }}" + {{- end }} + ports: + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - sidecar + - --domain + - $(POD_NAMESPACE).svc.{{ .Values.global.proxy.clusterDomain }} + - 
--configPath + - "{{ .ProxyConfig.ConfigPath }}" + - --binaryPath + - "{{ .ProxyConfig.BinaryPath }}" + - --serviceCluster + {{ if ne "" (index .ObjectMeta.Labels "app") -}} + - "{{ index .ObjectMeta.Labels `app` }}.$(POD_NAMESPACE)" + {{ else -}} + - "{{ valueOrDefault .DeploymentMeta.Name `istio-proxy` }}.{{ valueOrDefault .DeploymentMeta.Namespace `default` }}" + {{ end -}} + - --drainDuration + - "{{ formatDuration .ProxyConfig.DrainDuration }}" + - --parentShutdownDuration + - "{{ formatDuration .ProxyConfig.ParentShutdownDuration }}" + - --discoveryAddress + - "{{ annotation .ObjectMeta `sidecar.istio.io/discoveryAddress` .ProxyConfig.DiscoveryAddress }}" + {{- if eq .Values.global.proxy.tracer "lightstep" }} + - --lightstepAddress + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetAddress }}" + - --lightstepAccessToken + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetAccessToken }}" + - --lightstepSecure={{ .ProxyConfig.GetTracing.GetLightstep.GetSecure }} + - --lightstepCacertPath + - "{{ .ProxyConfig.GetTracing.GetLightstep.GetCacertPath }}" + {{- else if eq .Values.global.proxy.tracer "zipkin" }} + - --zipkinAddress + - "{{ .ProxyConfig.GetTracing.GetZipkin.GetAddress }}" + {{- else if eq .Values.global.proxy.tracer "datadog" }} + - --datadogAgentAddress + - "{{ .ProxyConfig.GetTracing.GetDatadog.GetAddress }}" + {{- end }} + {{- if .Values.global.proxy.logLevel }} + - --proxyLogLevel={{ .Values.global.proxy.logLevel }} + {{- end}} + {{- if .Values.global.proxy.componentLogLevel }} + - --proxyComponentLogLevel={{ .Values.global.proxy.componentLogLevel }} + {{- end}} + - --dnsRefreshRate + - {{ .Values.global.proxy.dnsRefreshRate }} + - --connectTimeout + - "{{ formatDuration .ProxyConfig.ConnectTimeout }}" + {{- if .Values.global.proxy.envoyStatsd.enabled }} + - --statsdUdpAddress + - "{{ .ProxyConfig.StatsdUdpAddress }}" + {{- end }} + {{- if .Values.global.proxy.envoyMetricsService.enabled }} + - --envoyMetricsService + - '{{ protoToJSON 
.ProxyConfig.EnvoyMetricsService }}' + {{- end }} + {{- if .Values.global.proxy.envoyAccessLogService.enabled }} + - --envoyAccessLogService + - '{{ protoToJSON .ProxyConfig.EnvoyAccessLogService }}' + {{- end }} + - --proxyAdminPort + - "{{ .ProxyConfig.ProxyAdminPort }}" + {{ if gt .ProxyConfig.Concurrency 0 -}} + - --concurrency + - "{{ .ProxyConfig.Concurrency }}" + {{ end -}} + - --controlPlaneAuthPolicy + - "{{ annotation .ObjectMeta `sidecar.istio.io/controlPlaneAuthPolicy` .ProxyConfig.ControlPlaneAuthPolicy }}" + {{- if (ne (annotation .ObjectMeta "status.sidecar.istio.io/port" (valueOrDefault .Values.global.proxy.statusPort 0 )) `0`) }} + - --statusPort + - "{{ annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort }}" + - --applicationPorts + - "{{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/applicationPorts` (applicationPorts .Spec.Containers) }}" + {{- end }} + {{- if .Values.global.trustDomain }} + - --trust-domain={{ .Values.global.trustDomain }} + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ISTIO_META_POD_PORTS + value: |- + [ + {{- range $index1, $c := .Spec.Containers }} + {{- range $index2, $p := $c.Ports }} + {{if or (ne $index1 0) (ne $index2 0)}},{{end}}{{ structToJSON $p }} + {{- end}} + {{- end}} + ] + - name: ISTIO_META_CLUSTER_ID + value: "{{ valueOrDefault .Values.global.multiCluster.clusterName `Kubernetes` }}" + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + {{- if .Values.global.mtls.auto }} + - name: ISTIO_AUTO_MTLS_ENABLED + value: "true" + {{- end }} + {{- if eq .Values.global.proxy.tracer "datadog" }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- if isset .ObjectMeta.Annotations `apm.datadoghq.com/env` }} + {{- 
range $key, $value := fromJSON (index .ObjectMeta.Annotations `apm.datadoghq.com/env`) }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- end }} + {{- end }} + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDS_ENABLED + value: {{ $.Values.global.sds.enabled }} + - name: ISTIO_META_INTERCEPTION_MODE + value: "{{ or (index .ObjectMeta.Annotations `sidecar.istio.io/interceptionMode`) .ProxyConfig.InterceptionMode.String }}" + - name: ISTIO_META_INCLUDE_INBOUND_PORTS + value: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (applicationPorts .Spec.Containers) }}" + {{- if .Values.global.network }} + - name: ISTIO_META_NETWORK + value: "{{ .Values.global.network }}" + {{- end }} + {{ if .ObjectMeta.Annotations }} + - name: ISTIO_METAJSON_ANNOTATIONS + value: | + {{ toJSON .ObjectMeta.Annotations }} + {{ end }} + {{ if .ObjectMeta.Labels }} + - name: ISTIO_METAJSON_LABELS + value: | + {{ toJSON .ObjectMeta.Labels }} + {{ end }} + {{- if .DeploymentMeta.Name }} + - name: ISTIO_META_WORKLOAD_NAME + value: {{ .DeploymentMeta.Name }} + {{ end }} + {{- if and .TypeMeta.APIVersion .DeploymentMeta.Name }} + - name: ISTIO_META_OWNER + value: kubernetes://apis/{{ .TypeMeta.APIVersion }}/namespaces/{{ valueOrDefault .DeploymentMeta.Namespace `default` }}/{{ toLower .TypeMeta.Kind}}s/{{ .DeploymentMeta.Name }} + {{- end}} + {{- if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - name: ISTIO_BOOTSTRAP_OVERRIDE + value: "/etc/istio/custom-bootstrap/custom_bootstrap.json" + {{- end }} + {{- if .Values.global.sds.customTokenDirectory }} + - name: ISTIO_META_SDS_TOKEN_PATH + value: "{{ .Values.global.sds.customTokenDirectory -}}/sdstoken" + {{- end }} + {{- if .Values.global.meshID }} + - name: ISTIO_META_MESH_ID + value: "{{ .Values.global.meshID }}" + {{- else if 
.Values.global.trustDomain }} + - name: ISTIO_META_MESH_ID + value: "{{ .Values.global.trustDomain }}" + {{- end }} + # PATCH #2: Increase termination drain duration. + - name: TERMINATION_DRAIN_DURATION_SECONDS + value: "20" + # PATCH #2 ends. + {{- if eq .Values.global.proxy.tracer "stackdriver" }} + - name: STACKDRIVER_TRACING_ENABLED + value: "true" + - name: STACKDRIVER_TRACING_DEBUG + value: "{{ .ProxyConfig.GetTracing.GetStackdriver.GetDebug }}" + {{- if .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfAnnotations }} + - name: STACKDRIVER_TRACING_MAX_NUMBER_OF_ANNOTATIONS + value: "{{ .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfAnnotations }}" + {{- end }} + {{- if .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfAttributes }} + - name: STACKDRIVER_TRACING_MAX_NUMBER_OF_ATTRIBUTES + value: "{{ .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfAttributes }}" + {{- end }} + {{- if .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfMessageEvents }} + - name: STACKDRIVER_TRACING_MAX_NUMBER_OF_MESSAGE_EVENTS + value: "{{ .ProxyConfig.GetTracing.GetStackdriver.GetMaxNumberOfMessageEvents }}" + {{- end }} + {{- end }} + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + {{ if ne (annotation .ObjectMeta `status.sidecar.istio.io/port` (valueOrDefault .Values.global.proxy.statusPort 0 )) `0` }} + readinessProbe: + httpGet: + path: /healthz/ready + port: {{ annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort }} + initialDelaySeconds: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/initialDelaySeconds` .Values.global.proxy.readinessInitialDelaySeconds }} + periodSeconds: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/periodSeconds` .Values.global.proxy.readinessPeriodSeconds }} + failureThreshold: {{ annotation .ObjectMeta `readiness.status.sidecar.istio.io/failureThreshold` .Values.global.proxy.readinessFailureThreshold }} + {{ end -}} + securityContext: + {{- if 
.Values.global.proxy.privileged }} + privileged: true + {{- end }} + {{- if ne .Values.global.proxy.enableCoreDump true }} + readOnlyRootFilesystem: true + {{- end }} + {{ if eq (annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode) `TPROXY` -}} + capabilities: + add: + - NET_ADMIN + runAsGroup: 1337 + {{ else -}} + {{ if .Values.global.sds.enabled }} + runAsGroup: 1337 + {{- end }} + runAsUser: 1337 + {{- end }} + resources: + {{ if or (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU`) (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory`) -}} + requests: + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU`) -}} + cpu: "{{ index .ObjectMeta.Annotations `sidecar.istio.io/proxyCPU` }}" + {{ end}} + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory`) -}} + memory: "{{ index .ObjectMeta.Annotations `sidecar.istio.io/proxyMemory` }}" + {{ end }} + {{ else -}} + {{- if .Values.global.proxy.resources }} + {{ toYaml .Values.global.proxy.resources | indent 4 }} + {{- end }} + {{ end -}} + volumeMounts: + {{ if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - mountPath: /etc/istio/custom-bootstrap + name: custom-bootstrap-volume + {{- end }} + - mountPath: /etc/istio/proxy + name: istio-envoy + {{- if .Values.global.sds.enabled }} + - mountPath: /var/run/sds + name: sds-uds-path + readOnly: true + - mountPath: /var/run/secrets/tokens + name: istio-token + {{- if .Values.global.sds.customTokenDirectory }} + - mountPath: "{{ .Values.global.sds.customTokenDirectory -}}" + name: custom-sds-token + readOnly: true + {{- end }} + {{- else }} + - mountPath: /etc/certs/ + name: istio-certs + readOnly: true + {{- end }} + {{- if and (eq .Values.global.proxy.tracer "lightstep") .Values.global.tracer.lightstep.cacertPath }} + - mountPath: {{ directory .ProxyConfig.GetTracing.GetLightstep.GetCacertPath }} + name: lightstep-certs + readOnly: true + {{- end }} + {{- if isset 
.ObjectMeta.Annotations `sidecar.istio.io/userVolumeMount` }} + {{ range $index, $value := fromJSON (index .ObjectMeta.Annotations `sidecar.istio.io/userVolumeMount`) }} + - name: "{{ $index }}" + {{ toYaml $value | indent 4 }} + {{ end }} + {{- end }} + volumes: + {{- if (isset .ObjectMeta.Annotations `sidecar.istio.io/bootstrapOverride`) }} + - name: custom-bootstrap-volume + configMap: + name: {{ annotation .ObjectMeta `sidecar.istio.io/bootstrapOverride` "" }} + {{- end }} + - emptyDir: + medium: Memory + name: istio-envoy + {{- if .Values.global.sds.enabled }} + - name: sds-uds-path + hostPath: + path: /var/run/sds + - name: istio-token + projected: + sources: + - serviceAccountToken: + path: istio-token + expirationSeconds: 43200 + audience: {{ .Values.global.sds.token.aud }} + {{- if .Values.global.sds.customTokenDirectory }} + - name: custom-sds-token + secret: + secretName: sdstokensecret + {{- end }} + {{- else }} + - name: istio-certs + secret: + optional: true + {{ if eq .Spec.ServiceAccountName "" }} + secretName: istio.default + {{ else -}} + secretName: {{ printf "istio.%s" .Spec.ServiceAccountName }} + {{ end -}} + {{- if isset .ObjectMeta.Annotations `sidecar.istio.io/userVolume` }} + {{range $index, $value := fromJSON (index .ObjectMeta.Annotations `sidecar.istio.io/userVolume`) }} + - name: "{{ $index }}" + {{ toYaml $value | indent 2 }} + {{ end }} + {{ end }} + {{- end }} + {{- if and (eq .Values.global.proxy.tracer "lightstep") .Values.global.tracer.lightstep.cacertPath }} + - name: lightstep-certs + secret: + optional: true + secretName: lightstep.cacert + {{- end }} + {{- if .Values.global.podDNSSearchNamespaces }} + dnsConfig: + searches: + {{- range .Values.global.podDNSSearchNamespaces }} + - {{ render . 
}} + {{- end }} + {{- end }} + podRedirectAnnot: + sidecar.istio.io/interceptionMode: "{{ annotation .ObjectMeta `sidecar.istio.io/interceptionMode` .ProxyConfig.InterceptionMode }}" + traffic.sidecar.istio.io/includeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeOutboundIPRanges` .Values.global.proxy.includeIPRanges }}" + traffic.sidecar.istio.io/excludeOutboundIPRanges: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundIPRanges` .Values.global.proxy.excludeIPRanges }}" + traffic.sidecar.istio.io/includeInboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/includeInboundPorts` (includeInboundPorts .Spec.Containers) }}" + traffic.sidecar.istio.io/excludeInboundPorts: "{{ excludeInboundPort (annotation .ObjectMeta `status.sidecar.istio.io/port` .Values.global.proxy.statusPort) (annotation .ObjectMeta `traffic.sidecar.istio.io/excludeInboundPorts` .Values.global.proxy.excludeInboundPorts) }}" + {{ if or (isset .ObjectMeta.Annotations `traffic.sidecar.istio.io/excludeOutboundPorts`) (ne .Values.global.proxy.excludeOutboundPorts "") }} + traffic.sidecar.istio.io/excludeOutboundPorts: "{{ annotation .ObjectMeta `traffic.sidecar.istio.io/excludeOutboundPorts` .Values.global.proxy.excludeOutboundPorts }}" + {{- end }} + traffic.sidecar.istio.io/kubevirtInterfaces: "{{ index .ObjectMeta.Annotations `traffic.sidecar.istio.io/kubevirtInterfaces` }}" + injectedAnnotations: +--- +# Source: istio/charts/galley/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-galley-service-account + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# 
Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/mixer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-mixer-service-account + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-security-post-install-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-citadel-service-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-sidecar-injector-service-account + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/galley/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-galley-istio-system 
+ labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +rules: + # For reading Istio resources +- apiGroups: [ + "authentication.istio.io", + "config.istio.io", + "networking.istio.io", + "rbac.istio.io", + "security.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] + # For updating Istio resource statuses +- apiGroups: [ + "authentication.istio.io", + "config.istio.io", + "networking.istio.io", + "rbac.istio.io", + "security.istio.io"] + resources: ["*/status"] + verbs: ["update"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["*"] +- apiGroups: ["extensions","apps"] + resources: ["deployments"] + resourceNames: ["istio-galley"] + verbs: ["get"] +- apiGroups: [""] + resources: ["pods", "nodes", "services", "endpoints", "namespaces"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["deployments/finalizers"] + resourceNames: ["istio-galley"] + verbs: ["update"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/mixer/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-mixer-istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] # istio CRD watcher + resources: ["*"] + verbs: ["create", "get", "list", "watch", "patch"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["configmaps", "endpoints", "pods", "services", "namespaces", "secrets", "replicationcontrollers"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: 
istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["security.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", "ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["endpoints", "pods", "services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: ["certificates.k8s.io"] + resources: + - "certificatesigningrequests" + - "certificatesigningrequests/approval" + - "certificatesigningrequests/status" + verbs: ["update", "create", "get", "delete"] +--- +# Source: istio/charts/security/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "update"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: [""] + resources: ["serviceaccounts", "services", "namespaces"] + verbs: ["get", "watch", "list"] +- apiGroups: ["authentication.k8s.io"] + resources: 
["tokenreviews"] + verbs: ["create"] +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-security-post-install-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["authentication.istio.io"] # needed to create default authn policy + resources: ["*"] + verbs: ["*"] +- apiGroups: ["networking.istio.io"] # needed to create security destination rules + resources: ["*"] + verbs: ["*"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-sidecar-injector-istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "patch"] +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/galley/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-galley-admin-role-binding-istio-system + labels: + app: galley + chart: galley + heritage: Helm 
+ release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-galley-istio-system +subjects: + - kind: ServiceAccount + name: istio-galley-service-account + namespace: istio-system +--- +# Source: istio/charts/mixer/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-mixer-admin-role-binding-istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-mixer-istio-system +subjects: + - kind: ServiceAccount + name: istio-mixer-service-account + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-citadel-istio-system +subjects: + - kind: ServiceAccount + name: istio-citadel-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-security-post-install-role-binding-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: 
ClusterRole + name: istio-security-post-install-istio-system +subjects: + - kind: ServiceAccount + name: istio-security-post-install-account + namespace: istio-system +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-sidecar-injector-admin-role-binding-istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-sidecar-injector-istio-system +subjects: + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.4.2 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/galley/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley 
+spec: + ports: + - port: 443 + name: https-validation + - port: 15014 + name: http-monitoring + - port: 9901 + name: grpc-mcp + selector: + istio: galley +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/mixer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-telemetry + namespace: istio-system + annotations: + networking.istio.io/exportTo: "*" + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + ports: + - name: grpc-mixer + port: 9091 + - name: grpc-mixer-mtls + port: 15004 + - name: http-monitoring + port: 15014 + - name: prometheus + port: 42422 + selector: + istio: mixer + istio-mixer-type: telemetry +--- +# Source: istio/charts/mixer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-policy + namespace: istio-system + annotations: + networking.istio.io/exportTo: "*" + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + 
istio: mixer +spec: + ports: + - name: grpc-mixer + port: 9091 + - name: grpc-mixer-mtls + port: 15004 + - name: http-monitoring + port: 15014 + selector: + istio: mixer + istio-mixer-type: policy +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/security/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + # we use the normal name here (e.g. 'prometheus') + # as grafana is configured to use this as a data source + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + ports: + - name: grpc-citadel + port: 8060 + targetPort: 8060 + protocol: TCP + - name: http-monitoring + port: 15014 + selector: + istio: citadel +--- +# Source: istio/charts/sidecarInjectorWebhook/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +spec: + ports: + - port: 443 + name: https-inject + - port: 15014 + name: http-monitoring + selector: + istio: sidecar-injector +--- +# Source: istio/charts/galley/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + replicas: 1 + selector: + matchLabels: + istio: galley + strategy: + rollingUpdate: + maxSurge: 100% + 
maxUnavailable: 25% + template: + metadata: + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-galley-service-account + containers: + - name: galley + image: "docker.io/istio/galley:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 443 + - containerPort: 15014 + - containerPort: 9901 + command: + - /usr/local/bin/galley + - server + - --meshConfigFile=/etc/mesh-config/mesh + - --livenessProbeInterval=1s + - --livenessProbePath=/healthliveness + - --readinessProbePath=/healthready + - --readinessProbeInterval=1s + - --deployment-namespace=istio-system + - --insecure=true + - --enable-reconcileWebhookConfiguration=true + - --validation-webhook-config-file + - /etc/config/validatingwebhookconfiguration.yaml + - --monitoringPort=15014 + - --log_output_level=default:info + volumeMounts: + - name: certs + mountPath: /etc/certs + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + - name: mesh-config + mountPath: /etc/mesh-config + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthliveness + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthready + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + requests: + cpu: 10m + volumes: + - name: certs + secret: + secretName: istio.istio-galley-service-account + - name: config + configMap: + name: istio-galley-configuration + - name: mesh-config + configMap: + name: istio + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + 
matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME +spec: + replicas: 2 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + 
timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 256Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"cluster-local-gateway","chart":"gateways","heritage":"Helm","istio":"cluster-local-gateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + 
nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME +spec: + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: 
http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 3000m + memory: 2048Mi + requests: + cpu: 3000m + memory: 2048Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"istio-ingressgateway","chart":"gateways","heritage":"Helm","istio":"ingressgateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: 
ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + + + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/mixer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: istio-mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: mixer + istio-mixer-type: telemetry + template: + metadata: + labels: + app: telemetry + chart: mixer + heritage: Helm + release: RELEASE-NAME + security.istio.io/tlsMode: "istio" + istio: 
mixer + istio-mixer-type: telemetry + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-mixer-service-account + volumes: + - name: istio-certs + secret: + secretName: istio.istio-mixer-service-account + optional: true + - name: uds-socket + emptyDir: {} + - name: telemetry-adapter-secret + secret: + secretName: telemetry-adapter-secret + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" + containers: + - name: mixer + image: "docker.io/istio/mixer:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15014 + - containerPort: 42422 + args: + - --monitoringPort=15014 + - --address + - unix:///sock/mixer.socket + - --log_output_level=default:info + - --configStoreURL=mcp://istio-galley.istio-system.svc:9901 + - --configDefaultNamespace=istio-system + - --useAdapterCRDs=false + - --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans + - --averageLatencyThreshold + - 100ms + - --loadsheddingMode + - enforce + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: GOMAXPROCS + value: "6" + resources: + limits: + cpu: 4800m + memory: 4G + requests: + cpu: 1000m + memory: 1G + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: telemetry-adapter-secret + mountPath: /var/run/secrets/istio.io/telemetry/adapter + readOnly: true + - name: uds-socket + 
mountPath: /sock + livenessProbe: + httpGet: + path: /version + port: 15014 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9091 + - containerPort: 15004 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-telemetry + - --templateFile + - /etc/istio/proxy/envoy_telemetry.yaml.tmpl + - --controlPlaneAuthPolicy + - NONE + - --log_output_level=default:info + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock +--- +# Source: istio/charts/mixer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-policy + namespace: istio-system + labels: + app: istio-mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME + istio: mixer +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: mixer + istio-mixer-type: policy + template: + metadata: + labels: + app: policy + chart: mixer + heritage: Helm + release: RELEASE-NAME + security.istio.io/tlsMode: "istio" + istio: mixer + istio-mixer-type: policy + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-mixer-service-account + volumes: + - name: istio-certs + secret: + secretName: istio.istio-mixer-service-account + optional: true + - name: uds-socket + emptyDir: {} + - name: policy-adapter-secret + 
secret: + secretName: policy-adapter-secret + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" + containers: + - name: mixer + image: "docker.io/istio/mixer:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15014 + - containerPort: 42422 + args: + - --monitoringPort=15014 + - --address + - unix:///sock/mixer.socket + - --log_output_level=default:info + - --configStoreURL=mcp://istio-galley.istio-system.svc:9901 + - --configDefaultNamespace=istio-system + - --useAdapterCRDs=false + - --useTemplateCRDs=false + - --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: GOMAXPROCS + value: "6" + resources: + requests: + cpu: 10m + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock + livenessProbe: + httpGet: + path: /version + port: 15014 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9091 + - containerPort: 15004 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-policy + - --templateFile + - /etc/istio/proxy/envoy_policy.yaml.tmpl + - 
--controlPlaneAuthPolicy + - NONE + - --log_output_level=default:info + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: uds-socket + mountPath: /sock + - name: policy-adapter-secret + mountPath: /var/run/secrets/istio.io/policy/adapter + readOnly: true +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.4.2" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + 
apiVersion: v1 + fieldPath: metadata.namespace + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 3000m + memory: 2048Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15003 + - containerPort: 15005 + - containerPort: 15007 + - containerPort: 15011 + args: + - proxy + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --serviceCluster + - istio-pilot + - --templateFile + - /etc/istio/proxy/envoy_pilot.yaml.tmpl + - --controlPlaneAuthPolicy + - NONE + - --log_output_level=default:info + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: SDS_ENABLED + value: "false" + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + 
matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/security/templates/deployment.yaml +# istio CA watching all namespaces +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + replicas: 1 + selector: + matchLabels: + istio: citadel + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-citadel-service-account + containers: + - name: citadel + image: "docker.io/istio/citadel:1.4.2" + imagePullPolicy: IfNotPresent + args: + - --append-dns-names=true + - --grpc-port=8060 + - --citadel-storage-namespace=istio-system + - --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system + - --monitoring-port=15014 + - --self-signed-ca=true + - --workload-cert-ttl=2160h + env: + - name: CITADEL_ENABLE_NAMESPACES_BY_DEFAULT + value: "true" + resources: + requests: + cpu: 10m + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# 
Source: istio/charts/sidecarInjectorWebhook/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-sidecar-injector + namespace: istio-system + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector +spec: + replicas: 1 + selector: + matchLabels: + istio: sidecar-injector + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME + istio: sidecar-injector + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-sidecar-injector-service-account + containers: + - name: sidecar-injector-webhook + image: "docker.io/istio/sidecar_injector:1.4.2" + imagePullPolicy: IfNotPresent + args: + - --caCertFile=/etc/istio/certs/root-cert.pem + - --tlsCertFile=/etc/istio/certs/cert-chain.pem + - --tlsKeyFile=/etc/istio/certs/key.pem + - --injectConfig=/etc/istio/inject/config + - --meshConfig=/etc/istio/config/mesh + - --healthCheckInterval=2s + - --healthCheckFile=/health + - --reconcileWebhookConfig=true + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + readOnly: true + - name: certs + mountPath: /etc/istio/certs + readOnly: true + - name: inject-config + mountPath: /etc/istio/inject + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/sidecar-injector + - probe + - --probe-path=/health + - --interval=4s + initialDelaySeconds: 4 + periodSeconds: 4 + readinessProbe: + exec: + command: + - /usr/local/bin/sidecar-injector + - probe + - --probe-path=/health + - --interval=4s + initialDelaySeconds: 4 + periodSeconds: 4 + resources: + requests: + cpu: 10m + volumes: + - name: config-volume + configMap: + name: istio + - name: certs + secret: + secretName: istio.istio-sidecar-injector-service-account + - name: inject-config + configMap: + name: 
istio-sidecar-injector + items: + - key: config + path: config + - key: values + path: values + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + maxReplicas: 4 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-telemetry + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-policy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + 
maxReplicas: 5 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-policy + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/pilot/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-pilot + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/mixer/templates/config.yaml +--- +--- +# Source: istio/charts/mixer/templates/config.yaml +# Configuration needed by Mixer. +# Mixer cluster is delivered via CDS +# Specify mixer cluster settings +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-policy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + host: istio-policy.istio-system.svc.cluster.local + trafficPolicy: + portLevelSettings: + - port: + number: 15004 # grpc-mixer-mtls + tls: + mode: ISTIO_MUTUAL + - port: + number: 9091 # grpc-mixer + tls: + mode: DISABLE + connectionPool: + http: + http2MaxRequests: 10000 + maxRequestsPerConnection: 10000 +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: istio-telemetry + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + host: istio-telemetry.istio-system.svc.cluster.local + trafficPolicy: + portLevelSettings: + - port: + number: 15004 # grpc-mixer-mtls + tls: + mode: ISTIO_MUTUAL + - port: + number: 9091 # grpc-mixer + tls: + mode: DISABLE + connectionPool: + http: + http2MaxRequests: 10000 + maxRequestsPerConnection: 10000 +--- +# 
Source: istio/charts/sidecarInjectorWebhook/templates/mutatingwebhook.yaml +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: istio-sidecar-injector + labels: + app: sidecarInjectorWebhook + chart: sidecarInjectorWebhook + heritage: Helm + release: RELEASE-NAME +webhooks: + - name: sidecar-injector.istio.io + clientConfig: + service: + name: istio-sidecar-injector + namespace: istio-system + path: "/inject" + caBundle: "" + rules: + - operations: [ "CREATE" ] + apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + failurePolicy: Fail + namespaceSelector: + matchLabels: + istio-injection: enabled +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: attributemanifest +metadata: + name: istioproxy + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + attributes: + origin.ip: + valueType: IP_ADDRESS + origin.uid: + valueType: STRING + origin.user: + valueType: STRING + request.headers: + valueType: STRING_MAP + request.id: + valueType: STRING + request.host: + valueType: STRING + request.method: + valueType: STRING + request.path: + valueType: STRING + request.url_path: + valueType: STRING + request.query_params: + valueType: STRING_MAP + request.reason: + valueType: STRING + request.referer: + valueType: STRING + request.scheme: + valueType: STRING + request.total_size: + valueType: INT64 + request.size: + valueType: INT64 + request.time: + valueType: TIMESTAMP + request.useragent: + valueType: STRING + response.code: + valueType: INT64 + response.duration: + valueType: DURATION + response.headers: + valueType: STRING_MAP + response.total_size: + valueType: INT64 + response.size: + valueType: INT64 + response.time: + valueType: TIMESTAMP + response.grpc_status: + valueType: STRING + response.grpc_message: + valueType: STRING + source.uid: + valueType: STRING + source.user: # DEPRECATED + valueType: 
STRING + source.principal: + valueType: STRING + destination.uid: + valueType: STRING + destination.principal: + valueType: STRING + destination.port: + valueType: INT64 + connection.event: + valueType: STRING + connection.id: + valueType: STRING + connection.received.bytes: + valueType: INT64 + connection.received.bytes_total: + valueType: INT64 + connection.sent.bytes: + valueType: INT64 + connection.sent.bytes_total: + valueType: INT64 + connection.duration: + valueType: DURATION + connection.mtls: + valueType: BOOL + connection.requested_server_name: + valueType: STRING + context.protocol: + valueType: STRING + context.proxy_error_code: + valueType: STRING + context.timestamp: + valueType: TIMESTAMP + context.time: + valueType: TIMESTAMP + # Deprecated, kept for compatibility + context.reporter.local: + valueType: BOOL + context.reporter.kind: + valueType: STRING + context.reporter.uid: + valueType: STRING + api.service: + valueType: STRING + api.version: + valueType: STRING + api.operation: + valueType: STRING + api.protocol: + valueType: STRING + request.auth.principal: + valueType: STRING + request.auth.audiences: + valueType: STRING + request.auth.presenter: + valueType: STRING + request.auth.claims: + valueType: STRING_MAP + request.auth.raw_claims: + valueType: STRING + request.api_key: + valueType: STRING + rbac.permissive.response_code: + valueType: STRING + rbac.permissive.effective_policy_id: + valueType: STRING + check.error_code: + valueType: INT64 + check.error_message: + valueType: STRING + check.cache_hit: + valueType: BOOL + quota.cache_hit: + valueType: BOOL + context.proxy_version: + valueType: STRING +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: attributemanifest +metadata: + name: kubernetes + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + attributes: + source.ip: + valueType: IP_ADDRESS + source.labels: + valueType: 
STRING_MAP + source.metadata: + valueType: STRING_MAP + source.name: + valueType: STRING + source.namespace: + valueType: STRING + source.owner: + valueType: STRING + source.serviceAccount: + valueType: STRING + source.services: + valueType: STRING + source.workload.uid: + valueType: STRING + source.workload.name: + valueType: STRING + source.workload.namespace: + valueType: STRING + destination.ip: + valueType: IP_ADDRESS + destination.labels: + valueType: STRING_MAP + destination.metadata: + valueType: STRING_MAP + destination.owner: + valueType: STRING + destination.name: + valueType: STRING + destination.container.name: + valueType: STRING + destination.namespace: + valueType: STRING + destination.service.uid: + valueType: STRING + destination.service.name: + valueType: STRING + destination.service.namespace: + valueType: STRING + destination.service.host: + valueType: STRING + destination.serviceAccount: + valueType: STRING + destination.workload.uid: + valueType: STRING + destination.workload.name: + valueType: STRING + destination.workload.namespace: + valueType: STRING +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: handler +metadata: + name: kubernetesenv + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + compiledAdapter: kubernetesenv + params: + # when running from mixer root, use the following config after adding a + # symbolic link to a kubernetes config file via: + # + # $ ln -s ~/.kube/config mixer/adapter/kubernetes/kubeconfig + # + # kubeconfig_path: "mixer/adapter/kubernetes/kubeconfig" +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: instance +metadata: + name: attributes + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + compiledTemplate: kubernetes + params: + # Pass the required attribute data to the adapter + 
source_uid: source.uid | "" + source_ip: source.ip | ip("0.0.0.0") # default to unspecified ip addr + destination_uid: destination.uid | "" + destination_port: destination.port | 0 + attributeBindings: + # Fill the new attributes from the adapter produced output. + # $out refers to an instance of OutputTemplate message + source.ip: $out.source_pod_ip | ip("0.0.0.0") + source.uid: $out.source_pod_uid | "unknown" + source.labels: $out.source_labels | emptyStringMap() + source.name: $out.source_pod_name | "unknown" + source.namespace: $out.source_namespace | "default" + source.owner: $out.source_owner | "unknown" + source.serviceAccount: $out.source_service_account_name | "unknown" + source.workload.uid: $out.source_workload_uid | "unknown" + source.workload.name: $out.source_workload_name | "unknown" + source.workload.namespace: $out.source_workload_namespace | "unknown" + destination.ip: $out.destination_pod_ip | ip("0.0.0.0") + destination.uid: $out.destination_pod_uid | "unknown" + destination.labels: $out.destination_labels | emptyStringMap() + destination.name: $out.destination_pod_name | "unknown" + destination.container.name: $out.destination_container_name | "unknown" + destination.namespace: $out.destination_namespace | "default" + destination.owner: $out.destination_owner | "unknown" + destination.serviceAccount: $out.destination_service_account_name | "unknown" + destination.workload.uid: $out.destination_workload_uid | "unknown" + destination.workload.name: $out.destination_workload_name | "unknown" + destination.workload.namespace: $out.destination_workload_namespace | "unknown" +--- +# Source: istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: rule +metadata: + name: tcpkubeattrgenrulerule + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + match: context.protocol == "tcp" + actions: + - handler: kubernetesenv + instances: + - attributes +--- +# Source: 
istio/charts/mixer/templates/config.yaml +apiVersion: "config.istio.io/v1alpha2" +kind: rule +metadata: + name: kubeattrgenrulerule + namespace: istio-system + labels: + app: mixer + chart: mixer + heritage: Helm + release: RELEASE-NAME +spec: + actions: + - handler: kubernetesenv + instances: + - attributes +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: istio-security-post-install-1.4.2 + namespace: istio-system + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +spec: + template: + metadata: + name: istio-security-post-install + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + serviceAccountName: istio-security-post-install-account + containers: + - name: kubectl + image: "docker.io/istio/kubectl:1.4.2" + imagePullPolicy: IfNotPresent + command: [ "/bin/bash", "/tmp/security/run.sh", "/tmp/security/custom-resources.yaml" ] + volumeMounts: + - mountPath: "/tmp/security" + name: tmp-configmap-security + volumes: + - name: tmp-configmap-security + configMap: + name: istio-security-custom-resources + restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git 
a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-no-mesh.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-no-mesh.yaml new file mode 100644 index 0000000000..25d68eb918 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-ci-no-mesh.yaml @@ -0,0 +1,1736 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. +--- +# Source: istio/charts/galley/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-galley-configuration + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +data: + validatingwebhookconfiguration.yaml: |- + apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + metadata: + name: istio-galley + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + webhooks: + - name: pilot.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitpilot" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - httpapispecs + - httpapispecbindings + - quotaspecs + - quotaspecbindings + - operations: + - CREATE + - UPDATE + apiGroups: + - rbac.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - security.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - authentication.istio.io + apiVersions: + - "*" + resources: + - "*" + - operations: + - CREATE + - UPDATE + apiGroups: + - networking.istio.io + apiVersions: + - "*" + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + failurePolicy: Fail + sideEffects: 
None + - name: mixer.validation.istio.io + clientConfig: + service: + name: istio-galley + namespace: istio-system + path: "/admitmixer" + caBundle: "" + rules: + - operations: + - CREATE + - UPDATE + apiGroups: + - config.istio.io + apiVersions: + - v1alpha2 + resources: + - rules + - attributemanifests + - circonuses + - deniers + - fluentds + - kubernetesenvs + - listcheckers + - memquotas + - noops + - opas + - prometheuses + - rbacs + - solarwindses + - stackdrivers + - cloudwatches + - dogstatsds + - statsds + - stdios + - apikeys + - authorizations + - checknothings + # - kuberneteses + - listentries + - logentries + - metrics + - quotas + - reportnothings + - tracespans + - adapters + - handlers + - instances + - templates + - zipkins + failurePolicy: Fail + sideEffects: None +--- +# Source: istio/charts/security/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-security-custom-resources + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +data: + custom-resources.yaml: |- + # Authentication policy to enable permissive mode for all services (that have sidecar) in the mesh. + apiVersion: "authentication.istio.io/v1alpha1" + kind: "MeshPolicy" + metadata: + name: "default" + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + peers: + - mtls: + mode: PERMISSIVE + run.sh: |- + #!/bin/sh + + set -x + + if [ "$#" -ne "1" ]; then + echo "first argument should be path to custom resource yaml" + exit 1 + fi + + pathToResourceYAML=${1} + + kubectl get validatingwebhookconfiguration istio-galley 2>/dev/null + if [ "$?" -eq 0 ]; then + echo "istio-galley validatingwebhookconfiguration found - waiting for istio-galley deployment to be ready" + while true; do + kubectl -n istio-system get deployment istio-galley 2>/dev/null + if [ "$?" 
-eq 0 ]; then + break + fi + sleep 1 + done + kubectl -n istio-system rollout status deployment istio-galley + if [ "$?" -ne 0 ]; then + echo "istio-galley deployment rollout status check failed" + exit 1 + fi + echo "istio-galley deployment ready for configuration validation" + fi + sleep 5 + kubectl apply -f ${pathToResourceYAML} +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by Mixer. + # Note that metrics will still be reported to Mixer. + disablePolicyChecks: true + + disableMixerHttpReports: false + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. + enableTracing: true + + # Set accessLogFile to empty string to disable access log. 
+ accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. (MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. + # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # The trust domain aliases represent the aliases of trust_domain. 
+ # For example, if we have + # trustDomain: td1 + # trustDomainAliases: [“td2”, "td3"] + # Any service with the identity "td1/ns/foo/sa/a-service-account", "td2/ns/foo/sa/a-service-account", + # or "td3/ns/foo/sa/a-service-account" will be treated the same in the Istio mesh. + trustDomainAliases: + + # If true, automatically configure client side mTLS settings to match the corresponding service's + # server side mTLS authentication policy, when destination rule for that service does not specify + # TLS settings. + enableAutoMtls: false + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + + # Configures DNS certificates provisioned through Chiron linked into Pilot. + certificates: + [] + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. 
This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. + # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. + controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. 
+ meshNetworks: |- + networks: {} +--- +# Source: istio/charts/galley/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-galley-service-account + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-security-post-install-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/security/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-citadel-service-account + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/galley/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-galley-istio-system + labels: + 
app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +rules: + # For reading Istio resources +- apiGroups: [ + "authentication.istio.io", + "config.istio.io", + "networking.istio.io", + "rbac.istio.io", + "security.istio.io"] + resources: ["*"] + verbs: ["get", "list", "watch"] + # For updating Istio resource statuses +- apiGroups: [ + "authentication.istio.io", + "config.istio.io", + "networking.istio.io", + "rbac.istio.io", + "security.istio.io"] + resources: ["*/status"] + verbs: ["update"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["*"] +- apiGroups: ["extensions","apps"] + resources: ["deployments"] + resourceNames: ["istio-galley"] + verbs: ["get"] +- apiGroups: [""] + resources: ["pods", "nodes", "services", "endpoints", "namespaces"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] +- apiGroups: ["extensions"] + resources: ["deployments/finalizers"] + resourceNames: ["istio-galley"] + verbs: ["update"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["security.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", 
"ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["endpoints", "pods", "services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: ["certificates.k8s.io"] + resources: + - "certificatesigningrequests" + - "certificatesigningrequests/approval" + - "certificatesigningrequests/status" + verbs: ["update", "create", "get", "delete"] +--- +# Source: istio/charts/security/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "update"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: [""] + resources: ["serviceaccounts", "services", "namespaces"] + verbs: ["get", "watch", "list"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-security-post-install-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["authentication.istio.io"] # needed to create default authn policy + resources: ["*"] + verbs: ["*"] +- apiGroups: ["networking.istio.io"] # needed to create security destination rules + resources: ["*"] + verbs: ["*"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets"] + verbs: ["get", "list", 
"watch"] +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/galley/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-galley-admin-role-binding-istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-galley-istio-system +subjects: + - kind: ServiceAccount + name: istio-galley-service-account + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-citadel-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-citadel-istio-system +subjects: + - kind: ServiceAccount + name: istio-citadel-service-account + namespace: istio-system +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: 
istio-security-post-install-role-binding-istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-security-post-install-istio-system +subjects: + - kind: ServiceAccount + name: istio-security-post-install-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.4.2 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/galley/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + ports: + - port: 443 + name: https-validation + - port: 15014 + name: http-monitoring + - port: 9901 + name: grpc-mcp + selector: + istio: galley +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME 
+ app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/security/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + # we use the normal name here (e.g. 
'prometheus') + # as grafana is configured to use this as a data source + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + ports: + - name: grpc-citadel + port: 8060 + targetPort: 8060 + protocol: TCP + - name: http-monitoring + port: 15014 + selector: + istio: citadel +--- +# Source: istio/charts/galley/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-galley + namespace: istio-system + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley +spec: + replicas: 1 + selector: + matchLabels: + istio: galley + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: galley + chart: galley + heritage: Helm + release: RELEASE-NAME + istio: galley + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-galley-service-account + containers: + - name: galley + image: "docker.io/istio/galley:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 443 + - containerPort: 15014 + - containerPort: 9901 + command: + - /usr/local/bin/galley + - server + - --meshConfigFile=/etc/mesh-config/mesh + - --livenessProbeInterval=1s + - --livenessProbePath=/healthliveness + - --readinessProbePath=/healthready + - --readinessProbeInterval=1s + - --deployment-namespace=istio-system + - --insecure=true + - --enable-server=false + - --enable-reconcileWebhookConfiguration=true + - --validation-webhook-config-file + - /etc/config/validatingwebhookconfiguration.yaml + - --monitoringPort=15014 + - --log_output_level=default:info + volumeMounts: + - name: certs + mountPath: /etc/certs + readOnly: true + - name: config + mountPath: /etc/config + readOnly: true + - name: mesh-config + mountPath: /etc/mesh-config + readOnly: true + livenessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - 
--probe-path=/healthliveness + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + exec: + command: + - /usr/local/bin/galley + - probe + - --probe-path=/healthready + - --interval=10s + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + requests: + cpu: 10m + volumes: + - name: certs + secret: + secretName: istio.istio-galley-service-account + - name: config + configMap: + name: istio-galley-configuration + - name: mesh-config + configMap: + name: istio + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME +spec: + replicas: 2 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + 
- containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 1000m + memory: 1024Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"cluster-local-gateway","chart":"gateways","heritage":"Helm","istio":"cluster-local-gateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: 
kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME +spec: + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: 
"false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - 
name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"istio-ingressgateway","chart":"gateways","heritage":"Helm","istio":"ingressgateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + + + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + 
values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.4.2" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + - containerPort: 15011 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 1000m + memory: 1024Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + 
mountPath: /etc/certs + readOnly: true + volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/security/templates/deployment.yaml +# istio CA watching all namespaces +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-citadel + namespace: istio-system + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel +spec: + replicas: 1 + selector: + matchLabels: + istio: citadel + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + istio: citadel + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-citadel-service-account + containers: + - name: citadel + image: "docker.io/istio/citadel:1.4.2" + imagePullPolicy: IfNotPresent + args: + - --append-dns-names=true + - --grpc-port=8060 + - --citadel-storage-namespace=istio-system + - --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system + - --monitoring-port=15014 + - --self-signed-ca=true + - --workload-cert-ttl=2160h + env: + - name: CITADEL_ENABLE_NAMESPACES_BY_DEFAULT + value: "true" + resources: + requests: + cpu: 10m + affinity: + 
nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/gateways/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + maxReplicas: 5 + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/pilot/templates/autoscale.yaml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +spec: + maxReplicas: 5 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: istio-pilot + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80 +--- +# Source: istio/charts/security/templates/create-custom-resources-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: istio-security-post-install-1.4.2 + namespace: istio-system + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME +spec: + 
template: + metadata: + name: istio-security-post-install + labels: + app: security + chart: security + heritage: Helm + release: RELEASE-NAME + spec: + serviceAccountName: istio-security-post-install-account + containers: + - name: kubectl + image: "docker.io/istio/kubectl:1.4.2" + imagePullPolicy: IfNotPresent + command: [ "/bin/bash", "/tmp/security/run.sh", "/tmp/security/custom-resources.yaml" ] + volumeMounts: + - mountPath: "/tmp/security" + name: tmp-configmap-security + volumes: + - name: tmp-configmap-security + configMap: + name: istio-security-custom-resources + restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-crds.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-crds.yaml new file mode 100644 index 0000000000..0327d27d36 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-crds.yaml @@ -0,0 +1,5255 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. 
+--- +# Source: istio-init/templates/configmap-crd-10.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-10 +data: + crd-10.yaml: |- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: mixer + chart: istio + heritage: Tiller + istio: core + package: istio.io.mixer + release: istio + name: attributemanifests.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - policy-istio-io + kind: attributemanifest + plural: attributemanifests + singular: attributemanifest + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Describes the rules used to configure Mixer''s policy and + telemetry features. See more details at: https://istio.io/docs/reference/config/policy-and-telemetry/istio.policy.v1beta1.html' + properties: + attributes: + additionalProperties: + properties: + description: + description: A human-readable description of the attribute's purpose. + format: string + type: string + valueType: + description: The type of data carried by this attribute. + enum: + - VALUE_TYPE_UNSPECIFIED + - STRING + - INT64 + - DOUBLE + - BOOL + - TIMESTAMP + - IP_ADDRESS + - EMAIL_ADDRESS + - URI + - DNS_NAME + - DURATION + - STRING_MAP + type: string + type: object + description: The set of attributes this Istio component will be responsible + for producing at runtime. + type: object + name: + description: Name of the component producing these attributes. + format: string + type: string + revision: + description: The revision of this document. 
+ format: string + type: string + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + heritage: Tiller + istio: rbac + release: istio + name: clusterrbacconfigs.rbac.istio.io + spec: + group: rbac.istio.io + names: + categories: + - istio-io + - rbac-istio-io + kind: ClusterRbacConfig + plural: clusterrbacconfigs + singular: clusterrbacconfig + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for Role Based Access Control. See more details + at: https://istio.io/docs/reference/config/authorization/istio.rbac.v1alpha1.html' + properties: + enforcementMode: + enum: + - ENFORCED + - PERMISSIVE + type: string + exclusion: + description: A list of services or namespaces that should not be enforced + by Istio RBAC policies. + properties: + namespaces: + description: A list of namespaces. + items: + format: string + type: string + type: array + services: + description: A list of services. + items: + format: string + type: string + type: array + type: object + inclusion: + description: A list of services or namespaces that should be enforced + by Istio RBAC policies. + properties: + namespaces: + description: A list of namespaces. + items: + format: string + type: string + type: array + services: + description: A list of services. + items: + format: string + type: string + type: array + type: object + mode: + description: Istio RBAC mode. 
+ enum: + - "OFF" + - "ON" + - ON_WITH_INCLUSION + - ON_WITH_EXCLUSION + type: string + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: destinationrules.networking.istio.io + spec: + additionalPrinterColumns: + - JSONPath: .spec.host + description: The name of a service from the service registry + name: Host + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: DestinationRule + listKind: DestinationRuleList + plural: destinationrules + shortNames: + - dr + singular: destinationrule + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting load balancing, outlier detection, + etc. See more details at: https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule.html' + properties: + exportTo: + description: A list of namespaces to which this destination rule is + exported. + items: + format: string + type: string + type: array + host: + description: The name of a service from the service registry. 
+ format: string + type: string + subsets: + items: + properties: + labels: + additionalProperties: + format: string + type: string + type: object + name: + description: Name of the subset. + format: string + type: string + trafficPolicy: + description: Traffic policies that apply to this subset. + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the socket + to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. 
+ oneOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + format: string + type: string + path: + description: Path to set for the cookie. + format: string + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + format: string + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutiveErrors: + format: int32 + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. + items: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP + requests to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a + backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. 
+ type: string + maxRequestsPerConnection: + description: Maximum number of requests per + connection to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP + upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on + the socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer + algorithms. + oneOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + format: string + type: string + path: + description: Path to set for the cookie. + format: string + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + format: string + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. 
+ type: string + consecutiveErrors: + format: int32 + type: integer + interval: + description: Time interval between ejection sweep + analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + type: object + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections to + the upstream service. + properties: + caCertificates: + format: string + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + sni: + description: SNI string to present to the server + during TLS handshake. + format: string + type: string + subjectAltNames: + items: + format: string + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + format: string + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + format: string + type: string + subjectAltNames: + items: + format: string + type: string + type: array + type: object + type: object + type: object + type: array + trafficPolicy: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should be upgraded + to http2 for the associated destination. 
+ enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests to + a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection pool + connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection to + a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections to + a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the socket + to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + format: string + type: string + path: + description: Path to set for the cookie. + format: string + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. 
+ format: string + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutiveErrors: + format: int32 + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + type: object + portLevelSettings: + description: Traffic policies specific to individual ports. + items: + properties: + connectionPool: + properties: + http: + description: HTTP connection pool settings. + properties: + h2UpgradePolicy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + enum: + - DEFAULT + - DO_NOT_UPGRADE + - UPGRADE + type: string + http1MaxPendingRequests: + description: Maximum number of pending HTTP requests + to a destination. + format: int32 + type: integer + http2MaxRequests: + description: Maximum number of requests to a backend. + format: int32 + type: integer + idleTimeout: + description: The idle timeout for upstream connection + pool connections. + type: string + maxRequestsPerConnection: + description: Maximum number of requests per connection + to a backend. + format: int32 + type: integer + maxRetries: + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connectTimeout: + description: TCP connection timeout. + type: string + maxConnections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. + format: int32 + type: integer + tcpKeepalive: + description: If set then set SO_KEEPALIVE on the socket + to enable TCP Keepalives. 
+ properties: + interval: + description: The time duration between keep-alive + probes. + type: string + probes: + type: integer + time: + type: string + type: object + type: object + type: object + loadBalancer: + description: Settings controlling the load balancer algorithms. + oneOf: + - required: + - simple + - properties: + consistentHash: + oneOf: + - required: + - httpHeaderName + - required: + - httpCookie + - required: + - useSourceIp + required: + - consistentHash + properties: + consistentHash: + properties: + httpCookie: + description: Hash based on HTTP cookie. + properties: + name: + description: Name of the cookie. + format: string + type: string + path: + description: Path to set for the cookie. + format: string + type: string + ttl: + description: Lifetime of the cookie. + type: string + type: object + httpHeaderName: + description: Hash based on a specific HTTP header. + format: string + type: string + minimumRingSize: + type: integer + useSourceIp: + description: Hash based on the source IP address. + type: boolean + type: object + simple: + enum: + - ROUND_ROBIN + - LEAST_CONN + - RANDOM + - PASSTHROUGH + type: string + type: object + outlierDetection: + properties: + baseEjectionTime: + description: Minimum ejection duration. + type: string + consecutiveErrors: + format: int32 + type: integer + interval: + description: Time interval between ejection sweep analysis. + type: string + maxEjectionPercent: + format: int32 + type: integer + minHealthPercent: + format: int32 + type: integer + type: object + port: + properties: + number: + type: integer + type: object + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + format: string + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. 
+ format: string + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + format: string + type: string + subjectAltNames: + items: + format: string + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the upstream + service. + properties: + caCertificates: + format: string + type: string + clientCertificate: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + mode: + enum: + - DISABLE + - SIMPLE + - MUTUAL + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + sni: + description: SNI string to present to the server during TLS + handshake. + format: string + type: string + subjectAltNames: + items: + format: string + type: string + type: array + type: object + type: object + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: envoyfilters.networking.istio.io + spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: EnvoyFilter + plural: envoyfilters + singular: envoyfilter + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Customizing Envoy configuration generated by Istio. See more + details at: https://istio.io/docs/reference/config/networking/v1alpha3/envoy-filter.html' + properties: + configPatches: + description: One or more patches with match conditions. 
+ items: + properties: + applyTo: + enum: + - INVALID + - LISTENER + - FILTER_CHAIN + - NETWORK_FILTER + - HTTP_FILTER + - ROUTE_CONFIGURATION + - VIRTUAL_HOST + - HTTP_ROUTE + - CLUSTER + type: string + match: + description: Match on listener/route configuration/cluster. + oneOf: + - required: + - listener + - required: + - routeConfiguration + - required: + - cluster + properties: + cluster: + description: Match on envoy cluster attributes. + properties: + name: + description: The exact name of the cluster to match. + format: string + type: string + portNumber: + description: The service port for which this cluster was + generated. + type: integer + service: + description: The fully qualified service name for this + cluster. + format: string + type: string + subset: + description: The subset associated with the service. + format: string + type: string + type: object + context: + description: The specific config generation context to match + on. + enum: + - ANY + - SIDECAR_INBOUND + - SIDECAR_OUTBOUND + - GATEWAY + type: string + listener: + description: Match on envoy listener attributes. + properties: + filterChain: + description: Match a specific filter chain in a listener. + properties: + applicationProtocols: + description: Applies only to sidecars. + format: string + type: string + filter: + description: The name of a specific filter to apply + the patch to. + properties: + name: + description: The filter name to match on. + format: string + type: string + subFilter: + properties: + name: + description: The filter name to match on. + format: string + type: string + type: object + type: object + name: + description: The name assigned to the filter chain. + format: string + type: string + sni: + description: The SNI value used by a filter chain's + match condition. + format: string + type: string + transportProtocol: + description: Applies only to SIDECAR_INBOUND context. 
+ format: string + type: string + type: object + name: + description: Match a specific listener by its name. + format: string + type: string + portName: + format: string + type: string + portNumber: + type: integer + type: object + proxy: + description: Match on properties associated with a proxy. + properties: + metadata: + additionalProperties: + format: string + type: string + type: object + proxyVersion: + format: string + type: string + type: object + routeConfiguration: + description: Match on envoy HTTP route configuration attributes. + properties: + gateway: + format: string + type: string + name: + description: Route configuration name to match on. + format: string + type: string + portName: + description: Applicable only for GATEWAY context. + format: string + type: string + portNumber: + type: integer + vhost: + properties: + name: + format: string + type: string + route: + description: Match a specific route within the virtual + host. + properties: + action: + description: Match a route with specific action + type. + enum: + - ANY + - ROUTE + - REDIRECT + - DIRECT_RESPONSE + type: string + name: + format: string + type: string + type: object + type: object + type: object + type: object + patch: + description: The patch to apply along with the operation. + properties: + operation: + description: Determines how the patch should be applied. + enum: + - INVALID + - MERGE + - ADD + - REMOVE + - INSERT_BEFORE + - INSERT_AFTER + type: string + value: + description: The JSON config of the object being patched. + type: object + type: object + type: object + type: array + filters: + items: + properties: + filterConfig: + type: object + filterName: + description: The name of the filter to instantiate. + format: string + type: string + filterType: + description: The type of filter to instantiate. + enum: + - INVALID + - HTTP + - NETWORK + type: string + insertPosition: + description: Insert position in the filter chain. 
+ properties: + index: + description: Position of this filter in the filter chain. + enum: + - FIRST + - LAST + - BEFORE + - AFTER + type: string + relativeTo: + format: string + type: string + type: object + listenerMatch: + properties: + address: + description: One or more IP addresses to which the listener + is bound. + items: + format: string + type: string + type: array + listenerProtocol: + description: Selects a class of listeners for the same protocol. + enum: + - ALL + - HTTP + - TCP + type: string + listenerType: + description: Inbound vs outbound sidecar listener or gateway + listener. + enum: + - ANY + - SIDECAR_INBOUND + - SIDECAR_OUTBOUND + - GATEWAY + type: string + portNamePrefix: + format: string + type: string + portNumber: + type: integer + type: object + type: object + type: array + workloadLabels: + additionalProperties: + format: string + type: string + description: Deprecated. + type: object + workloadSelector: + properties: + labels: + additionalProperties: + format: string + type: string + type: object + type: object + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: gateways.networking.istio.io + spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: Gateway + plural: gateways + shortNames: + - gw + singular: gateway + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting edge load balancer. 
See more details + at: https://istio.io/docs/reference/config/networking/v1alpha3/gateway.html' + properties: + selector: + additionalProperties: + format: string + type: string + type: object + servers: + description: A list of server specifications. + items: + properties: + bind: + format: string + type: string + defaultEndpoint: + format: string + type: string + hosts: + description: One or more hosts exposed by this gateway. + items: + format: string + type: string + type: array + port: + properties: + name: + description: Label assigned to the port. + format: string + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + format: string + type: string + type: object + tls: + description: Set of TLS related options that govern the server's + behavior. + properties: + caCertificates: + description: REQUIRED if mode is `MUTUAL`. + format: string + type: string + cipherSuites: + description: 'Optional: If specified, only support the specified + cipher list.' + items: + format: string + type: string + type: array + credentialName: + format: string + type: string + httpsRedirect: + type: boolean + maxProtocolVersion: + description: 'Optional: Maximum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + minProtocolVersion: + description: 'Optional: Minimum TLS protocol version.' + enum: + - TLS_AUTO + - TLSV1_0 + - TLSV1_1 + - TLSV1_2 + - TLSV1_3 + type: string + mode: + enum: + - PASSTHROUGH + - SIMPLE + - MUTUAL + - AUTO_PASSTHROUGH + - ISTIO_MUTUAL + type: string + privateKey: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. + format: string + type: string + serverCertificate: + description: REQUIRED if mode is `SIMPLE` or `MUTUAL`. 
+ format: string + type: string + subjectAltNames: + items: + format: string + type: string + type: array + verifyCertificateHash: + items: + format: string + type: string + type: array + verifyCertificateSpki: + items: + format: string + type: string + type: array + type: object + type: object + type: array + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + name: httpapispecbindings.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - apim-istio-io + kind: HTTPAPISpecBinding + plural: httpapispecbindings + singular: httpapispecbinding + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + api_specs: + items: + properties: + name: + description: The short name of the HTTPAPISpec. + format: string + type: string + namespace: + description: Optional namespace of the HTTPAPISpec. + format: string + type: string + type: object + type: array + apiSpecs: + items: + properties: + name: + description: The short name of the HTTPAPISpec. + format: string + type: string + namespace: + description: Optional namespace of the HTTPAPISpec. + format: string + type: string + type: object + type: array + services: + description: One or more services to map the listed HTTPAPISpec onto. + items: + properties: + domain: + description: Domain suffix used to construct the service FQDN + in implementations that support such specification. + format: string + type: string + labels: + additionalProperties: + format: string + type: string + description: Optional one or more labels that uniquely identify + the service version. + type: object + name: + description: The short name of the service such as "foo". 
+ format: string + type: string + namespace: + description: Optional namespace of the service. + format: string + type: string + service: + description: The service FQDN. + format: string + type: string + type: object + type: array + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + name: httpapispecs.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - apim-istio-io + kind: HTTPAPISpec + plural: httpapispecs + singular: httpapispec + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + api_keys: + items: + oneOf: + - required: + - query + - required: + - header + - required: + - cookie + properties: + cookie: + format: string + type: string + header: + description: API key is sent in a request header. + format: string + type: string + query: + description: API Key is sent as a query parameter. + format: string + type: string + type: object + type: array + apiKeys: + items: + oneOf: + - required: + - query + - required: + - header + - required: + - cookie + properties: + cookie: + format: string + type: string + header: + description: API key is sent in a request header. + format: string + type: string + query: + description: API Key is sent as a query parameter. 
+ format: string + type: string + type: object + type: array + attributes: + properties: + attributes: + additionalProperties: + oneOf: + - required: + - stringValue + - required: + - int64Value + - required: + - doubleValue + - required: + - boolValue + - required: + - bytesValue + - required: + - timestampValue + - required: + - durationValue + - required: + - stringMapValue + properties: + boolValue: + type: boolean + bytesValue: + format: binary + type: string + doubleValue: + format: double + type: number + durationValue: + type: string + int64Value: + format: int64 + type: integer + stringMapValue: + properties: + entries: + additionalProperties: + format: string + type: string + description: Holds a set of name/value pairs. + type: object + type: object + stringValue: + format: string + type: string + timestampValue: + format: dateTime + type: string + type: object + description: A map of attribute name to its value. + type: object + type: object + patterns: + description: List of HTTP patterns to match. + items: + oneOf: + - required: + - uriTemplate + - required: + - regex + properties: + attributes: + properties: + attributes: + additionalProperties: + oneOf: + - required: + - stringValue + - required: + - int64Value + - required: + - doubleValue + - required: + - boolValue + - required: + - bytesValue + - required: + - timestampValue + - required: + - durationValue + - required: + - stringMapValue + properties: + boolValue: + type: boolean + bytesValue: + format: binary + type: string + doubleValue: + format: double + type: number + durationValue: + type: string + int64Value: + format: int64 + type: integer + stringMapValue: + properties: + entries: + additionalProperties: + format: string + type: string + description: Holds a set of name/value pairs. + type: object + type: object + stringValue: + format: string + type: string + timestampValue: + format: dateTime + type: string + type: object + description: A map of attribute name to its value. 
+ type: object + type: object + httpMethod: + format: string + type: string + regex: + format: string + type: string + uriTemplate: + format: string + type: string + type: object + type: array + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-citadel + chart: istio + heritage: Tiller + release: istio + name: meshpolicies.authentication.istio.io + spec: + group: authentication.istio.io + names: + categories: + - istio-io + - authentication-istio-io + kind: MeshPolicy + listKind: MeshPolicyList + plural: meshpolicies + singular: meshpolicy + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Authentication policy for Istio services. See more details + at: https://istio.io/docs/reference/config/istio.authentication.v1alpha1.html' + properties: + originIsOptional: + type: boolean + origins: + description: List of authentication methods that can be used for origin + authentication. + items: + properties: + jwt: + description: Jwt params for the method. + properties: + audiences: + items: + format: string + type: string + type: array + issuer: + description: Identifies the issuer that issued the JWT. + format: string + type: string + jwks: + description: JSON Web Key Set of public keys to validate signature + of the JWT. + format: string + type: string + jwks_uri: + format: string + type: string + jwksUri: + format: string + type: string + jwt_headers: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtHeaders: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtParams: + description: JWT is sent in a query parameter. 
+ items: + format: string + type: string + type: array + trigger_rules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. 
+ format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + triggerRules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. 
+ format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + type: object + type: object + type: array + peerIsOptional: + type: boolean + peers: + description: List of authentication methods that can be used for peer + authentication. + items: + oneOf: + - required: + - mtls + - required: + - jwt + properties: + jwt: + properties: + audiences: + items: + format: string + type: string + type: array + issuer: + description: Identifies the issuer that issued the JWT. + format: string + type: string + jwks: + description: JSON Web Key Set of public keys to validate signature + of the JWT. 
+ format: string + type: string + jwks_uri: + format: string + type: string + jwksUri: + format: string + type: string + jwt_headers: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtHeaders: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtParams: + description: JWT is sent in a query parameter. + items: + format: string + type: string + type: array + trigger_rules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. 
+ items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + triggerRules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. 
+ items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + type: object + mtls: + description: Set if mTLS is used. 
+ properties: + allowTls: + description: WILL BE DEPRECATED, if set, will translates to + `TLS_PERMISSIVE` mode. + type: boolean + mode: + description: Defines the mode of mTLS authentication. + enum: + - STRICT + - PERMISSIVE + type: string + type: object + type: object + type: array + principalBinding: + description: Define whether peer or origin identity should be use for + principal. + enum: + - USE_PEER + - USE_ORIGIN + type: string + targets: + description: List rules to select workloads that the policy should be + applied on. + items: + properties: + labels: + additionalProperties: + format: string + type: string + type: object + name: + description: The name must be a short name from the service registry. + format: string + type: string + ports: + description: Specifies the ports. + items: + oneOf: + - required: + - number + - required: + - name + properties: + name: + format: string + type: string + number: + type: integer + type: object + type: array + type: object + type: array + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-citadel + chart: istio + heritage: Tiller + release: istio + name: policies.authentication.istio.io + spec: + group: authentication.istio.io + names: + categories: + - istio-io + - authentication-istio-io + kind: Policy + plural: policies + singular: policy + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Authentication policy for Istio services. See more details + at: https://istio.io/docs/reference/config/istio.authentication.v1alpha1.html' + properties: + originIsOptional: + type: boolean + origins: + description: List of authentication methods that can be used for origin + authentication. 
+ items: + properties: + jwt: + description: Jwt params for the method. + properties: + audiences: + items: + format: string + type: string + type: array + issuer: + description: Identifies the issuer that issued the JWT. + format: string + type: string + jwks: + description: JSON Web Key Set of public keys to validate signature + of the JWT. + format: string + type: string + jwks_uri: + format: string + type: string + jwksUri: + format: string + type: string + jwt_headers: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtHeaders: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtParams: + description: JWT is sent in a query parameter. + items: + format: string + type: string + type: array + trigger_rules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). 
+ format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + triggerRules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). 
+ format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. 
+ format: string + type: string + type: object + type: array + type: object + type: array + type: object + type: object + type: array + peerIsOptional: + type: boolean + peers: + description: List of authentication methods that can be used for peer + authentication. + items: + oneOf: + - required: + - mtls + - required: + - jwt + properties: + jwt: + properties: + audiences: + items: + format: string + type: string + type: array + issuer: + description: Identifies the issuer that issued the JWT. + format: string + type: string + jwks: + description: JSON Web Key Set of public keys to validate signature + of the JWT. + format: string + type: string + jwks_uri: + format: string + type: string + jwksUri: + format: string + type: string + jwt_headers: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtHeaders: + description: JWT is sent in a request header. + items: + format: string + type: string + type: array + jwtParams: + description: JWT is sent in a query parameter. + items: + format: string + type: string + type: array + trigger_rules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. 
+ items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + triggerRules: + items: + properties: + excluded_paths: + description: List of paths to be excluded from the request. 
+ items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + excludedPaths: + description: List of paths to be excluded from the request. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + included_paths: + description: List of paths that the request must include. + items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + includedPaths: + description: List of paths that the request must include. 
+ items: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - suffix + - required: + - regex + properties: + exact: + description: exact string match. + format: string + type: string + prefix: + description: prefix-based match. + format: string + type: string + regex: + description: ECMAscript style regex-based match + as defined by [EDCA-262](http://en.cppreference.com/w/cpp/regex/ecmascript). + format: string + type: string + suffix: + description: suffix-based match. + format: string + type: string + type: object + type: array + type: object + type: array + type: object + mtls: + description: Set if mTLS is used. + properties: + allowTls: + description: WILL BE DEPRECATED, if set, will translates to + `TLS_PERMISSIVE` mode. + type: boolean + mode: + description: Defines the mode of mTLS authentication. + enum: + - STRICT + - PERMISSIVE + type: string + type: object + type: object + type: array + principalBinding: + description: Define whether peer or origin identity should be use for + principal. + enum: + - USE_PEER + - USE_ORIGIN + type: string + targets: + description: List rules to select workloads that the policy should be + applied on. + items: + properties: + labels: + additionalProperties: + format: string + type: string + type: object + name: + description: The name must be a short name from the service registry. + format: string + type: string + ports: + description: Specifies the ports. 
+ items: + oneOf: + - required: + - number + - required: + - name + properties: + name: + format: string + type: string + number: + type: integer + type: object + type: array + type: object + type: array + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + name: quotaspecbindings.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - apim-istio-io + kind: QuotaSpecBinding + plural: quotaspecbindings + singular: quotaspecbinding + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + quotaSpecs: + items: + properties: + name: + description: The short name of the QuotaSpec. + format: string + type: string + namespace: + description: Optional namespace of the QuotaSpec. + format: string + type: string + type: object + type: array + services: + description: One or more services to map the listed QuotaSpec onto. + items: + properties: + domain: + description: Domain suffix used to construct the service FQDN + in implementations that support such specification. + format: string + type: string + labels: + additionalProperties: + format: string + type: string + description: Optional one or more labels that uniquely identify + the service version. + type: object + name: + description: The short name of the service such as "foo". + format: string + type: string + namespace: + description: Optional namespace of the service. + format: string + type: string + service: + description: The service FQDN. 
+ format: string + type: string + type: object + type: array + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-mixer + chart: istio + heritage: Tiller + release: istio + name: quotaspecs.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - apim-istio-io + kind: QuotaSpec + plural: quotaspecs + singular: quotaspec + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: Determines the quotas used for individual requests. + properties: + rules: + description: A list of Quota rules. + items: + properties: + match: + description: If empty, match all request. + items: + properties: + clause: + additionalProperties: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + description: Map of attribute names to StringMatch type. + type: object + type: object + type: array + quotas: + description: The list of quotas to charge. 
+ items: + properties: + charge: + format: int32 + type: integer + quota: + format: string + type: string + type: object + type: array + type: object + type: array + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: mixer + chart: istio + heritage: Tiller + istio: rbac + package: istio.io.mixer + release: istio + name: rbacconfigs.rbac.istio.io + spec: + group: rbac.istio.io + names: + categories: + - istio-io + - rbac-istio-io + kind: RbacConfig + plural: rbacconfigs + singular: rbacconfig + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for Role Based Access Control. See more details + at: https://istio.io/docs/reference/config/authorization/istio.rbac.v1alpha1.html' + properties: + enforcementMode: + enum: + - ENFORCED + - PERMISSIVE + type: string + exclusion: + description: A list of services or namespaces that should not be enforced + by Istio RBAC policies. + properties: + namespaces: + description: A list of namespaces. + items: + format: string + type: string + type: array + services: + description: A list of services. + items: + format: string + type: string + type: array + type: object + inclusion: + description: A list of services or namespaces that should be enforced + by Istio RBAC policies. + properties: + namespaces: + description: A list of namespaces. + items: + format: string + type: string + type: array + services: + description: A list of services. + items: + format: string + type: string + type: array + type: object + mode: + description: Istio RBAC mode. 
+ enum: + - "OFF" + - "ON" + - ON_WITH_INCLUSION + - ON_WITH_EXCLUSION + type: string + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: mixer + chart: istio + heritage: Tiller + istio: core + package: istio.io.mixer + release: istio + name: rules.config.istio.io + spec: + group: config.istio.io + names: + categories: + - istio-io + - policy-istio-io + kind: rule + plural: rules + singular: rule + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Describes the rules used to configure Mixer''s policy and + telemetry features. See more details at: https://istio.io/docs/reference/config/policy-and-telemetry/istio.policy.v1beta1.html' + properties: + actions: + description: The actions that will be executed when match evaluates + to `true`. + items: + properties: + handler: + description: Fully qualified name of the handler to invoke. + format: string + type: string + instances: + items: + format: string + type: string + type: array + name: + description: A handle to refer to the results of the action. + format: string + type: string + type: object + type: array + match: + description: Match is an attribute based predicate. + format: string + type: string + requestHeaderOperations: + items: + properties: + name: + description: Header name literal value. + format: string + type: string + operation: + description: Header operation type. + enum: + - REPLACE + - REMOVE + - APPEND + type: string + values: + description: Header value expressions. + items: + format: string + type: string + type: array + type: object + type: array + responseHeaderOperations: + items: + properties: + name: + description: Header name literal value. + format: string + type: string + operation: + description: Header operation type. 
+ enum: + - REPLACE + - REMOVE + - APPEND + type: string + values: + description: Header value expressions. + items: + format: string + type: string + type: array + type: object + type: array + sampling: + properties: + random: + description: Provides filtering of actions based on random selection + per request. + properties: + attributeExpression: + description: Specifies an attribute expression to use to override + the numerator in the `percent_sampled` field. + format: string + type: string + percentSampled: + description: The default sampling rate, expressed as a percentage. + properties: + denominator: + description: Specifies the denominator. + enum: + - HUNDRED + - TEN_THOUSAND + type: string + numerator: + description: Specifies the numerator. + type: integer + type: object + useIndependentRandomness: + description: By default sampling will be based on the value + of the request header `x-request-id`. + type: boolean + type: object + rateLimit: + properties: + maxUnsampledEntries: + description: Number of entries to allow during the `sampling_duration` + before sampling is enforced. + format: int64 + type: integer + samplingDuration: + description: Window in which to enforce the sampling rate. + type: string + samplingRate: + description: The rate at which to sample entries once the unsampled + limit has been reached. 
+ format: int64 + type: integer + type: object + type: object + type: object + type: object + versions: + - name: v1alpha2 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: serviceentries.networking.istio.io + spec: + additionalPrinterColumns: + - JSONPath: .spec.hosts + description: The hosts associated with the ServiceEntry + name: Hosts + type: string + - JSONPath: .spec.location + description: Whether the service is external to the mesh or part of the mesh (MESH_EXTERNAL + or MESH_INTERNAL) + name: Location + type: string + - JSONPath: .spec.resolution + description: Service discovery mode for the hosts (NONE, STATIC, or DNS) + name: Resolution + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: ServiceEntry + listKind: ServiceEntryList + plural: serviceentries + shortNames: + - se + singular: serviceentry + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting service registry. See more details + at: https://istio.io/docs/reference/config/networking/v1alpha3/service-entry.html' + properties: + addresses: + description: The virtual IP addresses associated with the service. 
+ items: + format: string + type: string + type: array + endpoints: + description: One or more endpoints associated with the service. + items: + properties: + address: + format: string + type: string + labels: + additionalProperties: + format: string + type: string + description: One or more labels associated with the endpoint. + type: object + locality: + description: The locality associated with the endpoint. + format: string + type: string + network: + format: string + type: string + ports: + additionalProperties: + type: integer + description: Set of ports associated with the endpoint. + type: object + weight: + description: The load balancing weight associated with the endpoint. + type: integer + type: object + type: array + exportTo: + description: A list of namespaces to which this service is exported. + items: + format: string + type: string + type: array + hosts: + description: The hosts associated with the ServiceEntry. + items: + format: string + type: string + type: array + location: + enum: + - MESH_EXTERNAL + - MESH_INTERNAL + type: string + ports: + description: The ports associated with the external service. + items: + properties: + name: + description: Label assigned to the port. + format: string + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + format: string + type: string + type: object + type: array + resolution: + description: Service discovery mode for the hosts. 
+ enum: + - NONE + - STATIC + - DNS + type: string + subjectAltNames: + items: + format: string + type: string + type: array + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: mixer + chart: istio + heritage: Tiller + istio: rbac + package: istio.io.mixer + release: istio + name: servicerolebindings.rbac.istio.io + spec: + additionalPrinterColumns: + - JSONPath: .spec.roleRef.name + description: The name of the ServiceRole object being referenced + name: Reference + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + group: rbac.istio.io + names: + categories: + - istio-io + - rbac-istio-io + kind: ServiceRoleBinding + plural: servicerolebindings + singular: servicerolebinding + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for Role Based Access Control. See more details + at: https://istio.io/docs/reference/config/authorization/istio.rbac.v1alpha1.html' + properties: + actions: + items: + properties: + constraints: + description: Optional. + items: + properties: + key: + description: Key of the constraint. + format: string + type: string + values: + description: List of valid values for the constraint. 
+ items: + format: string + type: string + type: array + type: object + type: array + hosts: + items: + format: string + type: string + type: array + methods: + description: Optional. + items: + format: string + type: string + type: array + notHosts: + items: + format: string + type: string + type: array + notMethods: + items: + format: string + type: string + type: array + notPaths: + items: + format: string + type: string + type: array + notPorts: + items: + format: int32 + type: integer + type: array + paths: + description: Optional. + items: + format: string + type: string + type: array + ports: + items: + format: int32 + type: integer + type: array + services: + description: A list of service names. + items: + format: string + type: string + type: array + type: object + type: array + mode: + enum: + - ENFORCED + - PERMISSIVE + type: string + role: + format: string + type: string + roleRef: + description: Reference to the ServiceRole object. + properties: + kind: + description: The type of the role being referenced. + format: string + type: string + name: + description: The name of the ServiceRole object being referenced. + format: string + type: string + type: object + subjects: + description: List of subjects that are assigned the ServiceRole object. + items: + properties: + group: + format: string + type: string + groups: + items: + format: string + type: string + type: array + ips: + items: + format: string + type: string + type: array + names: + items: + format: string + type: string + type: array + namespaces: + items: + format: string + type: string + type: array + notGroups: + items: + format: string + type: string + type: array + notIps: + items: + format: string + type: string + type: array + notNames: + items: + format: string + type: string + type: array + notNamespaces: + items: + format: string + type: string + type: array + properties: + additionalProperties: + format: string + type: string + description: Optional. 
+ type: object + user: + description: Optional. + format: string + type: string + type: object + type: array + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: mixer + chart: istio + heritage: Tiller + istio: rbac + package: istio.io.mixer + release: istio + name: serviceroles.rbac.istio.io + spec: + group: rbac.istio.io + names: + categories: + - istio-io + - rbac-istio-io + kind: ServiceRole + plural: serviceroles + singular: servicerole + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for Role Based Access Control. See more details + at: https://istio.io/docs/reference/config/authorization/istio.rbac.v1alpha1.html' + properties: + rules: + description: The set of access rules (permissions) that the role has. + items: + properties: + constraints: + description: Optional. + items: + properties: + key: + description: Key of the constraint. + format: string + type: string + values: + description: List of valid values for the constraint. + items: + format: string + type: string + type: array + type: object + type: array + hosts: + items: + format: string + type: string + type: array + methods: + description: Optional. + items: + format: string + type: string + type: array + notHosts: + items: + format: string + type: string + type: array + notMethods: + items: + format: string + type: string + type: array + notPaths: + items: + format: string + type: string + type: array + notPorts: + items: + format: int32 + type: integer + type: array + paths: + description: Optional. + items: + format: string + type: string + type: array + ports: + items: + format: int32 + type: integer + type: array + services: + description: A list of service names. 
+ items: + format: string + type: string + type: array + type: object + type: array + type: object + type: object + versions: + - name: v1alpha1 + served: true + storage: true + + --- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: virtualservices.networking.istio.io + spec: + additionalPrinterColumns: + - JSONPath: .spec.gateways + description: The names of gateways and sidecars that should apply these routes + name: Gateways + type: string + - JSONPath: .spec.hosts + description: The destination hosts to which traffic is being sent + name: Hosts + type: string + - JSONPath: .metadata.creationTimestamp + description: |- + CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + name: Age + type: date + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: VirtualService + listKind: VirtualServiceList + plural: virtualservices + shortNames: + - vs + singular: virtualservice + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting label/content routing, sni routing, + etc. See more details at: https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service.html' + properties: + exportTo: + description: A list of namespaces to which this virtual service is exported. + items: + format: string + type: string + type: array + gateways: + description: The names of gateways and sidecars that should apply these + routes. 
+ items: + format: string + type: string + type: array + hosts: + description: The destination hosts to which traffic is being sent. + items: + format: string + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. + items: + properties: + appendHeaders: + additionalProperties: + format: string + type: string + type: object + appendRequestHeaders: + additionalProperties: + format: string + type: string + type: object + appendResponseHeaders: + additionalProperties: + format: string + type: string + type: object + corsPolicy: + description: Cross-Origin Resource Sharing policy (CORS). + properties: + allowCredentials: + nullable: true + type: boolean + allowHeaders: + items: + format: string + type: string + type: array + allowMethods: + description: List of HTTP methods allowed to access the resource. + items: + format: string + type: string + type: array + allowOrigin: + description: The list of origins that are allowed to perform + CORS requests. + items: + format: string + type: string + type: array + exposeHeaders: + items: + format: string + type: string + type: array + maxAge: + type: string + type: object + fault: + description: Fault injection policy to apply on HTTP traffic at + the client side. + properties: + abort: + oneOf: + - properties: + percent: {} + required: + - httpStatus + - properties: + percent: {} + required: + - grpcStatus + - properties: + percent: {} + required: + - http2Error + properties: + grpcStatus: + format: string + type: string + http2Error: + format: string + type: string + httpStatus: + description: HTTP status code to use to abort the Http + request. + format: int32 + type: integer + percent: + description: Percentage of requests to be aborted with + the error code provided (0-100). + format: int32 + type: integer + percentage: + description: Percentage of requests to be aborted with + the error code provided. 
+ properties: + value: + format: double + type: number + type: object + type: object + delay: + oneOf: + - properties: + percent: {} + required: + - fixedDelay + - properties: + percent: {} + required: + - exponentialDelay + properties: + exponentialDelay: + type: string + fixedDelay: + description: Add a fixed delay before forwarding the request. + type: string + percent: + description: Percentage of requests on which the delay + will be injected (0-100). + format: int32 + type: integer + percentage: + description: Percentage of requests on which the delay + will be injected. + properties: + value: + format: double + type: number + type: object + type: object + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + format: string + type: string + type: object + remove: + items: + format: string + type: string + type: array + set: + additionalProperties: + format: string + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + format: string + type: string + type: object + remove: + items: + format: string + type: string + type: array + set: + additionalProperties: + format: string + type: string + type: object + type: object + type: object + match: + items: + properties: + authority: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + gateways: + items: + format: string + type: string + type: array + headers: + additionalProperties: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + type: object + ignoreUriCase: + description: Flag to specify whether the URI matching should + be case-insensitive. 
+ type: boolean + method: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + name: + description: The name assigned to a match. + format: string + type: string + port: + description: Specifies the ports on the host that is being + addressed. + type: integer + queryParams: + additionalProperties: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + description: Query parameters for matching. + type: object + scheme: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + sourceLabels: + additionalProperties: + format: string + type: string + type: object + uri: + oneOf: + - required: + - exact + - required: + - prefix + - required: + - regex + properties: + exact: + format: string + type: string + prefix: + format: string + type: string + regex: + format: string + type: string + type: object + type: object + type: array + mirror: + properties: + host: + description: The name of a service from the service registry. + format: string + type: string + port: + description: Specifies the port on the host that is being + addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + format: string + type: string + type: object + mirror_percent: + description: Percentage of the traffic to be mirrored by the `mirror` + field. + nullable: true + type: integer + mirrorPercent: + description: Percentage of the traffic to be mirrored by the `mirror` + field. 
+ nullable: true + type: integer + name: + description: The name assigned to the route for debugging purposes. + format: string + type: string + redirect: + description: A http rule can either redirect or forward (default) + traffic. + properties: + authority: + format: string + type: string + redirectCode: + type: integer + uri: + format: string + type: string + type: object + removeRequestHeaders: + items: + format: string + type: string + type: array + removeResponseHeaders: + items: + format: string + type: string + type: array + retries: + description: Retry policy for HTTP requests. + properties: + attempts: + description: Number of retries for a given request. + format: int32 + type: integer + perTryTimeout: + description: Timeout per retry attempt for a given request. + type: string + retryOn: + description: Specifies the conditions under which retry takes + place. + format: string + type: string + type: object + rewrite: + description: Rewrite HTTP URIs and Authority headers. + properties: + authority: + description: rewrite the Authority/Host header with this value. + format: string + type: string + uri: + format: string + type: string + type: object + route: + description: A http rule can either redirect or forward (default) + traffic. + items: + properties: + appendRequestHeaders: + additionalProperties: + format: string + type: string + description: Use of `append_request_headers` is deprecated. + type: object + appendResponseHeaders: + additionalProperties: + format: string + type: string + description: Use of `append_response_headers` is deprecated. + type: object + destination: + properties: + host: + description: The name of a service from the service + registry. + format: string + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. 
+ format: string + type: string + type: object + headers: + properties: + request: + properties: + add: + additionalProperties: + format: string + type: string + type: object + remove: + items: + format: string + type: string + type: array + set: + additionalProperties: + format: string + type: string + type: object + type: object + response: + properties: + add: + additionalProperties: + format: string + type: string + type: object + remove: + items: + format: string + type: string + type: array + set: + additionalProperties: + format: string + type: string + type: object + type: object + type: object + removeRequestHeaders: + description: Use of `remove_request_headers` is deprecated. + items: + format: string + type: string + type: array + removeResponseHeaders: + description: Use of `remove_response_header` is deprecated. + items: + format: string + type: string + type: array + weight: + format: int32 + type: integer + type: object + type: array + timeout: + description: Timeout for HTTP requests. + type: string + websocketUpgrade: + description: Deprecated. + type: boolean + type: object + type: array + tcp: + description: An ordered list of route rules for opaque TCP traffic. + items: + properties: + match: + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination with + optional subnet. + items: + format: string + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied to. + items: + format: string + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sourceLabels: + additionalProperties: + format: string + type: string + type: object + sourceSubnet: + description: IPv4 or IPv6 ip address of source with optional + subnet. + format: string + type: string + type: object + type: array + route: + description: The destination to which the connection should be + forwarded to. 
+ items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + format: string + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. + format: string + type: string + type: object + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + tls: + items: + properties: + match: + items: + properties: + destinationSubnets: + description: IPv4 or IPv6 ip addresses of destination with + optional subnet. + items: + format: string + type: string + type: array + gateways: + description: Names of gateways where the rule should be + applied to. + items: + format: string + type: string + type: array + port: + description: Specifies the port on the host that is being + addressed. + type: integer + sniHosts: + description: SNI (server name indicator) to match on. + items: + format: string + type: string + type: array + sourceLabels: + additionalProperties: + format: string + type: string + type: object + sourceSubnet: + description: IPv4 or IPv6 ip address of source with optional + subnet. + format: string + type: string + type: object + type: array + route: + description: The destination to which the connection should be + forwarded to. + items: + properties: + destination: + properties: + host: + description: The name of a service from the service + registry. + format: string + type: string + port: + description: Specifies the port on the host that is + being addressed. + properties: + number: + type: integer + type: object + subset: + description: The name of a subset within the service. 
+ format: string + type: string + type: object + weight: + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: adapters.config.istio.io + labels: + app: mixer + package: adapter + istio: mixer-adapter + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: adapter + plural: adapters + singular: adapter + categories: + - istio-io + - policy-istio-io + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha2 + served: true + storage: true + + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: instances.config.istio.io + labels: + app: mixer + package: instance + istio: mixer-instance + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: instance + plural: instances + singular: instance + categories: + - istio-io + - policy-istio-io + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha2 + served: true + storage: true + + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + name: templates.config.istio.io + labels: + app: mixer + package: template + istio: mixer-template + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: template + plural: templates + singular: template + categories: + - istio-io + - policy-istio-io + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha2 + served: true + storage: true + + --- + kind: CustomResourceDefinition + apiVersion: apiextensions.k8s.io/v1beta1 + metadata: + 
name: handlers.config.istio.io + labels: + app: mixer + package: handler + istio: mixer-handler + chart: istio + heritage: Tiller + release: istio + annotations: + "helm.sh/resource-policy": keep + spec: + group: config.istio.io + names: + kind: handler + plural: handlers + singular: handler + categories: + - istio-io + - policy-istio-io + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha2 + served: true + storage: true + + --- +--- +# Source: istio-init/templates/configmap-crd-11.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-11 +data: + crd-11.yaml: |- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + annotations: + "helm.sh/resource-policy": keep + labels: + app: istio-pilot + chart: istio + heritage: Tiller + release: istio + name: sidecars.networking.istio.io + spec: + group: networking.istio.io + names: + categories: + - istio-io + - networking-istio-io + kind: Sidecar + plural: sidecars + singular: sidecar + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration affecting network reachability of a sidecar. + See more details at: https://istio.io/docs/reference/config/networking/v1alpha3/sidecar.html' + properties: + egress: + items: + properties: + bind: + format: string + type: string + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + hosts: + items: + format: string + type: string + type: array + port: + description: The port associated with the listener. + properties: + name: + description: Label assigned to the port. + format: string + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. 
+ format: string + type: string + type: object + type: object + type: array + ingress: + items: + properties: + bind: + description: The ip to which the listener should be bound. + format: string + type: string + captureMode: + enum: + - DEFAULT + - IPTABLES + - NONE + type: string + defaultEndpoint: + format: string + type: string + port: + description: The port associated with the listener. + properties: + name: + description: Label assigned to the port. + format: string + type: string + number: + description: A valid non-negative integer port number. + type: integer + protocol: + description: The protocol exposed on the port. + format: string + type: string + type: object + type: object + type: array + outboundTrafficPolicy: + description: This allows to configure the outbound traffic policy. + properties: + mode: + enum: + - REGISTRY_ONLY + - ALLOW_ANY + type: string + type: object + workloadSelector: + properties: + labels: + additionalProperties: + format: string + type: string + type: object + type: object + type: object + type: object + versions: + - name: v1alpha3 + served: true + storage: true + + --- +--- +# Source: istio-init/templates/configmap-crd-14.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: istio-system + name: istio-crd-14 +data: + crd-14.yaml: |- + apiVersion: apiextensions.k8s.io/v1beta1 + kind: CustomResourceDefinition + metadata: + labels: + app: istio-pilot + heritage: Tiller + istio: security + release: istio + name: authorizationpolicies.security.istio.io + spec: + group: security.istio.io + names: + categories: + - istio-io + - security-istio-io + kind: AuthorizationPolicy + plural: authorizationpolicies + singular: authorizationpolicy + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + description: 'Configuration for access control on workloads. 
See more details + at: https://istio.io/docs/reference/config/security/v1beta1/authorization-policy.html' + properties: + rules: + description: Optional. + items: + properties: + from: + description: Optional. + items: + properties: + source: + description: Source specifies the source of a request. + properties: + ipBlocks: + description: Optional. + items: + format: string + type: string + type: array + namespaces: + description: Optional. + items: + format: string + type: string + type: array + principals: + description: Optional. + items: + format: string + type: string + type: array + requestPrincipals: + description: Optional. + items: + format: string + type: string + type: array + type: object + type: object + type: array + to: + description: Optional. + items: + properties: + operation: + description: Operation specifies the operation of a request. + properties: + hosts: + description: Optional. + items: + format: string + type: string + type: array + methods: + description: Optional. + items: + format: string + type: string + type: array + paths: + description: Optional. + items: + format: string + type: string + type: array + ports: + description: Optional. + items: + format: string + type: string + type: array + type: object + type: object + type: array + when: + description: Optional. + items: + properties: + key: + description: The name of an Istio attribute. + format: string + type: string + values: + description: The allowed values for the attribute. + items: + format: string + type: string + type: array + type: object + type: array + type: object + type: array + selector: + description: Optional. 
+ properties: + matchLabels: + additionalProperties: + format: string + type: string + type: object + type: object + type: object + type: object + versions: + - name: v1beta1 + served: true + storage: true + + --- +--- +# Source: istio-init/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-init-service-account + namespace: istio-system + labels: + app: istio-init + istio: init +--- +# Source: istio-init/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-init-istio-system + labels: + app: istio-init + istio: init +rules: +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "get", "list", "watch", "patch"] +--- +# Source: istio-init/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-init-admin-role-binding-istio-system + labels: + app: istio-init + istio: init +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-init-istio-system +subjects: + - kind: ServiceAccount + name: istio-init-service-account + namespace: istio-system +--- +# Source: istio-init/templates/job-crd-10.yaml +apiVersion: batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-10-1.4.2 +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-10 + image: "docker.io/istio/kubectl:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 10m + memory: 50Mi + volumeMounts: + - name: crd-10 + mountPath: /etc/istio/crd-10 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-10/crd-10.yaml"] + volumes: + - name: crd-10 + configMap: + name: istio-crd-10 + restartPolicy: OnFailure +--- +# Source: istio-init/templates/job-crd-11.yaml +apiVersion: 
batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-11-1.4.2 +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-11 + image: "docker.io/istio/kubectl:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 10m + memory: 50Mi + volumeMounts: + - name: crd-11 + mountPath: /etc/istio/crd-11 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-11/crd-11.yaml"] + volumes: + - name: crd-11 + configMap: + name: istio-crd-11 + restartPolicy: OnFailure +--- +# Source: istio-init/templates/job-crd-14.yaml +apiVersion: batch/v1 +kind: Job +metadata: + namespace: istio-system + name: istio-init-crd-14-1.4.2 +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-init-service-account + containers: + - name: istio-init-crd-14 + image: "docker.io/istio/kubectl:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 10m + memory: 50Mi + volumeMounts: + - name: crd-14 + mountPath: /etc/istio/crd-14 + readOnly: true + command: ["kubectl", "apply", "-f", "/etc/istio/crd-14/crd-14.yaml"] + volumes: + - name: crd-14 + configMap: + name: istio-crd-14 + restartPolicy: OnFailure diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-knative-extras.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-knative-extras.yaml new file mode 100644 index 0000000000..6234718ed5 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-knative-extras.yaml @@ -0,0 +1,268 @@ +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: 
gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.4.2 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + app: cluster-local-gateway 
+ chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 10m + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + 
{"app":"cluster-local-gateway","chart":"gateways","heritage":"Helm","istio":"cluster-local-gateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-minimal.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-minimal.yaml new file mode 100644 index 0000000000..50058ff828 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/istio-minimal.yaml @@ 
-0,0 +1,960 @@ +--- +# PATCH #1: Creating the istio-system namespace. +apiVersion: v1 +kind: Namespace +metadata: + name: istio-system + labels: + istio-injection: disabled +# PATCH #1 ends. +--- +# Source: istio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio + namespace: istio-system + labels: + app: istio + chart: istio + heritage: Helm + release: RELEASE-NAME +data: + mesh: |- + # Set the following variable to true to disable policy checks by Mixer. + # Note that metrics will still be reported to Mixer. + disablePolicyChecks: true + + disableMixerHttpReports: false + # reportBatchMaxEntries is the number of requests that are batched before telemetry data is sent to the mixer server + reportBatchMaxEntries: 100 + # reportBatchMaxTime is the max waiting time before the telemetry data of a request is sent to the mixer server + reportBatchMaxTime: 1s + + # Set enableTracing to false to disable request tracing. + enableTracing: true + + # Set accessLogFile to empty string to disable access log. 
+ accessLogFile: "/dev/stdout" + + # If accessLogEncoding is TEXT, value will be used directly as the log format + # example: "[%START_TIME%] %REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\n" + # If AccessLogEncoding is JSON, value will be parsed as map[string]string + # example: '{"start_time": "%START_TIME%", "req_method": "%REQ(:METHOD)%"}' + # Leave empty to use default log format + accessLogFormat: "" + + # Set accessLogEncoding to JSON or TEXT to configure sidecar access log + accessLogEncoding: 'JSON' + + enableEnvoyAccessLogService: false + # Let Pilot give ingresses the public IP of the Istio ingressgateway + ingressService: istio-ingressgateway + + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + + # Automatic protocol detection uses a set of heuristics to + # determine whether the connection is using TLS or not (on the + # server side), as well as the application protocol being used + # (e.g., http vs tcp). These heuristics rely on the client sending + # the first bits of data. For server first protocols like MySQL, + # MongoDB, etc., Envoy will timeout on the protocol detection after + # the specified period, defaulting to non mTLS plain TCP + # traffic. Set this field to tweak the period that Envoy will wait + # for the client to send the first bits of data. (MUST BE >=1ms) + protocolDetectionTimeout: 100ms + + # DNS refresh rate for Envoy clusters of type STRICT_DNS + dnsRefreshRate: 300s + + # Unix Domain Socket through which envoy communicates with NodeAgent SDS to get + # key/cert for mTLS. Use secret-mount files instead of SDS if set to empty. + sdsUdsPath: "" + + # The trust domain corresponds to the trust root of a system. + # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain + trustDomain: "" + + # The trust domain aliases represent the aliases of trust_domain. 
+ # For example, if we have + # trustDomain: td1 + # trustDomainAliases: [“td2”, "td3"] + # Any service with the identity "td1/ns/foo/sa/a-service-account", "td2/ns/foo/sa/a-service-account", + # or "td3/ns/foo/sa/a-service-account" will be treated the same in the Istio mesh. + trustDomainAliases: + + # If true, automatically configure client side mTLS settings to match the corresponding service's + # server side mTLS authentication policy, when destination rule for that service does not specify + # TLS settings. + enableAutoMtls: false + + # Set the default behavior of the sidecar for handling outbound traffic from the application: + # ALLOW_ANY - outbound traffic to unknown destinations will be allowed, in case there are no + # services or ServiceEntries for the destination port + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio + # configuration. + rootNamespace: istio-system + + # Configures DNS certificates provisioned through Chiron linked into Pilot. + certificates: + [] + + defaultConfig: + # + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file + connectTimeout: 10s + # + ### ADVANCED SETTINGS ############# + # Where should envoy's configuration be stored in the istio-proxy container + configPath: "/etc/istio/proxy" + binaryPath: "/usr/local/bin/envoy" + # The pseudo service name used for Envoy. + serviceCluster: istio-proxy + # These settings that determine how long an old Envoy + # process should be kept alive after an occasional reload. + drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. 
This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. + # The "TPROXY" mode also configures the sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # + # Port where Envoy listens (on local host) for admin commands + # You can exec into the istio-proxy container in a pod and + # curl the admin port (curl http://localhost:15000/) to obtain + # diagnostic information from Envoy. See + # https://lyft.github.io/envoy/docs/operations/admin.html + # for more details + proxyAdminPort: 15000 + # + # Set concurrency to a specific number to control the number of Proxy worker threads. + # If set to 0 (default), then start worker thread for each CPU thread/core. + concurrency: 2 + # + tracing: + zipkin: + # Address of the Zipkin collector + address: zipkin.istio-system:9411 + # + # Mutual TLS authentication between sidecars and istio control plane. + controlPlaneAuthPolicy: NONE + # + # Address where istio Pilot service is running + discoveryAddress: istio-pilot.istio-system:15010 + + # Configuration file for the mesh networks to be used by the Split Horizon EDS. 
+ meshNetworks: |- + networks: {} +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-ingressgateway-service-account + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/gateways/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cluster-local-gateway-service-account + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/charts/pilot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-pilot-service-account + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +--- +# Source: istio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/pilot/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +rules: +- apiGroups: ["config.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["rbac.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["security.istio.io"] + resources: ["*"] + verbs: ["get", "watch", "list"] +- apiGroups: ["networking.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["authentication.istio.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["ingresses", "ingresses/status"] + verbs: ["*"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create", "get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["endpoints", "pods", 
"services", "namespaces", "nodes", "secrets"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "watch", "list", "update", "delete"] +- apiGroups: ["certificates.k8s.io"] + resources: + - "certificatesigningrequests" + - "certificatesigningrequests/approval" + - "certificatesigningrequests/status" + verbs: ["update", "create", "get", "delete"] +--- +# Source: istio/templates/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-reader +rules: + - apiGroups: [''] + resources: ['nodes', 'pods', 'services', 'endpoints', "replicationcontrollers"] + verbs: ['get', 'watch', 'list'] + - apiGroups: ["extensions", "apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: istio/charts/pilot/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-pilot-istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-pilot-istio-system +subjects: + - kind: ServiceAccount + name: istio-pilot-service-account + namespace: istio-system +--- +# Source: istio/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: istio-multi + labels: + chart: istio-1.4.2 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: istio-reader +subjects: +- kind: ServiceAccount + name: istio-multi + namespace: istio-system +--- +# Source: istio/charts/gateways/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-ingressgateway-sds + namespace: istio-system +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] +--- +# Source: istio/charts/gateways/templates/rolebindings.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding 
+metadata: + name: istio-ingressgateway-sds + namespace: istio-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-ingressgateway-sds +subjects: +- kind: ServiceAccount + name: istio-ingressgateway-service-account +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway +spec: + type: LoadBalancer + selector: + release: RELEASE-NAME + app: istio-ingressgateway + istio: ingressgateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/gateways/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cluster-local-gateway + namespace: istio-system + annotations: + labels: + chart: gateways + heritage: Helm + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway +spec: + type: ClusterIP + selector: + release: RELEASE-NAME + app: cluster-local-gateway + istio: cluster-local-gateway + ports: + - + name: status-port + port: 15020 + - + name: http2 + port: 80 + - + name: https + port: 443 +--- +# Source: istio/charts/pilot/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: istio-pilot + namespace: istio-system + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + ports: + - port: 15010 + name: grpc-xds # direct + - port: 15011 + name: https-xds # mTLS + - port: 8080 + name: http-legacy-discovery # direct + - port: 15014 + name: http-monitoring + selector: + istio: pilot +--- +# Source: istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-local-gateway + namespace: istio-system + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + 
istio: cluster-local-gateway + release: RELEASE-NAME +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-local-gateway + istio: cluster-local-gateway + strategy: + rollingUpdate: + maxSurge: + maxUnavailable: + template: + metadata: + labels: + app: cluster-local-gateway + chart: gateways + heritage: Helm + istio: cluster-local-gateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: cluster-local-gateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - cluster-local-gateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 128Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + 
fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"cluster-local-gateway","chart":"gateways","heritage":"Helm","istio":"cluster-local-gateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: cluster-local-gateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/cluster-local-gateway + + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: cluster-local-gateway-certs + mountPath: "/etc/istio/cluster-local-gateway-certs" + readOnly: true + - name: cluster-local-gateway-ca-certs + mountPath: "/etc/istio/cluster-local-gateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.cluster-local-gateway-service-account + optional: true + - name: cluster-local-gateway-certs + secret: + secretName: "istio-cluster-local-gateway-certs" + optional: true + - name: cluster-local-gateway-ca-certs + secret: + secretName: "istio-cluster-local-gateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: 
istio/charts/gateways/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-ingressgateway + namespace: istio-system + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME +spec: + replicas: 1 + selector: + matchLabels: + app: istio-ingressgateway + istio: ingressgateway + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + labels: + app: istio-ingressgateway + chart: gateways + heritage: Helm + istio: ingressgateway + release: RELEASE-NAME + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: ingress-sds + image: "docker.io/istio/node-agent-k8s:1.4.2" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: "ENABLE_WORKLOAD_SDS" + value: "false" + - name: "ENABLE_INGRESS_GATEWAY_SDS" + value: "true" + - name: "INGRESS_GATEWAY_NAMESPACE" + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.4.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 15020 + - containerPort: 80 + - containerPort: 443 + - containerPort: 15090 + protocol: TCP + name: http-envoy-prom + args: + - proxy + - router + - --domain + - $(POD_NAMESPACE).svc.cluster.local + - --log_output_level=default:info + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - istio-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --proxyAdminPort + - "15000" + - --statusPort + - "15020" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:15010 + readinessProbe: + 
failureThreshold: 30 + httpGet: + path: /healthz/ready + port: 15020 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: ISTIO_META_CONFIG_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: ISTIO_METAJSON_LABELS + value: | + {"app":"istio-ingressgateway","chart":"gateways","heritage":"Helm","istio":"ingressgateway","release":"RELEASE-NAME"} + - name: ISTIO_META_CLUSTER_ID + value: "Kubernetes" + - name: SDS_ENABLED + value: "false" + - name: ISTIO_META_WORKLOAD_NAME + value: istio-ingressgateway + - name: ISTIO_META_OWNER + value: kubernetes://api/apps/v1/namespaces/istio-system/deployments/istio-ingressgateway + - name: ISTIO_META_USER_SDS + value: "true" + - name: ISTIO_META_ROUTER_MODE + value: sni-dnat + + + volumeMounts: + - name: ingressgatewaysdsudspath + mountPath: /var/run/ingress_gateway + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: ingressgatewaysdsudspath + emptyDir: {} + - name: istio-certs + secret: + secretName: 
istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" +--- +# Source: istio/charts/pilot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: istio-pilot + namespace: istio-system + # TODO: default template doesn't have this, which one is right ? 
+ labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + selector: + matchLabels: + istio: pilot + template: + metadata: + labels: + app: pilot + chart: pilot + heritage: Helm + release: RELEASE-NAME + istio: pilot + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: istio-pilot-service-account + containers: + - name: discovery + image: "docker.io/istio/pilot:1.4.2" + imagePullPolicy: IfNotPresent + args: + - "discovery" + - --monitoringAddr=:15014 + - --log_output_level=default:info + - --domain + - cluster.local + - --secureGrpcAddr + - "" + - --keepaliveMaxServerConnectionAge + - "30m" + ports: + - containerPort: 8080 + - containerPort: 15010 + - containerPort: 15011 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: PILOT_PUSH_THROTTLE + value: "100" + - name: PILOT_TRACE_SAMPLING + value: "100" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_OUTBOUND + value: "true" + - name: PILOT_ENABLE_PROTOCOL_SNIFFING_FOR_INBOUND + value: "false" + resources: + requests: + cpu: 100m + memory: 128Mi + volumeMounts: + - name: config-volume + mountPath: /etc/istio/config + - name: istio-certs + mountPath: /etc/certs + readOnly: true + volumes: + - name: config-volume + configMap: + name: istio + - name: istio-certs + secret: + secretName: istio.istio-pilot-service-account + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - "ppc64le" + - "s390x" + preferredDuringSchedulingIgnoredDuringExecution: + - 
weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "amd64" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "ppc64le" + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - "s390x" diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/namespace.yaml.patch b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/namespace.yaml.patch new file mode 100644 index 0000000000..3e15f44aa1 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/namespace.yaml.patch @@ -0,0 +1,10 @@ +1a2,10 +> # PATCH #1: Creating the istio-system namespace. +> apiVersion: v1 +> kind: Namespace +> metadata: +> name: istio-system +> labels: +> istio-injection: disabled +> # PATCH #1 ends. +> --- diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-extras.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-extras.yaml new file mode 100644 index 0000000000..d6f2d68ede --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-extras.yaml @@ -0,0 +1,86 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. 
+ accessLogFile: "/dev/stdout" + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + enabled: true + + istio-ingressgateway: + enabled: false + istio-egressgateway: + enabled: false + istio-ilbgateway: + enabled: false + + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 1 + autoscaleMin: 1 + autoscaleMax: 1 + resources: {} + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +security: + enabled: false + +galley: + enabled: false + +mixer: + policy: + enabled: false + telemetry: + enabled: false + +pilot: + enabled: false + +grafana: + enabled: false + +prometheus: + enabled: false + +tracing: + enabled: false + +kiali: + enabled: false + +certmanager: + enabled: false diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-lean.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-lean.yaml new file mode 100644 index 0000000000..726abf12fe --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-lean.yaml @@ -0,0 +1,96 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. 
+ accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 5 + resources: + requests: + cpu: 1000m + memory: 1024Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 5 + resources: + requests: + cpu: 1000m + memory: 1024Mi + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + enabled: false + policy: + enabled: false + telemetry: + enabled: false + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + sidecar: false + resources: + requests: + cpu: 1000m + memory: 1024Mi + +galley: + enabled: true + +security: + enabled: true diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-local.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-local.yaml new file mode 100644 index 0000000000..0f2f0ada03 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values-local.yaml @@ 
-0,0 +1,91 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. + accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: disabled + disablePolicyChecks: true + omitSidecarInjectorConfigMap: true + defaultPodDisruptionBudget: + enabled: false + useMCP: false + +sidecarInjectorWebhook: + enabled: false + enableNamespacesByDefault: false + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + enabled: false + policy: + enabled: false + telemetry: + enabled: false + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + sidecar: false + autoscaleEnabled: false + resources: + requests: + cpu: 100m + memory: 128Mi + +galley: + enabled: false + +security: + enabled: false diff --git a/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values.yaml b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values.yaml new file mode 100644 index 0000000000..96fe0a36bf --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/istio-1.4.2/values.yaml @@ -0,0 
+1,85 @@ +global: + proxy: + # Enable proxy to write access log to /dev/stdout. + accessLogFile: "/dev/stdout" + accessLogEncoding: 'JSON' + autoInject: enabled + disablePolicyChecks: true + +sidecarInjectorWebhook: + enabled: true + enableNamespacesByDefault: false + rewriteAppHTTPProbe: true + +gateways: + istio-ingressgateway: + enabled: true + sds: + enabled: true + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 4 + resources: + limits: + cpu: 3000m + memory: 2048Mi + requests: + cpu: 3000m + memory: 2048Mi + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - port: 443 + name: https + cluster-local-gateway: + enabled: true + labels: + app: cluster-local-gateway + istio: cluster-local-gateway + replicaCount: 2 + autoscaleMin: 2 + autoscaleMax: 4 + resources: + requests: + cpu: 250m + memory: 256Mi + cpu: + targetAverageUtilization: 80 + loadBalancerIP: "" + loadBalancerSourceRanges: {} + externalIPs: [] + serviceAnnotations: {} + podAnnotations: {} + type: ClusterIP + ports: + - name: status-port + port: 15020 + - name: http2 + port: 80 + - name: https + port: 443 + secretVolumes: + - name: cluster-local-gateway-certs + secretName: istio-cluster-local-gateway-certs + mountPath: /etc/istio/cluster-local-gateway-certs + - name: cluster-local-gateway-ca-certs + secretName: istio-cluster-local-gateway-ca-certs + mountPath: /etc/istio/cluster-local-gateway-ca-certs + +prometheus: + enabled: false + +mixer: + adapters: + prometheus: + enabled: false + +pilot: + traceSampling: 100 + autoscaleMin: 2 + resources: + requests: + cpu: 3000m + memory: 2048Mi diff --git a/test/vendor/knative.dev/serving/third_party/kourier-latest/README.md b/test/vendor/knative.dev/serving/third_party/kourier-latest/README.md new file mode 100644 index 0000000000..c4fcf438c5 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/kourier-latest/README.md @@ -0,0 +1,5 @@ +The kourier.yaml file is generated by running + +``` +./download-kourier.sh +``` diff 
--git a/test/vendor/knative.dev/serving/third_party/kourier-latest/download-kourier.sh b/test/vendor/knative.dev/serving/third_party/kourier-latest/download-kourier.sh new file mode 100755 index 0000000000..5fd737b307 --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/kourier-latest/download-kourier.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# Download Kourier +KOURIER_VERSION=0.3.4 +KOURIER_YAML=kourier-knative.yaml +DOWNLOAD_URL=https://raw.githubusercontent.com/3scale/kourier/v${KOURIER_VERSION}/deploy/${KOURIER_YAML} + +wget ${DOWNLOAD_URL} + +cat <<EOF > kourier.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: kourier-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: kourier-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: kourier-system +--- +EOF + +cat ${KOURIER_YAML} \ + `# Install Kourier into the kourier-system namespace` \ + | sed 's/namespace: knative-serving/namespace: kourier-system/' \ + `# Expose Kourier services with LoadBalancer IPs instead of ClusterIP` \ + | sed 's/ClusterIP/LoadBalancer/' \ + >> kourier.yaml + +# Clean up. 
+rm ${KOURIER_YAML} diff --git a/test/vendor/knative.dev/serving/third_party/kourier-latest/kourier.yaml b/test/vendor/knative.dev/serving/third_party/kourier-latest/kourier.yaml new file mode 100644 index 0000000000..fe54faa69b --- /dev/null +++ b/test/vendor/knative.dev/serving/third_party/kourier-latest/kourier.yaml @@ -0,0 +1,256 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kourier-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-logging + namespace: kourier-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-observability + namespace: kourier-system +--- +apiVersion: v1 +kind: Service +metadata: + name: kourier + namespace: kourier-system +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: 3scale-kourier-gateway + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 3scale-kourier-gateway + namespace: kourier-system +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: 3scale-kourier-gateway + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: 3scale-kourier-gateway + spec: + containers: + - args: + - -c + - /tmp/config/envoy-bootstrap.yaml + image: quay.io/3scale/kourier-gateway:v0.1.3 + imagePullPolicy: Always + name: kourier-gateway + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - name: config-volume + mountPath: /tmp/config + readinessProbe: + exec: + command: ['ash','-c','(printf "GET /__internalkouriersnapshot HTTP/1.1\r\nHost: internalkourier\r\n\r\n"; sleep 1) | nc -n localhost 8081 | grep "HTTP/1.1 200 OK"'] + initialDelaySeconds: 5 + periodSeconds: 2 + volumes: + - name: config-volume + configMap: + name: kourier-bootstrap + dnsPolicy: ClusterFirst + restartPolicy: Always + 
schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 3scale-kourier-control + namespace: kourier-system +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: 3scale-kourier-control + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + app: 3scale-kourier-control + spec: + containers: + - image: quay.io/3scale/kourier:v0.3.4 + imagePullPolicy: Always + name: kourier-control + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + env: + - name: CERTS_SECRET_NAMESPACE + value: "" + - name: CERTS_SECRET_NAME + value: "" + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccountName: 3scale-kourier + terminationGracePeriodSeconds: 30 +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: 3scale-kourier + namespace: kourier-system +rules: + - apiGroups: [""] + resources: ["pods", "endpoints", "namespaces", "services", "secrets", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.internal.knative.dev"] + resources: ["clusteringresses","ingresses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.internal.knative.dev"] + resources: ["ingresses/status","clusteringresses/status"] + verbs: ["update"] + - apiGroups: [ "apiextensions.k8s.io" ] + resources: [ "customresourcedefinitions" ] + verbs: ["get", "list", "watch"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 3scale-kourier + namespace: kourier-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: 3scale-kourier +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: 
ClusterRole + name: 3scale-kourier +subjects: + - kind: ServiceAccount + name: 3scale-kourier + namespace: kourier-system +--- +apiVersion: v1 +kind: Service +metadata: + name: kourier-internal + namespace: kourier-system +spec: + ports: + - name: http2 + port: 80 + protocol: TCP + targetPort: 8081 + selector: + app: 3scale-kourier-gateway + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + name: kourier-external + namespace: kourier-system +spec: + ports: + - name: http2 + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: 3scale-kourier-gateway + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + name: kourier-control + namespace: kourier-system +spec: + ports: + - port: 18000 + protocol: TCP + targetPort: 18000 + selector: + app: 3scale-kourier-control + type: LoadBalancer +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kourier-bootstrap + namespace: kourier-system +data: + envoy-bootstrap.yaml: | + admin: + access_log_path: /tmp/test + address: + socket_address: + address: 0.0.0.0 + port_value: 19000 + dynamic_resources: + ads_config: + api_type: GRPC + grpc_services: + - envoy_grpc: + cluster_name: xds_cluster + cds_config: + ads: {} + lds_config: + ads: {} + node: + cluster: kourier-knative + id: 3scale-kourier-gateway + static_resources: + clusters: + - connect_timeout: 0.2s + load_assignment: + cluster_name: xds_cluster + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: kourier-control + port_value: 18000 + http2_protocol_options: {} + upstream_connection_options: + tcp_keepalive: {} + lb_policy: ROUND_ROBIN + name: xds_cluster + type: STRICT_DNS diff --git a/test/vendor/knative.dev/test-infra/LICENSE b/test/vendor/knative.dev/test-infra/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/test/vendor/knative.dev/test-infra/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/vendor/github.com/knative/test-infra/scripts/README.md b/test/vendor/knative.dev/test-infra/scripts/README.md similarity index 77% rename from test/vendor/github.com/knative/test-infra/scripts/README.md rename to test/vendor/knative.dev/test-infra/scripts/README.md index 549a540ca1..ca673fb135 100644 --- a/test/vendor/github.com/knative/test-infra/scripts/README.md +++ b/test/vendor/knative.dev/test-infra/scripts/README.md @@ -1,6 +1,6 @@ # Helper scripts -This directory contains helper scripts used by Prow test jobs, as well and local +This directory contains helper scripts used by Prow test jobs, as well as local development scripts. ## Using the `presubmit-tests.sh` helper script @@ -61,16 +61,19 @@ This is a helper script to run the presubmit tests. 
To use it: the integration tests (either your custom one or the default action) and will cause the test to fail if they don't return success. -1. Call the `main()` function passing `$@` (without quotes). +1. Call the `main()` function passing `"$@"` (with quotes). Running the script without parameters, or with the `--all-tests` flag causes all tests to be executed, in the right order (i.e., build, then unit, then integration tests). Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run a -specific set of tests. The flag `--emit-metrics` is used to emit metrics when -running the tests, and is automatically handled by the default action for -integration tests (see above). +specific set of tests. + +To run a specific program as a test, use the `--run-test` flag, and provide the +program as the argument. If arguments are required for the program, pass +everything as a single quotes argument. For example, +`./presubmit-tests.sh --run-test "test/my/test data"`. The script will automatically skip all presubmit tests for PRs where all changed files are exempt of tests (e.g., a PR changing only the `OWNERS` file). @@ -99,7 +102,7 @@ function pre_integration_tests() { # We use the default integration test runner. -main $@ +main "$@" ``` ## Using the `e2e-tests.sh` helper script @@ -118,7 +121,7 @@ This is a helper script for Knative E2E test scripts. To use it: cluster creation in case of stockout. If defined, `E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none. - `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to - `n1-standard-4}`. + `e2-standard-4}`. - `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when autoscaling, defaults to 1. - `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when @@ -162,12 +165,7 @@ This is a helper script for Knative E2E test scripts. 
To use it: (or `report_go_test()` if you need a more fine-grained control) and call `fail_test()` or `success()` if any of them failed. The environment variable `KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test - cluster. You can also use the following boolean (0 is false, 1 is true) - environment variables for the logic: - - - `EMIT_METRICS`: true if `--emit-metrics` was passed. - - All environment variables above are marked read-only. + cluster. **Notes:** @@ -178,6 +176,9 @@ This is a helper script for Knative E2E test scripts. To use it: will immediately start the tests against the cluster currently configured for `kubectl`. +1. By default `knative_teardown()` and `test_teardown()` will be called after + the tests finish, use `--skip-teardowns` if you don't want them to be called. + 1. By default Istio is installed on the cluster via Addon, use `--skip-istio-addon` if you choose not to have it preinstalled. @@ -224,6 +225,64 @@ kubectl get pods || fail_test success ``` +## Using the `performance-tests.sh` helper script + +This is a helper script for Knative performance test scripts. In combination +with specific Prow jobs, it can automatically manage the environment for running +benchmarking jobs for each repo. To use it: + +1. Source the script. + +1. [optional] Customize GCP project settings for the benchmarks. Set the + following environment variables if the default value doesn't fit your needs: + + - `PROJECT_NAME`: GCP project name for keeping the clusters that run the + benchmarks. Defaults to `knative-performance`. + - `SERVICE_ACCOUNT_NAME`: Service account name for controlling GKE clusters + and interacting with [Mako](https://github.com/google/mako) server. It MUST + have `Kubernetes Engine Admin` and `Storage Admin` role, and be + [whitelisted](https://github.com/google/mako/blob/master/docs/ACCESS.md) by + Mako admin. Defaults to `mako-job`. + +1. [optional] Customize root path of the benchmarks. 
This root folder should + contain and only contain all benchmarks you want to run continuously. Set the + following environment variable if the default value doesn't fit your needs: + + - `BENCHMARK_ROOT_PATH`: Benchmark root path, defaults to + `test/performance/benchmarks`. Each repo can decide which folder to put its + benchmarks in, and override this environment variable to be the path of + that folder. + +1. [optional] Write the `update_knative` function, which will update your system + under test (e.g. Knative Serving). + +1. [optional] Write the `update_benchmark` function, which will update the + underlying resources for the benchmark (usually Knative resources and + Kubernetes cronjobs for benchmarking). This function accepts a parameter, + which is the benchmark name in the current repo. + +1. Call the `main()` function with all parameters (e.g. `$@`). + +### Sample performance test script + +This script will update `Knative serving` and the given benchmark. + +```bash +source vendor/knative.dev/test-infra/scripts/performance-tests.sh + +function update_knative() { + echo ">> Updating serving" + ko apply -f config/ || abort "failed to apply serving" +} + +function update_benchmark() { + echo ">> Updating benchmark $1" + ko apply -f ${BENCHMARK_ROOT_PATH}/$1 || abort "failed to apply benchmark $1" +} + +main $@ +``` + ## Using the `release.sh` helper script This is a helper script for Knative release scripts. To use it: @@ -251,6 +310,9 @@ This is a helper script for Knative release scripts. To use it: if `--release-gcs` was passed, otherwise the default value `knative-nightly/` will be used. It is empty if `--publish` was not passed. + - `RELEASE_DIR`: contains the directory to store the manifests if + `--release-dir` was passed. Defaults to empty value, but if `--nopublish` + was passed then points to the repository root directory. - `BUILD_COMMIT_HASH`: the commit short hash for the current repo. 
If the current git tree is dirty, it will have `-dirty` appended to it. - `BUILD_YYYYMMDD`: current UTC date in `YYYYMMDD` format. @@ -276,7 +338,7 @@ This is a helper script for Knative release scripts. To use it: All environment variables above, except `KO_FLAGS`, are marked read-only once `main()` is called (see below). -1. Call the `main()` function passing `$@` (without quotes). +1. Call the `main()` function passing `"$@"` (with quotes). ### Sample release script @@ -289,5 +351,5 @@ function build_release() { ARTIFACTS_TO_PUBLISH="release.yaml" } -main $@ +main "$@" ``` diff --git a/test/vendor/github.com/knative/test-infra/scripts/dummy.go b/test/vendor/knative.dev/test-infra/scripts/dummy.go similarity index 100% rename from test/vendor/github.com/knative/test-infra/scripts/dummy.go rename to test/vendor/knative.dev/test-infra/scripts/dummy.go diff --git a/test/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh b/test/vendor/knative.dev/test-infra/scripts/e2e-tests.sh similarity index 91% rename from test/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh rename to test/vendor/knative.dev/test-infra/scripts/e2e-tests.sh index 7ec256c93a..a78920dccb 100755 --- a/test/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh +++ b/test/vendor/knative.dev/test-infra/scripts/e2e-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2019 The Knative Authors # @@ -47,7 +47,7 @@ export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-} readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1} readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-} -readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4} +readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-e2-standard-4} readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod} readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta} @@ -81,7 +81,6 @@ function teardown_test_resources() { function go_test_e2e() { local test_options="" local 
go_options="" - (( EMIT_METRICS )) && test_options="-emitmetrics" [[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e" report_go_test -v -race -count=1 ${go_options} $@ ${test_options} } @@ -93,13 +92,32 @@ function dump_cluster_state() { echo "*** E2E TEST FAILED ***" echo "*** Start of information dump ***" echo "***************************************" - echo ">>> All resources:" - kubectl get all --all-namespaces - echo ">>> Services:" - kubectl get services --all-namespaces - echo ">>> Events:" - kubectl get events --all-namespaces - function_exists dump_extra_cluster_state && dump_extra_cluster_state + + local output="${ARTIFACTS}/k8s.dump.txt" + echo ">>> The dump is located at ${output}" + + for crd in $(kubectl api-resources --verbs=list -o name | sort); do + local count="$(kubectl get $crd --all-namespaces --no-headers 2>/dev/null | wc -l)" + echo ">>> ${crd} (${count} objects)" + if [[ "${count}" > "0" ]]; then + echo ">>> ${crd} (${count} objects)" >> ${output} + + echo ">>> Listing" >> ${output} + kubectl get ${crd} --all-namespaces >> ${output} + + echo ">>> Details" >> ${output} + if [[ "${crd}" == "secrets" ]]; then + echo "Secrets are ignored for security reasons" >> ${output} + else + kubectl get ${crd} --all-namespaces -o yaml >> ${output} + fi + fi + done + + if function_exists dump_extra_cluster_state; then + echo ">>> Extra dump" >> ${output} + dump_extra_cluster_state >> ${output} + fi echo "***************************************" echo "*** E2E TEST FAILED ***" echo "*** End of information dump ***" @@ -209,14 +227,16 @@ function create_test_cluster() { echo "Test script is ${E2E_SCRIPT}" # Set arguments for this script again local test_cmd_args="--run-tests" - (( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics" (( SKIP_KNATIVE_SETUP )) && test_cmd_args+=" --skip-knative-setup" [[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}" [[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" 
${E2E_SCRIPT_CUSTOM_FLAGS[@]}" local extra_flags=() - if (( IS_BOSKOS )); then # Add arbitrary duration, wait for Boskos projects acquisition before error out + if (( IS_BOSKOS )); then + # Add arbitrary duration, wait for Boskos projects acquisition before error out extra_flags+=(--boskos-wait-duration=20m) - else # Only let kubetest tear down the cluster if not using Boskos, it's done by Janitor if using Boskos + elif (( ! SKIP_TEARDOWNS )); then + # Only let kubetest tear down the cluster if not using Boskos and teardowns are not expected to be skipped, + # it's done by Janitor if using Boskos extra_flags+=(--down) fi @@ -226,7 +246,7 @@ function create_test_cluster() { local test_wrapper="${kubedir}/e2e-test.sh" mkdir ${kubedir}/cluster ln -s "$(which kubectl)" ${kubedir}/cluster/kubectl.sh - echo "#!/bin/bash" > ${test_wrapper} + echo "#!/usr/bin/env bash" > ${test_wrapper} echo "cd $(pwd) && set -x" >> ${test_wrapper} echo "${E2E_SCRIPT} ${test_cmd_args}" >> ${test_wrapper} chmod +x ${test_wrapper} @@ -291,7 +311,8 @@ function create_test_cluster_with_retries() { # - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694) [[ -z "$(grep -Fo 'does not have enough resources available to fulfill' ${cluster_creation_log})" \ && -z "$(grep -Fo 'ResponseError: code=400, message=No valid versions with the prefix' ${cluster_creation_log})" \ - && -z "$(grep -Po 'ResponseError: code=400, message=Master version "[0-9a-z\-\.]+" is unsupported' ${cluster_creation_log})" ]] \ + && -z "$(grep -Po 'ResponseError: code=400, message=Master version "[0-9a-z\-\.]+" is unsupported' ${cluster_creation_log})" \ + && -z "$(grep -Po 'only \d+ nodes out of \d+ have registered; this is likely due to Nodes failing to start correctly' ${cluster_creation_log})" ]] \ && return 1 done done @@ -305,6 +326,9 @@ function setup_test_cluster() { set -o errexit set -o pipefail + header "Test cluster setup" + kubectl get nodes + header "Setting up test 
cluster" # Set the actual project the test cluster resides in @@ -344,7 +368,8 @@ function setup_test_cluster() { export KO_DATA_PATH="${REPO_ROOT_DIR}/.git" - trap teardown_test_resources EXIT + # Do not run teardowns if we explicitly want to skip them. + (( ! SKIP_TEARDOWNS )) && trap teardown_test_resources EXIT # Handle failures ourselves, so we can dump useful info. set +o errexit @@ -396,9 +421,9 @@ function fail_test() { } RUN_TESTS=0 -EMIT_METRICS=0 SKIP_KNATIVE_SETUP=0 SKIP_ISTIO_ADDON=0 +SKIP_TEARDOWNS=0 GCP_PROJECT="" E2E_SCRIPT="" E2E_CLUSTER_VERSION="" @@ -432,8 +457,8 @@ function initialize() { # Try parsing flag as a standard one. case ${parameter} in --run-tests) RUN_TESTS=1 ;; - --emit-metrics) EMIT_METRICS=1 ;; --skip-knative-setup) SKIP_KNATIVE_SETUP=1 ;; + --skip-teardowns) SKIP_TEARDOWNS=1 ;; --skip-istio-addon) SKIP_ISTIO_ADDON=1 ;; *) [[ $# -ge 2 ]] || abort "missing parameter after $1" @@ -463,12 +488,12 @@ function initialize() { (( SKIP_ISTIO_ADDON )) || GKE_ADDONS="--addons=Istio" readonly RUN_TESTS - readonly EMIT_METRICS readonly GCP_PROJECT readonly IS_BOSKOS readonly EXTRA_CLUSTER_CREATION_FLAGS readonly EXTRA_KUBETEST_FLAGS readonly SKIP_KNATIVE_SETUP + readonly SKIP_TEARDOWNS readonly GKE_ADDONS if (( ! RUN_TESTS )); then diff --git a/test/vendor/github.com/knative/test-infra/scripts/library.sh b/test/vendor/knative.dev/test-infra/scripts/library.sh similarity index 83% rename from test/vendor/github.com/knative/test-infra/scripts/library.sh rename to test/vendor/knative.dev/test-infra/scripts/library.sh index 1cee803542..d2715650cd 100755 --- a/test/vendor/github.com/knative/test-infra/scripts/library.sh +++ b/test/vendor/knative.dev/test-infra/scripts/library.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2018 The Knative Authors # @@ -132,30 +132,46 @@ function wait_until_object_does_not_exist() { # Parameters: $1 - namespace. 
function wait_until_pods_running() { echo -n "Waiting until all pods in namespace $1 are up" + local failed_pod="" for i in {1..150}; do # timeout after 5 minutes local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)" - # All pods must be running - local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l) - if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then + # All pods must be running (ignore ImagePull error to allow the pod to retry) + local not_running_pods=$(echo "${pods}" | grep -v Running | grep -v Completed | grep -v ErrImagePull | grep -v ImagePullBackOff) + if [[ -n "${pods}" ]] && [[ -z "${not_running_pods}" ]]; then + # All Pods are running or completed. Verify the containers on each Pod. local all_ready=1 while read pod ; do local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`) + # Set this Pod as the failed_pod. If nothing is wrong with it, then after the checks, set + # failed_pod to the empty string. + failed_pod=$(echo -n "${pod}" | cut -f1 -d' ') # All containers must be ready [[ -z ${status[0]} ]] && all_ready=0 && break [[ -z ${status[1]} ]] && all_ready=0 && break [[ ${status[0]} -lt 1 ]] && all_ready=0 && break [[ ${status[1]} -lt 1 ]] && all_ready=0 && break [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break + # All the tests passed, this is not a failed pod. + failed_pod="" done <<< "$(echo "${pods}" | grep -v Completed)" if (( all_ready )); then echo -e "\nAll pods are up:\n${pods}" return 0 fi + elif [[ -n "${not_running_pods}" ]]; then + # At least one Pod is not running, just save the first one's name as the failed_pod. + failed_pod="$(echo "${not_running_pods}" | head -n 1 | cut -f1 -d' ')" fi echo -n "." 
sleep 2 done echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}" + if [[ -n "${failed_pod}" ]]; then + echo -e "\n\nFailed Pod (data in YAML format) - ${failed_pod}\n" + kubectl -n $1 get pods "${failed_pod}" -oyaml + echo -e "\n\nPod Logs\n" + kubectl -n $1 logs "${failed_pod}" --all-containers + fi return 1 } @@ -283,9 +299,9 @@ function acquire_cluster_admin_role() { local key=$(mktemp) echo "Certificate in ${cert}, key in ${key}" gcloud --format="value(masterAuth.clientCertificate)" \ - container clusters describe $2 ${geoflag} | base64 -d > ${cert} + container clusters describe $2 ${geoflag} | base64 --decode > ${cert} gcloud --format="value(masterAuth.clientKey)" \ - container clusters describe $2 ${geoflag} | base64 -d > ${key} + container clusters describe $2 ${geoflag} | base64 --decode > ${key} kubectl config set-credentials cluster-admin \ --client-certificate=${cert} --client-key=${key} fi @@ -337,7 +353,7 @@ function create_junit_xml() { # Also escape `<` and `>` as here: https://github.com/golang/go/blob/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/src/encoding/json/encode.go#L48, # this is temporary solution for fixing https://github.com/knative/test-infra/issues/1204, # which should be obsolete once Test-infra 2.0 is in place - local msg="$(echo -n "$3" | sed 's/$/\ /g' | sed 's//\\u003e/' | tr -d '\n')" + local msg="$(echo -n "$3" | sed 's/$/\ /g' | sed 's//\\u003e/' | sed 's/&/\\u0026/' | tr -d '\n')" failure="${msg}" fi cat << EOF > "${xml}" @@ -401,6 +417,21 @@ function start_knative_serving() { wait_until_pods_running knative-serving || return 1 } +# Install Knative Monitoring in the current cluster. +# Parameters: $1 - Knative Monitoring manifest. 
+function start_knative_monitoring() { + header "Starting Knative Monitoring" + subheader "Installing Knative Monitoring" + # namespace istio-system needs to be created first, due to the comment + # mentioned in + # https://github.com/knative/serving/blob/4202efc0dc12052edc0630515b101cbf8068a609/config/monitoring/tracing/zipkin/100-zipkin.yaml#L21 + kubectl create namespace istio-system 2>/dev/null + echo "Installing Monitoring from $1" + kubectl apply -f "$1" || return 1 + wait_until_pods_running knative-monitoring || return 1 + wait_until_pods_running istio-system || return 1 +} + # Install the stable release Knative/serving in the current cluster. # Parameters: $1 - Knative Serving version number, e.g. 0.6.0. function start_release_knative_serving() { @@ -412,6 +443,29 @@ function start_latest_knative_serving() { start_knative_serving "${KNATIVE_SERVING_RELEASE}" } +# Install Knative Eventing in the current cluster. +# Parameters: $1 - Knative Eventing manifest. +function start_knative_eventing() { + header "Starting Knative Eventing" + subheader "Installing Knative Eventing" + echo "Installing Eventing CRDs from $1" + kubectl apply --selector knative.dev/crd-install=true -f "$1" + echo "Installing the rest of eventing components from $1" + kubectl apply -f "$1" + wait_until_pods_running knative-eventing || return 1 +} + +# Install the stable release Knative/eventing in the current cluster. +# Parameters: $1 - Knative Eventing version number, e.g. 0.6.0. +function start_release_knative_eventing() { + start_knative_eventing "https://storage.googleapis.com/knative-releases/eventing/previous/v$1/eventing.yaml" +} + +# Install the latest stable Knative Eventing in the current cluster. +function start_latest_knative_eventing() { + start_knative_eventing "${KNATIVE_EVENTING_RELEASE}" +} + # Run a go tool, installing it first if necessary. # Parameters: $1 - tool package/dir for go get/install. # $2 - tool to run. 
@@ -543,14 +597,19 @@ function get_canonical_path() { echo "$(cd ${path%/*} && echo $PWD/${path##*/})" } -# Returns whether the current branch is a release branch. -function is_release_branch() { +# Returns the current branch. +function current_branch() { local branch_name="" # Get the branch name from Prow's env var, see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md. # Otherwise, try getting the current branch from git. (( IS_PROW )) && branch_name="${PULL_BASE_REF:-}" [[ -z "${branch_name}" ]] && branch_name="$(git rev-parse --abbrev-ref HEAD)" - [[ ${branch_name} =~ ^release-[0-9\.]+$ ]] + echo "${branch_name}" +} + +# Returns whether the current branch is a release branch. +function is_release_branch() { + [[ $(current_branch) =~ ^release-[0-9\.]+$ ]] } # Returns the URL to the latest manifest for the given Knative project. @@ -561,19 +620,23 @@ function get_latest_knative_yaml_source() { local yaml_name="$2" # If it's a release branch, the yaml source URL should point to a specific version. if is_release_branch; then - # Get the latest tag name for the current branch, which is likely formatted as v0.5.0 - local tag_name="$(git describe --tags --abbrev=0)" - # The given repo might not have this tag, so we need to find its latest release manifest with the same major&minor version. - local major_minor="$(echo ${tag_name} | cut -d. -f1-2)" - local yaml_source_path="$(gsutil ls gs://knative-releases/${repo_name}/previous/${major_minor}.*/${yaml_name}.yaml \ + # Extract the release major&minor version from the branch name. + local branch_name="$(current_branch)" + local major_minor="${branch_name##release-}" + # Find the latest release manifest with the same major&minor version. 
+ local yaml_source_path="$( + gsutil ls gs://knative-releases/${repo_name}/previous/v${major_minor}.*/${yaml_name}.yaml 2> /dev/null \ | sort \ | tail -n 1 \ | cut -b6-)" - echo "https://storage.googleapis.com/${yaml_source_path}" - # If it's not a release branch, the yaml source URL should be nightly build. - else - echo "https://storage.googleapis.com/knative-nightly/${repo_name}/latest/${yaml_name}.yaml" + # The version does exist, return it. + if [[ -n "${yaml_source_path}" ]]; then + echo "https://storage.googleapis.com/${yaml_source_path}" + return + fi + # Otherwise, fall back to nightly. fi + echo "https://storage.googleapis.com/knative-nightly/${repo_name}/latest/${yaml_name}.yaml" } # Initializations that depend on previous functions. @@ -584,5 +647,5 @@ readonly REPO_NAME_FORMATTED="Knative $(capitalize ${REPO_NAME//-/ })" # Public latest nightly or release yaml files. readonly KNATIVE_SERVING_RELEASE="$(get_latest_knative_yaml_source "serving" "serving")" -readonly KNATIVE_BUILD_RELEASE="$(get_latest_knative_yaml_source "build" "build")" -readonly KNATIVE_EVENTING_RELEASE="$(get_latest_knative_yaml_source "eventing" "release")" +readonly KNATIVE_EVENTING_RELEASE="$(get_latest_knative_yaml_source "eventing" "eventing")" +readonly KNATIVE_MONITORING_RELEASE="$(get_latest_knative_yaml_source "serving" "monitoring")" diff --git a/test/vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc b/test/vendor/knative.dev/test-infra/scripts/markdown-link-check-config.rc similarity index 100% rename from test/vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc rename to test/vendor/knative.dev/test-infra/scripts/markdown-link-check-config.rc diff --git a/test/vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc b/test/vendor/knative.dev/test-infra/scripts/markdown-lint-config.rc similarity index 100% rename from test/vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc rename to 
test/vendor/knative.dev/test-infra/scripts/markdown-lint-config.rc diff --git a/test/vendor/knative.dev/test-infra/scripts/performance-tests.sh b/test/vendor/knative.dev/test-infra/scripts/performance-tests.sh new file mode 100755 index 0000000000..9c9c259000 --- /dev/null +++ b/test/vendor/knative.dev/test-infra/scripts/performance-tests.sh @@ -0,0 +1,156 @@ +#!/bin/bash + +# Copyright 2019 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a helper script for Knative performance test scripts. +# See README.md for instructions on how to use it. + +source $(dirname ${BASH_SOURCE})/library.sh + +# Configurable parameters. +# If not provided, they will fall back to the default values. +readonly BENCHMARK_ROOT_PATH=${BENCHMARK_ROOT_PATH:-test/performance/benchmarks} +readonly PROJECT_NAME=${PROJECT_NAME:-knative-performance} +readonly SERVICE_ACCOUNT_NAME=${SERVICE_ACCOUNT_NAME:-mako-job@knative-performance.iam.gserviceaccount.com} + +# Setup env vars. +export KO_DOCKER_REPO="gcr.io/${PROJECT_NAME}" +# Constants +readonly GOOGLE_APPLICATION_CREDENTIALS="/etc/performance-test/service-account.json" +readonly GITHUB_TOKEN="/etc/performance-test/github-token" +readonly SLACK_READ_TOKEN="/etc/performance-test/slack-read-token" +readonly SLACK_WRITE_TOKEN="/etc/performance-test/slack-write-token" + +# Set up the user for cluster operations. 
+function setup_user() { + echo ">> Setting up user" + echo "Using gcloud user ${SERVICE_ACCOUNT_NAME}" + gcloud config set core/account ${SERVICE_ACCOUNT_NAME} + echo "Using gcloud project ${PROJECT_NAME}" + gcloud config set core/project ${PROJECT_NAME} +} + +# Update resources installed on the cluster. +# Parameters: $1 - cluster name +# $2 - cluster region/zone +function update_cluster() { + # --zone option can work with both region and zone, (e.g. us-central1 and + # us-central1-a), so we don't need to add extra check here. + gcloud container clusters get-credentials $1 --zone=$2 --project=${PROJECT_NAME} || abort "failed to get cluster creds" + # Set up the configmap to run benchmarks in production + echo ">> Setting up 'prod' config-mako on cluster $1 in zone $2" + cat <> Creating secrets on cluster $1 in zone $2" + kubectl create secret generic mako-secrets \ + --from-file=robot.json=${GOOGLE_APPLICATION_CREDENTIALS} \ + --from-file=github-token=${GITHUB_TOKEN} \ + --from-file=slack-read-token=${SLACK_READ_TOKEN} \ + --from-file=slack-write-token=${SLACK_WRITE_TOKEN} + # Delete all benchmark jobs to avoid noise in the update process + echo ">> Deleting all cronjobs and jobs on cluster $1 in zone $2" + kubectl delete cronjob --all + kubectl delete job --all + + if function_exists update_knative; then + update_knative || abort "failed to update knative" + fi + # get benchmark name from the cluster name + local benchmark_name=$(get_benchmark_name $1) + if function_exists update_benchmark; then + update_benchmark ${benchmark_name} || abort "failed to update benchmark" + fi +} + +# Get benchmark name from the cluster name. +# Parameters: $1 - cluster name +function get_benchmark_name() { + # get benchmark_name by removing the prefix from cluster name, e.g. get "load-test" from "serving--load-test" + echo ${1#$REPO_NAME"--"} +} + +# Update the clusters related to the current repo. 
+function update_clusters() { + header "Updating all clusters for ${REPO_NAME}" + local all_clusters=$(gcloud container clusters list --project="${PROJECT_NAME}" --format="csv[no-heading](name,zone)") + echo ">> Project contains clusters:" ${all_clusters} + for cluster in ${all_clusters}; do + local name=$(echo "${cluster}" | cut -f1 -d",") + # the cluster name is prefixed with "${REPO_NAME}--", here we should only handle clusters belonged to the current repo + [[ ! ${name} =~ ^${REPO_NAME}-- ]] && continue + local zone=$(echo "${cluster}" | cut -f2 -d",") + + # Update all resources installed on the cluster + update_cluster ${name} ${zone} + done + header "Done updating all clusters" +} + +# Run the perf-tests tool +# Parameters: $1..$n - parameters passed to the tool +function run_perf_cluster_tool() { + go run ${REPO_ROOT_DIR}/vendor/knative.dev/pkg/testutils/clustermanager/perf-tests $@ +} + +# Delete the old clusters belonged to the current repo, and recreate them with the same configuration. +function recreate_clusters() { + header "Recreating clusters for ${REPO_NAME}" + run_perf_cluster_tool --recreate \ + --gcp-project=${PROJECT_NAME} --repository=${REPO_NAME} --benchmark-root=${BENCHMARK_ROOT_PATH} \ + || abort "failed recreating clusters for ${REPO_NAME}" + header "Done recreating clusters" + # Update all clusters after they are recreated + update_clusters +} + +# Try to reconcile clusters for benchmarks in the current repo. +# This function will be run as postsubmit jobs. +function reconcile_benchmark_clusters() { + header "Reconciling clusters for ${REPO_NAME}" + run_perf_cluster_tool --reconcile \ + --gcp-project=${PROJECT_NAME} --repository=${REPO_NAME} --benchmark-root=${BENCHMARK_ROOT_PATH} \ + || abort "failed reconciling clusters for ${REPO_NAME}" + header "Done reconciling clusters" + # For now, do nothing after reconciling the clusters, and the next update_clusters job will automatically + # update them. 
So there will be a period that the newly created clusters are being idle, and the duration + # can be as long as . +} + +# Parse flags and excute the command. +function main() { + if (( ! IS_PROW )); then + abort "this script should only be run by Prow since it needs secrets created on Prow cluster" + fi + + # Set up the user credential for cluster operations + setup_user || abort "failed to set up user" + + # Try parsing the first flag as a command. + case $1 in + --recreate-clusters) recreate_clusters ;; + --update-clusters) update_clusters ;; + --reconcile-benchmark-clusters) reconcile_benchmark_clusters ;; + *) abort "unknown command $1, must be --recreate-clusters, --update-clusters or --reconcile_benchmark_clusters" + esac + shift +} diff --git a/test/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh b/test/vendor/knative.dev/test-infra/scripts/presubmit-tests.sh similarity index 97% rename from test/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh rename to test/vendor/knative.dev/test-infra/scripts/presubmit-tests.sh index 18d785ac9a..85897acc15 100755 --- a/test/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh +++ b/test/vendor/knative.dev/test-infra/scripts/presubmit-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2018 The Knative Authors # @@ -49,7 +49,7 @@ function pr_only_contains() { # List changed files in the current PR. # This is implemented as a function so it can be mocked in unit tests. function list_changed_files() { - /workspace/githubhelper -list-changed-files + /workspace/githubhelper -list-changed-files -github-token /etc/repoview-token/token } # Initialize flags and context for presubmit tests: @@ -156,8 +156,10 @@ function default_build_test_runner() { markdown_build_tests || failed=1 # For documentation PRs, just check the md files (( IS_DOCUMENTATION_PR )) && return ${failed} + # Don't merge these two lines, or return code will always be 0. 
+ local go_pkg_dirs + go_pkg_dirs="$(go list ./...)" || return 1 # Skip build test if there is no go code - local go_pkg_dirs="$(go list ./...)" [[ -z "${go_pkg_dirs}" ]] && return ${failed} # Ensure all the code builds subheader "Checking that go code builds" @@ -267,7 +269,6 @@ function run_integration_tests() { function default_integration_test_runner() { local options="" local failed=0 - (( EMIT_METRICS )) && options="--emit-metrics" for e2e_test in $(find test/ -name e2e-*tests.sh); do echo "Running integration test ${e2e_test}" if ! ${e2e_test} ${options}; then @@ -281,7 +282,6 @@ function default_integration_test_runner() { RUN_BUILD_TESTS=0 RUN_UNIT_TESTS=0 RUN_INTEGRATION_TESTS=0 -EMIT_METRICS=0 # Process flags and run tests accordingly. function main() { @@ -304,7 +304,7 @@ function main() { go version echo ">> git version" git version - echo ">> ko built from commit" + echo ">> ko version" [[ -f /ko_version ]] && cat /ko_version || echo "unknown" echo ">> bazel version" [[ -f /bazel_version ]] && cat /bazel_version || echo "unknown" @@ -333,7 +333,6 @@ function main() { --build-tests) RUN_BUILD_TESTS=1 ;; --unit-tests) RUN_UNIT_TESTS=1 ;; --integration-tests) RUN_INTEGRATION_TESTS=1 ;; - --emit-metrics) EMIT_METRICS=1 ;; --all-tests) RUN_BUILD_TESTS=1 RUN_UNIT_TESTS=1 @@ -342,7 +341,7 @@ function main() { --run-test) shift [[ $# -ge 1 ]] || abort "missing executable after --run-test" - TEST_TO_RUN=$1 + TEST_TO_RUN="$1" ;; *) abort "error: unknown option ${parameter}" ;; esac @@ -352,7 +351,6 @@ function main() { readonly RUN_BUILD_TESTS readonly RUN_UNIT_TESTS readonly RUN_INTEGRATION_TESTS - readonly EMIT_METRICS readonly TEST_TO_RUN cd ${REPO_ROOT_DIR} diff --git a/test/vendor/github.com/knative/test-infra/scripts/release.sh b/test/vendor/knative.dev/test-infra/scripts/release.sh similarity index 92% rename from test/vendor/github.com/knative/test-infra/scripts/release.sh rename to test/vendor/knative.dev/test-infra/scripts/release.sh index 
39d680c040..915fc97f62 100755 --- a/test/vendor/github.com/knative/test-infra/scripts/release.sh +++ b/test/vendor/knative.dev/test-infra/scripts/release.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2018 The Knative Authors # @@ -93,9 +93,9 @@ RELEASE_VERSION="" RELEASE_NOTES="" RELEASE_BRANCH="" RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}" +RELEASE_DIR="" KO_FLAGS="-P" VALIDATION_TESTS="./test/presubmit-tests.sh" -YAMLS_TO_PUBLISH="" ARTIFACTS_TO_PUBLISH="" FROM_NIGHTLY_RELEASE="" FROM_NIGHTLY_RELEASE_GCS="" @@ -165,8 +165,8 @@ function prepare_auto_release() { PUBLISH_RELEASE=1 git fetch --all || abort "error fetching branches/tags from remote" - local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' -f1-2 | sort | uniq)" - local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort | uniq)" + local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' -f1-2 | sort -V | uniq)" + local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort -V | uniq)" echo "Versions released (from tags): [" ${tags} "]" echo "Versions released (from branches): [" ${branches} "]" @@ -212,7 +212,7 @@ function prepare_dot_release() { echo "Dot release will be generated for ${version_filter}" releases="$(echo "${releases}" | grep ^${version_filter})" fi - local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r | head -1)" + local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r -V | head -1)" [[ -n "${last_version}" ]] || abort "no previous release exist" local major_minor_version="" if [[ -z "${RELEASE_BRANCH}" ]]; then @@ -334,6 +334,7 @@ function find_latest_nightly() { function parse_flags() { local has_gcr_flag=0 local has_gcs_flag=0 + local has_dir_flag=0 local is_dot_release=0 local is_auto_release=0 @@ -365,8 +366,14 @@ function parse_flags() { ;; --release-gcs) 
RELEASE_GCS_BUCKET=$1 + RELEASE_DIR="" has_gcs_flag=1 ;; + --release-dir) + RELEASE_DIR=$1 + RELEASE_GCS_BUCKET="" + has_dir_flag=1 + ;; --version) [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'" RELEASE_VERSION=$1 @@ -389,6 +396,9 @@ function parse_flags() { shift done + (( has_gcs_flag )) && (( has_dir_flag )) && abort "cannot have both --release-gcs and --release-dir set simultaneously" + [[ -n "${RELEASE_GCS_BUCKET}" && -n "${RELEASE_DIR}" ]] && abort "cannot have both GCS and release directory set" + # Do auto release unless release is forced if (( is_auto_release )); then (( is_dot_release )) && abort "cannot have both --dot-release and --auto-release set simultaneously" @@ -422,8 +432,13 @@ function parse_flags() { (( has_gcr_flag )) && echo "Not publishing the release, GCR flag is ignored" (( has_gcs_flag )) && echo "Not publishing the release, GCS flag is ignored" KO_DOCKER_REPO="ko.local" - KO_FLAGS="-L ${KO_FLAGS}" RELEASE_GCS_BUCKET="" + [[ -z "${RELEASE_DIR}" ]] && RELEASE_DIR="${REPO_ROOT_DIR}" + fi + + [[ -z "${RELEASE_GCS_BUCKET}" && -z "${RELEASE_DIR}" ]] && abort "--release-gcs or --release-dir must be used" + if [[ -n "${RELEASE_DIR}" ]]; then + mkdir -p "${RELEASE_DIR}" || abort "cannot create release dir '${RELEASE_DIR}'" fi # Get the commit, excluding any tags but keeping the "dirty" flag @@ -450,6 +465,7 @@ function parse_flags() { readonly RELEASE_NOTES readonly RELEASE_BRANCH readonly RELEASE_GCS_BUCKET + readonly RELEASE_DIR readonly KO_DOCKER_REPO readonly VALIDATION_TESTS readonly FROM_NIGHTLY_RELEASE @@ -458,22 +474,24 @@ function parse_flags() { # Run tests (unless --skip-tests was passed). Conveniently displays a banner indicating so. # Parameters: $1 - executable that runs the tests. function run_validation_tests() { - if (( ! SKIP_TESTS )); then - banner "Running release validation tests" - # Run tests. - if ! 
$1; then - banner "Release validation tests failed, aborting" - exit 1 - fi + (( SKIP_TESTS )) && return + banner "Running release validation tests" + # Run tests. + if ! $1; then + banner "Release validation tests failed, aborting" + abort "release validation tests failed" fi } -# Publishes the generated artifacts to GCS, GitHub, etc. +# Publishes the generated artifacts to directory, GCS, GitHub, etc. # Parameters: $1..$n - files to add to the release. function publish_artifacts() { (( ! PUBLISH_RELEASE )) && return tag_images_in_yamls ${ARTIFACTS_TO_PUBLISH} - publish_to_gcs ${ARTIFACTS_TO_PUBLISH} + if [[ -n "${RELEASE_DIR}" ]]; then + cp ${ARTIFACTS_TO_PUBLISH} ${RELEASE_DIR} || abort "cannot copy release to '${RELEASE_DIR}'" + fi + [[ -n "${RELEASE_GCS_BUCKET}" ]] && publish_to_gcs ${ARTIFACTS_TO_PUBLISH} publish_to_github ${ARTIFACTS_TO_PUBLISH} banner "New release published successfully" } @@ -482,7 +500,7 @@ function publish_artifacts() { function main() { function_exists build_release || abort "function 'build_release()' not defined" [[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist" - parse_flags $@ + parse_flags "$@" # Log what will be done and where. banner "Release configuration" if which gcloud &>/dev/null ; then @@ -498,7 +516,9 @@ function main() { echo "- Artifacts WILL NOT be tagged" fi if (( PUBLISH_RELEASE )); then - echo "- Release WILL BE published to '${RELEASE_GCS_BUCKET}'" + local dst="${RELEASE_DIR}" + [[ -z "${dst}" ]] && dst="${RELEASE_GCS_BUCKET}" + echo "- Release WILL BE published to '${dst}'" else echo "- Release will not be published" fi @@ -527,8 +547,6 @@ function main() { build_from_source set +e +o pipefail fi - # TODO(adrcunha): Remove once all repos use ARTIFACTS_TO_PUBLISH. - [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && ARTIFACTS_TO_PUBLISH="${YAMLS_TO_PUBLISH}" [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && abort "no artifacts were generated" # Ensure no empty file will be published. 
for artifact in ${ARTIFACTS_TO_PUBLISH}; do diff --git a/test/vendor/github.com/knative/test-infra/tools/dep-collector/README.md b/test/vendor/knative.dev/test-infra/tools/dep-collector/README.md similarity index 100% rename from test/vendor/github.com/knative/test-infra/tools/dep-collector/README.md rename to test/vendor/knative.dev/test-infra/tools/dep-collector/README.md diff --git a/test/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go b/test/vendor/knative.dev/test-infra/tools/dep-collector/imports.go similarity index 100% rename from test/vendor/github.com/knative/test-infra/tools/dep-collector/imports.go rename to test/vendor/knative.dev/test-infra/tools/dep-collector/imports.go diff --git a/test/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go b/test/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go similarity index 100% rename from test/vendor/github.com/knative/test-infra/tools/dep-collector/licenses.go rename to test/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go diff --git a/test/vendor/github.com/knative/test-infra/tools/dep-collector/main.go b/test/vendor/knative.dev/test-infra/tools/dep-collector/main.go similarity index 100% rename from test/vendor/github.com/knative/test-infra/tools/dep-collector/main.go rename to test/vendor/knative.dev/test-infra/tools/dep-collector/main.go